repo_name (string, length 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list)
---|---|---|---|---|---
AMARTELKE/Pangenome-with-Panaroo
|
[
"b720debf8616882668d53600038c334393080d9b"
] |
[
"scripts/reference_based_layout.py"
] |
[
"import networkx as nxvisited\nimport re\nimport networkx.classes.function as function\nimport networkx.algorithms.connectivity.cuts as cuts\nimport networkx as nx\nimport pandas as pd\n\n\ndef get_dist(ref_s, ref_t, max_dist):\n s = ref_s.split(\"_\")[-1]\n t = ref_t.split(\"_\")[-1]\n return min(abs(int(s) - int(t)), abs(abs(int(s) - int(t)) - max_dist))\n\n\ndef add_to_queue(G, s, nodes, visited, sink, mapping, ref_g_id, max_dist):\n add = []\n for i in nodes:\n if i in visited:\n continue\n if ref_g_id not in G.nodes[i]['genomeIDs'].split(\";\"):\n add.append(i)\n else:\n #if we have discovered a refound gene we just continue\n g2g = dict([(j.split(\"_\")[0], j.split(\"_\")[1])\n for j in G.nodes[i]['geneIDs'].split(\";\")])\n if g2g[ref_g_id] == \"refound\":\n sink[\"sink\"] = i\n visited.add(i)\n continue\n dist = get_dist(mapping.loc[G.nodes[s]['name'], \"gene_id\"],\n mapping.loc[G.nodes[i]['name'],\n \"gene_id\"], max_dist)\n if dist > 100:\n sink[\"sink\"] = i\n visited.add(i)\n return add\n\n\ndef create_mapping(G, ref_g_id):\n #look up table for name vs node id\n gene_dict = {}\n for n in G.nodes():\n gene_ids = [(i.split(\"_\")[0], i)\n for i in G.nodes[n]['geneIDs'].split(\";\")]\n gene_ids = list(filter(lambda x: ref_g_id == x[0], gene_ids))\n if len(gene_ids) != 0:\n gene_dict[G.nodes[n]['name']] = gene_ids[0][1]\n elif len(gene_ids) > 1:\n raise NameError(\"A problem occurred with node!\")\n mapping = pd.DataFrame.from_dict(gene_dict, orient='index')\n mapping.columns = [\"gene_id\"]\n #we dont want to include refound genes in this step TODO also consider in add reference edges step\n mapping = mapping.loc[~mapping.loc[:, \"gene_id\"].str.contains(\"refound\"), ]\n return mapping\n\n\ndef add_ref_edges(G, mapping):\n name_dict = dict([(G.nodes[n]['name'], n) for n in G.nodes()])\n for n in mapping.index:\n mapping.loc[n, \"seq\"] = int(mapping.loc[n, \"gene_id\"].split(\"_\")[2])\n mapping.sort_values(\"seq\", inplace=True)\n j = 0\n for i in range(1, mapping.shape[0]):\n node1 = str(name_dict[mapping.index[i]])\n node2 = str(name_dict[mapping.index[i - 1]])\n if not G.has_edge(node1, node2):\n j += 1\n G.add_edge(node1, node2)\n return G\n\n\ndef remove_var_edges(g):\n for n in g:\n if G.nodes[n][\"highVar\"] == 1 and ref_g_id not in G.nodes[n][\n 'genomeIDs'].split(\";\"):\n var_nodes.append(n)\n var_nodes = []\n g.remove_nodes_from(var_nodes)\n return g\n\n\ndef layout(graph, ref_g_id, cut_edges_out, ignore_high_var,\n add_reference_edges):\n G = nx.read_gml(graph)\n #look up table for name vs node id\n mapping = create_mapping(G, ref_g_id)\n gene_order = [\n int(mapping.loc[n, \"gene_id\"].split(\"_\")[2]) for n in mapping.index\n ]\n max_dist = max(gene_order)\n if ignore_high_var:\n G = remove_var_edges(G)\n if add_reference_edges:\n G = add_ref_edges(G, mapping)\n #write gml with reference edges to disk to be used in cytoscape instead of the original final_graph.gml\n nx.write_gml(G, graph.replace(\".gml\", \"_with_ref.gml\"))\n name_dict = dict([(G.nodes[n]['name'], n) for n in G.nodes()])\n #set capacity for edges for the min cut algorithm as the weight of that edge\n for e in G.edges:\n try:\n G.edges[e][\"capacity\"] = G.edges[e][\"size\"]\n except:\n G.edges[e][\"capacity\"] = 1\n #store edges to be taken out of the graph\n cut_edges = []\n i = 0\n cur_try = 0\n #iterate over all reference nodes in mapping table\n while i < len(mapping.index):\n n = mapping.index[i]\n print(i)\n if n not in name_dict:\n i += 1\n continue\n nid = name_dict[n]\n visited = 
set([nid])\n sink = {\"sink\": None}\n queue = add_to_queue(G, nid, G.neighbors(nid), visited, sink, mapping,\n ref_g_id, max_dist)\n #depth first search\n last_target = None\n while len(queue) != 0:\n target = queue.pop(0)\n visited.add(target)\n neighbors = G.neighbors(target)\n #for each reference node explore all edges that lead to non-reference nodes\n queue = queue + add_to_queue(G, nid, neighbors, visited, sink,\n mapping, ref_g_id, max_dist)\n last_target = None\n #did we find a long-range connection?\n if sink[\"sink\"] is not None:\n print(\"found path\")\n visited.add(sink[\"sink\"])\n s_t_graph = function.induced_subgraph(G, visited)\n s_t_graph = nx.Graph(s_t_graph)\n #the induced graph could contain reference edges which need to be removed\n remove = []\n for e in s_t_graph.edges:\n if ref_g_id in G.nodes[e[0]]['genomeIDs'].split(\";\") \\\n and ref_g_id in G.nodes[e[1]]['genomeIDs'].split(\";\"):\n g2g1 = dict([(j.split(\"_\")[0], j.split(\"_\")[1])\n for j in G.nodes[e[0]]['geneIDs'].split(\";\")])\n g2g2 = dict([(j.split(\"_\")[0], j.split(\"_\")[1])\n for j in G.nodes[e[1]]['geneIDs'].split(\";\")])\n if g2g1[ref_g_id] == \"refound\" or g2g2[\n ref_g_id] == \"refound\":\n continue\n else:\n n1 = mapping.loc[G.nodes[e[0]][\"name\"]][0]\n n2 = mapping.loc[G.nodes[e[1]][\"name\"]][0]\n if abs(int(n1.split(\"_\")[2]) -\n int(n2.split(\"_\")[2])) < 100:\n remove.append(e)\n s_t_graph.remove_edges_from(remove)\n #print some info about that long-range connection\n #print(n)\n #print(nid, sink[\"sink\"])\n #min cut between the two reference nodes\n cut = []\n cut_weight, partitions = nx.algorithms.flow.minimum_cut(\n s_t_graph, nid, sink[\"sink\"])\n for p1_node in partitions[0]:\n for p2_node in partitions[1]:\n if s_t_graph.has_edge(p1_node, p2_node):\n cut.append((p1_node, p2_node))\n #cardinality cut TODO make this an option\n #cut = cuts.minimum_edge_cut(s_t_graph, nid, sink[\"sink\"])\n for e in cut:\n print(G.nodes[e[0]]['name'], G.nodes[e[1]]['name'])\n cut_edges.append(e)\n #delete cut edges from the graph\n if len(cut) == 0:\n #something happened as no min cut can be found\n i += 1\n raise NameError(\n \"no min cut could be found; sorry this shouldn't happen\")\n G.remove_edges_from(cut)\n sink[\"sink\"] = None\n #there may be more paths from that node -> apply again on the same node\n else:\n #all nodes explored; move on\n i += 1\n sink[\"sink\"] = None\n #write cut edges to disk\n with open(cut_edges_out, \"w\") as f:\n f.write(\"shared name\\tis_cut_edge\\n\")\n for e in cut_edges:\n f.write(\"%s (interacts with) %s\\t1\\n\" % (e[0], e[1]))\n f.write(\"%s (interacts with) %s\\t1\\n\" % (e[1], e[0]))\n #DEBUG to compress the graph\n #for n in G.nodes:\n # gene_ids = [(i.split(\"_\")[0], i) for i in G.nodes[n]['geneIDs'].split(\";\")]\n # gene_ids = list(filter(lambda x: ref_g_id == x[0],gene_ids))\n # if len(gene_ids) == 1:\n # G.nodes[n]['geneIDs'] = \"\"\n # else:\n # G.nodes[n]['geneIDs'] = gene_ids[0][1]\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n \"enable reference-based layouting through detecting long-range connection between otherwise distant genes in a reference genome\"\n )\n parser.add_argument(\n \"ref_g_id\", help='reference genome id (should be a complete genome)')\n parser.add_argument(\"graph\", help='path to final_graph.gml')\n parser.add_argument(\"cut_edges_out\", help='file for cut edges')\n parser.add_argument(\n \"--add_reference_edges\",\n action=\"store_true\",\n help=\n 'add edges between 
consecutive genes in the reference genome even if they have been removed by panaroo'\n )\n parser.add_argument(\"--ignore_high_var\",\n action=\"store_true\",\n help='ignore highly variable genes')\n args = parser.parse_args()\n layout(**vars(args))\n"
] |
[
[
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Programmer-RD-AI/kickstarter-NLP-V7
|
[
"d751a0eae257b544a15cae3e18ba4ce9fdd7055c"
] |
[
"wandb/run-20211031_160136-30z8d5k3/files/code/00.py"
] |
[
"from sklearn.model_selection import *\nimport wandb\nimport nltk\nfrom nltk.stem.porter import *\nfrom torch.nn import *\nfrom torch.optim import *\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torchvision\nimport random\nfrom tqdm import *\nfrom torch.utils.data import Dataset, DataLoader\nstemmer = PorterStemmer()\nPROJECT_NAME = 'kickstarter-NLP-V7'\ndevice = 'cuda'\n\n\ndef tokenize(sentence):\n return nltk.word_tokenize(sentence.lower())\n\n\nprint(tokenize('$100'))\n\n\ndef stem(word):\n return stemmer.stem(word.lower())\n\n\nprint(stem('organic'))\n\n\ndef bag_of_words(t_words, words):\n t_words = [stem(w) for w in t_words]\n bag = np.zeros(len(words))\n for idx, w in enumerate(words):\n if w in t_words:\n bag[idx] = 1.0\n return bag\n\n\nprint(bag_of_words(['hi'], ['hi', 'how', 'hi']))\ndata = pd.read_csv('./data.csv')[:1250]\nprint(data.columns)\nX = data['blurb']\ny = data['state']\nwords = []\nlabels = {}\nlabels_r = {}\nidx = 0\ndata = []\nfor label in tqdm(list(y.tolist())):\n if label not in list(labels.keys()):\n idx += 1\n labels[label] = idx\n labels_r[idx] = label\n\nfor X_batch, y_batch in zip(tqdm(X), y):\n X_batch = tokenize(X_batch)\n new_X = []\n for Xb in X_batch:\n new_X.append(stem(Xb))\n words.extend(new_X)\n data.append([\n new_X,\n np.eye(labels[y_batch], len(labels))[labels[y_batch]-1]\n ])\nwords = sorted(set(words))\nnp.random.shuffle(data)\n\nX = []\ny = []\nfor d in tqdm(data):\n X.append(bag_of_words(d[0], words))\n y.append(d[1])\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.125, shuffle=False)\nX_train = torch.from_numpy(np.array(X_train)).to(device).float()\ny_train = torch.from_numpy(np.array(y_train)).to(device).float()\nX_test = torch.from_numpy(np.array(X_test)).to(device).float()\ny_test = torch.from_numpy(np.array(y_test)).to(device).float()\n\n\ndef get_loss(model, X, y, criterion):\n preds = model(X)\n loss = criterion(preds, y)\n return loss.item()\n\n\ndef get_accuracy(model, X, y,):\n preds = model(X)\n correct = 0\n total = 0\n for pred, yb in zip(preds, y):\n pred = int(torch.argmax(pred))\n yb = int(torch.argmax(yb))\n if pred == yb:\n correct += 1\n total += 1\n acc = round(correct/total, 3)*100\n return acc\n\n\nclass Model(Module):\n def __init__(self):\n super().__init__()\n self.hidden = 8\n self.activation = ReLU()\n self.bn = BatchNorm1d(self.hidden)\n self.linear1 = Linear(len(words), self.hidden)\n self.linear2 = Linear(self.hidden, self.hidden)\n self.linear3 = Linear(self.hidden, len(labels))\n\n def forward(self, X):\n preds = self.linear1(X)\n preds = self.activation(self.bn(self.linear2(preds)))\n preds = self.linear3(preds)\n return preds\n\n\nmodel = Model().to(device)\ncriterion = MSELoss()\noptimizer = Adam(model.parameters(), lr=0.001)\nepochs = 100\nbatch_size = 32\nwandb.init(project=PROJECT_NAME, name='baseline')\nfor _ in tqdm(range(epochs)):\n for i in range(0, len(X_train), batch_size):\n X_batch = X_train[i:i+batch_size]\n y_batch = y_train[i:i+batch_size]\n preds = model(X_batch)\n loss = criterion(preds, y_batch)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n model.eval()\n torch.cuda.empty_cache()\n wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion)/2)})\n torch.cuda.empty_cache()\n wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})\n torch.cuda.empty_cache()\n wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})\n torch.cuda.empty_cache()\n 
wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)})\n torch.cuda.empty_cache()\n model.train()\nwandb.finish()\ntorch.cuda.empty_cache()\ntorch.save(model,'model.pt')\ntorch.save(model,'model.pth')\ntorch.save(model.state_dict(),'model-sd.pt')\ntorch.save(model.state_dict(),'model-sd.pth')\ntorch.save(words,'words.pt')\ntorch.save(words,'words.pth')\ntorch.save(data,'data.pt')\ntorch.save(data,'data.pth')\ntorch.save(labels,'labels.pt')\ntorch.save(labels,'labels.pth')\ntorch.save(idx,'idx.pt')\ntorch.save(idx,'idx.pth')\ntorch.save(y_train,'y_train.pt')\ntorch.save(y_test,'y_test.pth')\ntorch.save(y,'y.pt')\ntorch.save(y,'y.pth')\n"
] |
[
[
"pandas.read_csv",
"torch.cuda.empty_cache",
"numpy.random.shuffle",
"torch.save",
"numpy.array",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhenv5/fedlearner
|
[
"a8ff0eaef48e174d432a40d23d12c1f57e842ebd",
"a8ff0eaef48e174d432a40d23d12c1f57e842ebd"
] |
[
"fedlearner/trainer/embedding.py",
"fedlearner/trainer/data/data_block_loader.py"
] |
[
"# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n# pylint: disable=protected-access\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom fedlearner.trainer import operator\n\n\ndef _sharded_size(total_size, shard_id, num_shards):\n return int(total_size / num_shards) + ((total_size % num_shards) > shard_id)\n\n\nclass Embedding(object):\n def __init__(self, config, devices=(None,)):\n self._config = config\n self._devices = devices\n self._num_shards = len(devices)\n self._use_fid_v2 = config['use_fid_v2']\n\n self._weights = []\n with tf.variable_scope(\"lagrange_embedding_pooling/%s\"%config['name']):\n for i in range(config['num_groups']):\n shards = []\n for shard_id in range(self._num_shards):\n with tf.device(self._devices[shard_id]), \\\n tf.variable_scope('shard_%d'%shard_id):\n weight_name = 'embedding_weight_' + '_'.join([\n str(j) for j, k in enumerate(\n config['slot_weight_index']) if k == i])\n shards.append(tf.get_variable(\n name=weight_name,\n shape=(_sharded_size(config['weight_hash_sizes'][i],\n shard_id, self._num_shards),\n config['weight_sizes'][i]),\n initializer=config['initializers'][i]\n ))\n self._weights.append(shards)\n\n @property\n def weights(self):\n return self._weights\n\n @property\n def config(self):\n return self._config\n\n def _lookup_one_shard(self, features, shard_id):\n name = self._config['name']\n\n slot_size = tf.constant(self._config['slot_size'], dtype=tf.int64)\n slot_weight_index = tf.constant(self._config['slot_weight_index'],\n dtype=tf.int64)\n slot_output_offset = tf.constant(self._config['slot_output_offset'],\n dtype=tf.int64)\n slot_hash_size = tf.constant(self._config['slot_hash_size'],\n dtype=tf.int64)\n slot_weight_offset = tf.constant(self._config['slot_weight_offset'],\n dtype=tf.int64)\n\n fmt = '%s_%d_'%(name, shard_id)\n\n num_unique_fids_per_partition = features.pop(\n fmt+'num_unique_fids_per_partition')\n fid_to_unique_index = features.pop(fmt+'fid_to_unique_index')\n unique_fid_hash = features.pop(fmt+'unique_fid_hash')\n assert isinstance(unique_fid_hash, tuple)\n batch_size = features.pop(fmt+'batch_size')\n instance_ids = features.pop(fmt+'instance_ids')\n fids = features.pop(fmt+'fids')\n\n bwd_deps = [\n tf.identity(num_unique_fids_per_partition,\n name=\"%s_Identity_num_unique_fids_per_partition\"%(fmt)),\n tf.identity(fid_to_unique_index,\n name=\"%s_Identity_fid_to_unique_index\"%(fmt)),] + [\n tf.identity(t, name=\"%s_Identity_unique_fid_hash_%d\"%(fmt, i)) \\\n for (i, t) in enumerate(unique_fid_hash)\n ]\n\n with tf.control_dependencies(bwd_deps):\n output = operator.lagrange_lite_ops.lagrange_embedding_pooling(\n output_size=self._config['output_size'],\n weight_sizes=self._config['weight_sizes'],\n use_fid_v2=self._use_fid_v2,\n num_shards=self._num_shards,\n batch_size=batch_size,\n instance_ids=instance_ids,\n 
fids=fids,\n slot_size=slot_size,\n slot_weight_index=slot_weight_index,\n slot_output_offset=slot_output_offset,\n slot_hash_size=slot_hash_size,\n slot_weight_offset=slot_weight_offset,\n weights=[w[shard_id] for w in self.weights])\n\n return output\n\n def lookup(self, features):\n if self._num_shards == 1:\n with tf.device(self._devices[0]):\n return self._lookup_one_shard(features, 0)\n\n outputs = []\n for shard_id in range(self._num_shards):\n with tf.device(self._devices[shard_id]):\n outputs.append(self._lookup_one_shard(features, shard_id))\n return tf.add_n(outputs)\n",
"# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\nimport logging\nimport tensorflow.compat.v1 as tf\n\nclass DataBlockLoader(object):\n def __init__(self, batch_size, role, bridge, trainer_master):\n self._batch_size = batch_size\n self._role = role\n self._bridge = bridge\n self._trainer_master = trainer_master\n assert self._trainer_master is not None\n\n self._count = 0\n if role == 'follower':\n self._block_queue = queue.Queue()\n self._bridge.register_data_block_handler(self._data_block_handler)\n\n def _data_block_handler(self, msg):\n logging.debug('DataBlock: recv \"%s\" at %d', msg.block_id, msg.count)\n assert self._count == msg.count\n if not msg.block_id:\n block = None\n else:\n block = self._trainer_master.request_data_block(msg.block_id)\n if block is None:\n return False\n self._count += 1\n self._block_queue.put(block)\n return True\n\n def get_next_block(self):\n if self._role == 'leader':\n while True:\n block = self._trainer_master.request_data_block()\n if block is not None:\n if not self._bridge.load_data_block(\n self._count, block.block_id):\n continue\n else:\n self._bridge.load_data_block(self._count, '')\n break\n self._count += 1\n else:\n block = self._block_queue.get()\n return block\n\n def make_dataset(self):\n def gen():\n while True:\n block = self.get_next_block()\n if not block:\n break\n yield block.data_path\n\n dataset = tf.data.Dataset.from_generator(gen, tf.string)\n dataset = dataset.prefetch(2)\n dataset = tf.data.TFRecordDataset(dataset)\n dataset = dataset.batch(self._batch_size, drop_remainder=True)\n dataset = dataset.prefetch(2)\n return dataset\n\n def make_batch_iterator(self):\n return self.make_dataset().make_one_shot_iterator()\n"
] |
[
[
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.identity"
],
[
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.data.Dataset.from_generator"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shawn-Guo-CN/EGG
|
[
"0a5b258108e2cd1c873d7f67e8c92551bb3d809c"
] |
[
"egg/zoo/objects_game/features.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch.utils import data\nimport numpy as np\nfrom functools import reduce\nfrom egg.zoo.objects_game.util import compute_binomial\nimport itertools\nimport os\nimport pathlib\n\nclass VectorsLoader:\n def __init__(self,\n perceptual_dimensions=[4, 4, 4, 4, 4],\n n_distractors=1,\n batch_size=32,\n train_samples=128000,\n validation_samples=4096,\n test_samples=1024,\n shuffle_train_data=False,\n dump_data_folder=None,\n load_data_path =None,\n seed=None):\n\n self.perceptual_dimensions = perceptual_dimensions\n self._n_features = len(self.perceptual_dimensions)\n self.n_distractors = n_distractors\n\n self.batch_size = batch_size\n self.train_samples = train_samples\n self.validation_samples = validation_samples\n self.test_samples = test_samples\n\n self.shuffle_train_data = shuffle_train_data\n\n self.load_data_path = load_data_path\n\n self.dump_data_folder = pathlib.Path(dump_data_folder) if dump_data_folder is not None else None\n\n seed = seed if seed else np.random.randint(0, 2 ** 31)\n self.random_state = np.random.RandomState(seed)\n\n @property\n def n_features(self):\n return self._n_features\n\n @n_features.setter\n def n_features(self, n_features):\n self._n_features = n_features\n\n\n def upd_cl_options(self, opts):\n opts.perceptual_dimensions = self.perceptual_dimensions\n opts.train_samples = self.train_samples\n opts.validation_samples = self.validation_samples\n opts.test_samples = self.test_samples\n opts.n_distractors = self.n_distractors\n\n def load_data(self, data_file):\n data = np.load(data_file)\n train, train_labels = data['train'], data['train_labels']\n valid, valid_labels = data['valid'], data['valid_labels']\n test, test_labels = data['test'], data['test_labels']\n\n # train valid and test are of shape b_size X n_distractors+1 X n_features \n self.train_samples = train.shape[0]\n self.validation_samples = valid.shape[0]\n self.test_samples = test.shape[0]\n\n self.n_distractors = train.shape[1] - 1\n self.perceptual_dimensions = [-1] * train.shape[-1]\n self._n_features = len(self.perceptual_dimensions)\n\n return (train, train_labels), (valid, valid_labels), (test, test_labels)\n\n def _fill_split(self, all_vectors, n_samples, tuple_dict):\n split_list = []\n len_all_vectors = len(all_vectors)\n tuple_dim = self.n_distractors+1\n done = 0\n while done < n_samples:\n candidates_tuple = self.random_state.choice(len_all_vectors, replace=False, size=tuple_dim)\n key = ''\n for vector_idx in candidates_tuple:\n key += f'{str(vector_idx)}-'\n key = key[:-1]\n if key not in tuple_dict:\n tuple_dict[key] = True\n possible_batch = all_vectors[candidates_tuple]\n split_list.append(possible_batch)\n done += 1\n else:\n continue\n\n target_idxs = self.random_state.choice(self.n_distractors+1, n_samples)\n\n return (np.array(split_list), target_idxs), tuple_dict\n\n def generate_tuples(self, data):\n data = np.array(data)\n train_data, tuple_dict = self._fill_split(data, self.train_samples, {})\n valid_data, tuple_dict = self._fill_split(data, self.validation_samples, tuple_dict)\n test_data, _ = self._fill_split(data, self.test_samples, tuple_dict)\n return train_data, valid_data, test_data\n\n def collate(self, batch):\n tuples, target_idxs = [elem[0] for elem in batch], [elem[1] for elem in batch]\n receiver_input = np.reshape(tuples, (self.batch_size, 
self.n_distractors+1, -1))\n labels = np.array(target_idxs)\n targets = receiver_input[np.arange(self.batch_size), labels]\n return torch.from_numpy(targets).float(), torch.from_numpy(labels).long(), torch.from_numpy(receiver_input).float()\n\n def get_iterators(self):\n if self.load_data_path:\n train, valid, test = self.load_data(self.load_data_path)\n else: # if load_data_path wasn't given then I need to generate the tuple\n world_dim = reduce(lambda x, y: x*y, self.perceptual_dimensions)\n possible_tuples = compute_binomial(world_dim, self.n_distractors+1)\n\n list_of_dim = [range(1, elem+1) for elem in self.perceptual_dimensions]\n all_vectors = list(itertools.product(*list_of_dim))\n\n assert self.train_samples > 0 and self.validation_samples > 0 and self.test_samples > 0, 'Train size, validation size and test size must all be greater than 0'\n assert possible_tuples > self.train_samples + self.validation_samples + self.test_samples , f'Not enough data for requested split sizes. Reduced split samples or increase perceptual_dimensions'\n train, valid, test = self.generate_tuples(data=all_vectors)\n\n assert self.train_samples >= self.batch_size and self.validation_samples >= self.batch_size and self.test_samples >= self.batch_size, 'Batch size cannot be smaller than any split size'\n\n train_dataset = TupleDataset(*train)\n valid_dataset = TupleDataset(*valid)\n test_dataset = TupleDataset(*test)\n\n train_it = data.DataLoader(train_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True, shuffle=self.shuffle_train_data)\n validation_it = data.DataLoader(valid_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True)\n test_it = data.DataLoader(test_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True)\n\n if self.dump_data_folder:\n self.dump_data_folder.mkdir(exist_ok=True)\n path = self.dump_data_folder / f'{self.perceptual_dimensions}_{self.n_distractors}_distractors'\n np.savez_compressed(path,\n train=train[0],\n train_labels=train[1],\n valid=valid[0],\n valid_labels=valid[1],\n test=test[0],\n test_labels=test[1],\n n_distractors=self.n_distractors)\n\n return train_it, validation_it, test_it\n\nclass TupleDataset(data.Dataset):\n def __init__(self, tuples, target_idxs):\n self.list_of_tuples = tuples\n self.target_idxs = target_idxs\n\n def __len__(self):\n return len(self.list_of_tuples)\n\n def __getitem__(self, idx):\n if idx < 0 or idx >= len(self.list_of_tuples):\n raise RuntimeError('Accessing dataset through wrong index: < 0 or >= max_len')\n return self.list_of_tuples[idx], self.target_idxs[idx]\n\n"
] |
[
[
"numpy.reshape",
"numpy.arange",
"torch.from_numpy",
"numpy.savez_compressed",
"numpy.load",
"numpy.array",
"numpy.random.RandomState",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangxieric/Meta-Aug
|
[
"2d0a3c0535be8a6c6d9bf7fd45188f7f97b86ec6"
] |
[
"models/IMDB_sentiment_cls/run_BERT.py"
] |
[
"import os\nimport pandas as pd\nimport torch\nimport sys\nsys.path.insert(0, \"/nfs/Meta-Aug/\")\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom transformers import get_linear_schedule_with_warmup\nfrom transformers.optimization import AdamW\nfrom transformers import BertModel\nfrom transformers import AutoTokenizer\nfrom sys import platform\nimport pickle\nimport gzip\nfrom utils import train, validate, test, Metric\nfrom BertClassifier import BertClassifier\nfrom core.config import prep_config\nfrom core.prep_meta import cal_len_atr\nfrom core.prep_meta import cal_pos_atr\nimport nltk\nimport numpy as np\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\n\n\nclass TextDataPreprocess(Dataset):\n \"\"\"\n Text Data Encoding, which generates the intial token_ids of the input text.\n \"\"\"\n def __init__(self, dataset, max_seq_len = 100):\n super(TextDataPreprocess, self).__init__()\n self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n self.config_path = '../../configs/IMDB_senti_cls.json'\n self.config = prep_config(self.config_path)\n \n self.max_seq_len = self.config['model']['max_seq_len']\n self.use_meta = self.config['model']['use_meta']\n if self.use_meta:\n self.meta_atr = self.config['model']['meta_atr']\n self.dataset = dataset \n self.inputs = self.get_input(self.dataset)\n\n def __len__(self):\n return len(self.dataset)\n \n def __getitem__(self, idx):\n outputs = []\n for part_input in self.inputs:\n outputs.append(part_input[idx])\n return outputs\n \n def get_input(self, input_data):\n # input_data is a dataframe variable\n sentences = input_data['review'].values\n labels = input_data['label'].values\n \n tokens_seq = list(map(self.tokenizer.tokenize, sentences))\n result = list(map(self.trunate_and_pad, tokens_seq))\n \n input_ids = [i[0] for i in result]\n attention_mask = [i[1] for i in result]\n \n if self.use_meta:\n # calculate length attribute\n len_atr = np.array(list(map(cal_len_atr, sentences)))\n len_atr = len_atr.reshape(-1, 1)\n \n # calculate part_of_speech attribute\n pos_atr = np.array(list(map(cal_pos_atr, sentences)))\n \n atr_scores = np.concatenate((len_atr,pos_atr), axis = 1)\n return (\n torch.Tensor(input_ids).type(torch.long),\n torch.Tensor(attention_mask).type(torch.long),\n torch.Tensor(labels).type(torch.long),\n torch.Tensor(atr_scores).type(torch.float),\n )\n else:\n return(\n torch.Tensor(input_ids).type(torch.long),\n torch.Tensor(attention_mask).type(torch.long),\n torch.Tensor(labels).type(torch.long),\n None,\n )\n \n def trunate_and_pad(self, tokens_seq):\n \n # add '[cls]' to the beginning\n tokens_seq = ['[cls]'] + tokens_seq\n # Length Control\n if len(tokens_seq) > self.max_seq_len:\n tokens_seq = tokens_seq[0 : self.max_seq_len]\n padding = [0] * (self.max_seq_len - len(tokens_seq))\n \n # Convert tokens_seq to token_ids\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens_seq)\n input_ids += padding\n attention_mask = [1] * len(tokens_seq) + padding\n token_types_ids = [0] * self.max_seq_len\n\n assert len(input_ids) == self.max_seq_len\n assert len(attention_mask) == self.max_seq_len\n assert len(token_types_ids) == self.max_seq_len\n \n return input_ids, attention_mask, token_types_ids\n\n\ndef model_train_validate_test(train_df, test_df, target_dir, \n max_seq_len=100,\n epochs=20,\n batch_size=32,\n lr=2e-05,\n patience=3,\n max_grad_norm=10.0,\n if_save_model=True,\n checkpoint=None):\n \"\"\"\n Parameters\n ----------\n train_df 
: pandas dataframe of train set.\n test_df : pandas dataframe of test set.\n target_dir : the path where you want to save model.\n max_seq_len: the max truncated length.\n epochs : the default is 3.\n batch_size : the default is 32.\n lr : learning rate, the default is 2e-05.\n patience : the default is 1.\n max_grad_norm : the default is 10.0.\n if_save_model: if save the trained model to the target dir.\n checkpoint : the default is None.\n \"\"\"\n bertclassifier = BertClassifier(requires_grad = True)\n \n print(20 * \"=\", \" Preparing for training \", 20 * \"=\")\n # Path to save the model, create a folder if not exist.\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n \n # -------------------- Data loading --------------------------------------#\n \n # For the IMDB dataset, there is no validation dataset\n\n print(\"\\t* Loading training data...\")\n train_data = TextDataPreprocess(train_df, max_seq_len = max_seq_len)\n train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)\n \n print(\"\\t* Loading test data...\")\n test_data = TextDataPreprocess(test_df, max_seq_len = max_seq_len) \n test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)\n \n # -------------------- Model definition ------------------- --------------#\n \n print(\"\\t* Building model...\")\n device = torch.device(\"cuda\")\n model = bertclassifier.to(device)\n \n # -------------------- Preparation for training -------------------------#\n \n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {\n 'params':[p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay':0.01\n },\n {\n 'params':[p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay':0.0\n }\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n\n ## Implement of warm up\n ## total_steps = len(train_loader) * epochs\n ## scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=60, num_training_steps=total_steps)\n \n # When the monitored value is not improving, the network performance could be improved by reducing the learning rate.\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=\"max\", factor=0.85, patience=0)\n\n best_score = 0.0\n start_epoch = 1\n # Data for loss curves plot\n epochs_count = []\n train_losses = []\n train_accuracies = []\n valid_losses = []\n valid_accuracies = []\n valid_aucs = []\n \n # Continuing training from a checkpoint if one was given as argument\n if checkpoint:\n checkpoint = torch.load(checkpoint)\n start_epoch = checkpoint[\"epoch\"] + 1\n best_score = checkpoint[\"best_score\"]\n print(\"\\t* Training will continue on existing model from epoch {}...\".format(start_epoch))\n model.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n epochs_count = checkpoint[\"epochs_count\"]\n train_losses = checkpoint[\"train_losses\"]\n train_accuracy = checkpoint[\"train_accuracy\"]\n valid_losses = checkpoint[\"valid_losses\"]\n valid_accuracy = checkpoint[\"valid_accuracy\"]\n valid_auc = checkpoint[\"valid_auc\"]\n \n # -------------------- Training epochs -----------------------------------#\n \n print(\"\\n\", 20 * \"=\", \"Training bert model on device: {}\".format(device), 20 * \"=\")\n patience_counter = 0\n for epoch in range(start_epoch, epochs + 1):\n epochs_count.append(epoch)\n \n print(\"* Training epoch {}:\".format(epoch))\n 
print(\"parameters: \", param_optimizer)\n epoch_time, epoch_loss, epoch_accuracy = train(model, train_loader, optimizer, epoch, max_grad_norm)\n train_losses.append(epoch_loss)\n train_accuracies.append(epoch_accuracy) \n print(\"-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%\".format(epoch_time, epoch_loss, (epoch_accuracy*100)))\n \n # Update the optimizer's learning rate with the scheduler.\n scheduler.step(epoch_accuracy)\n ## scheduler.step()\n \n # Early stopping on validation accuracy.\n if epoch_accuracy < best_score:\n patience_counter += 1\n else:\n best_score = epoch_accuracy\n patience_counter = 0\n if (if_save_model):\n torch.save({\"epoch\": epoch, \n \"model\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"best_score\": best_score,\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"train_accuracy\": train_accuracies,\n \"valid_losses\": valid_losses,\n \"valid_accuracy\": valid_accuracies,\n \"valid_auc\": valid_aucs\n },\n os.path.join(target_dir, \"best.pth.tar\"))\n print(\"save model succesfully!\\n\")\n \n # run model on test set and save the prediction result to csv\n print(\"* Test for epoch {}:\".format(epoch))\n _, _, test_accuracy, _, all_prob = validate(model, test_loader)\n print(\"Test accuracy: {:.4f}%\\n\".format(test_accuracy))\n test_prediction = pd.DataFrame({'prob_1':all_prob})\n test_prediction['prob_0'] = 1-test_prediction['prob_1']\n test_prediction['prediction'] = test_prediction.apply(lambda x: 0 if (x['prob_0'] > x['prob_1']) else 1, axis=1)\n test_prediction = test_prediction[['prob_0', 'prob_1', 'prediction']]\n test_prediction.to_csv(os.path.join(target_dir,\"test_prediction_ext.csv\"), index=False)\n \n if patience_counter >= patience:\n print(\"-> Early stopping: patience limit reached, stopping...\")\n break\n\n \nif __name__ == \"__main__\":\n sys.stdout = open('outputs/BERT/results_IMDB_meta.txt', 'w', buffering=1)\n data_df = pickle.load(gzip.open(\"../../data/IMDB/processed_IMDB_data.p\", 'rb'))\n train_df = data_df.head(25000)\n test_df = data_df.tail(25000)\n target_dir = \"outputs/BERT\"\n model_train_validate_test(train_df, test_df, target_dir, max_seq_len=100, epochs=10, batch_size=64, lr=5e-5, patience=3, max_grad_norm=10.0, if_save_model=True, checkpoint=None)\n test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction_ext.csv'))\n Metric(test_df.label, test_result.prediction)"
] |
[
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.Tensor",
"torch.load",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"numpy.concatenate",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
haiyangxue/fairseq
|
[
"b142ceec20d69130aea823054b193a0d04a780e3",
"b142ceec20d69130aea823054b193a0d04a780e3"
] |
[
"interactive.py",
"demo/utils/print_pt.py"
] |
[
"#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTranslate raw text with a trained model. Batches data on-the-fly.\n\"\"\"\n\nfrom collections import namedtuple\nimport fileinput\n\nimport torch\nimport os\nfrom fairseq import checkpoint_utils, options, tasks, utils\nfrom fairseq.data import encoders,data_utils\nimport torchaudio\nimport torchaudio.compliance.kaldi as kaldi\n\nBatch = namedtuple('Batch', 'ids src_tokens src_lengths')\nTranslation = namedtuple('Translation', 'src_str hypos pos_scores alignments')\n\n\ndef buffered_read(input, buffer_size):\n buffer = []\n with fileinput.input(files=[input], openhook=fileinput.hook_encoded(\"utf-8\")) as h:\n for src_str in h:\n buffer.append(src_str.strip())\n if len(buffer) >= buffer_size:\n yield buffer\n buffer = []\n\n if len(buffer) > 0:\n yield buffer\n\n\ndef make_batches(lines, args, task, max_positions, encode_fn):\n tokens = [\n task.source_dictionary.encode_line(\n encode_fn(src_str), add_if_not_exist=False\n ).long()\n for src_str in lines\n ]\n lengths = torch.LongTensor([t.numel() for t in tokens])\n itr = task.get_batch_iterator(\n dataset=task.build_dataset_for_inference(tokens, lengths),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=max_positions,\n ).next_epoch_itr(shuffle=False)\n for batch in itr:\n yield Batch(\n ids=batch['id'],\n src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'],\n )\n\n\ndef main(args):\n utils.import_user_module(args)\n\n if args.buffer_size < 1:\n args.buffer_size = 1\n if args.max_tokens is None and args.max_sentences is None:\n args.max_sentences = 1\n\n assert not args.sampling or args.nbest == args.beam, \\\n '--sampling requires --nbest to be equal to --beam'\n assert not args.max_sentences or args.max_sentences <= args.buffer_size, \\\n '--max-sentences/--batch-size cannot be larger than --buffer-size'\n\n # print(args)\n print()\n print(\"*******************\")\n print(args.task)\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n # Setup task, e.g., translation\n task = tasks.setup_task(args)\n\n # Load ensemble\n print('| loading model(s) from {}'.format(args.path))\n models, _model_args = checkpoint_utils.load_model_ensemble(\n args.path.split(':'),\n arg_overrides=eval(args.model_overrides),\n task=task,\n )\n\n # Set dictionaries\n src_dict = task.source_dictionary\n tgt_dict = task.target_dictionary\n\n # Optimize ensemble for generation\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n if args.fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n # Initialize generator\n generator = task.build_generator(args)\n\n # Handle tokenization and BPE\n tokenizer = encoders.build_tokenizer(args)\n bpe = encoders.build_bpe(args)\n\n def encode_fn(x):\n if tokenizer is not None:\n x = tokenizer.encode(x)\n if bpe is not None:\n x = bpe.encode(x)\n return x\n\n def decode_fn(x):\n if bpe is not None:\n x = bpe.decode(x)\n if tokenizer is not None:\n x = tokenizer.decode(x)\n return x\n\n def collate_frames(frames):\n \"\"\"Convert a list of 2d frames into a padded 3d tensor\n Args:\n frames (list): list of 2d frames of size L[i]*f_dim. 
Where L[i] is\n length of i-th frame and f_dim is static dimension of features\n Returns:\n 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]\n \"\"\"\n len_max = max(frame.size(0) for frame in frames)\n print(frames.size())\n print(frames[0].size())\n f_dim = frames[0].size(1)\n res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)\n for i, v in enumerate(frames):\n res[i, : v.size(0)] = v\n return res\n\n # Load alignment dictionary for unknown word replacement\n # (None if no unknown word replacement, empty if no path to align dictionary)\n align_dict = utils.load_align_dict(args.replace_unk)\n\n max_positions = utils.resolve_max_positions(\n task.max_positions(),\n *[model.max_positions() for model in models]\n )\n\n # if args.buffer_size > 1:\n # print('| Sentence buffer size:', args.buffer_size)\n # print('| Type the input sentence and press return:')\n start_id = 0\n audio_root_path=\"/search/odin/haiyang/fairseq_exp/e2e_trans/fairseq/examples/speech_recognition/datasets/zh_asr_data/train/train2\"\n with open(args.input)as inp:\n input = inp.readline().strip()\n while input:\n print()\n audio_path=audio_root_path+\"/\"+input.split(\" \")[0]+\".wav\"\n inputs = [\" \".join(input.split(\" \")[1:])]\n results = []\n for batch in make_batches(inputs, args, task, max_positions, encode_fn):\n src_tokens = batch.src_tokens\n src_lengths = batch.src_lengths\n if use_cuda:\n src_tokens = src_tokens.cuda()\n src_lengths = src_lengths.cuda()\n if args.task==\"translation\":\n sample = {\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n },\n }\n else:\n if not os.path.exists(audio_path):\n raise FileNotFoundError(\"Audio file not found: {}\".format(audio_path))\n sound, sample_rate = torchaudio.load_wav(audio_path)\n num_mel_bins ,frame_length,frame_shift= 80,25.0,10.0\n\n output = kaldi.fbank(\n sound,\n num_mel_bins=num_mel_bins,\n frame_length=frame_length,\n frame_shift=frame_shift,\n dither=0.0,\n energy_floor=1.0\n )\n\n frames = data_utils.apply_mv_norm(output).detach()[None,:,:].type(torch.cuda.FloatTensor)\n # print(output_cmvn)\n # frames = collate_frames(output_cmvn)\n # sort samples by descending number of frames\n # frames_lengths = torch.cuda.LongTensor(frames.size()[1])\n frames_lengths = torch.LongTensor([s.size(0) for s in frames])\n\n sample = {\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n \"audio\": frames, \"audio_lengths\": frames_lengths\n },}\n translations = task.inference_step(generator, models, sample)\n for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):\n src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())\n results.append((start_id + id, src_tokens_i, hypos))\n\n # sort output to match input order\n for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):\n if src_dict is not None:\n src_str = src_dict.string(src_tokens, args.remove_bpe)\n print('S-{}\\t{}'.format(id, src_str))\n\n # Process top predictions\n for hypo in hypos[:min(len(hypos), args.nbest)]:\n hypo_tokens, hypo_str, alignment = utils.post_process_prediction(\n hypo_tokens=hypo['tokens'].int().cpu(),\n src_str=src_str,\n alignment=hypo['alignment'],\n align_dict=align_dict,\n tgt_dict=tgt_dict,\n remove_bpe=args.remove_bpe,\n )\n hypo_str = decode_fn(hypo_str)\n print('H-{}\\t{}'.format(id, hypo_str))\n\n # print('H-{}\\t{}\\t{}'.format(id, hypo['score'], hypo_str))\n # print('P-{}\\t{}'.format(\n # id,\n # ' '.join(map(lambda x: '{:.4f}'.format(x), 
hypo['positional_scores'].tolist()))\n # ))\n if args.print_alignment:\n alignment_str = \" \".join([\"{}-{}\".format(src, tgt) for src, tgt in alignment])\n print('A-{}\\t{}'.format(\n id,\n alignment_str\n ))\n input = inp.readline().strip()\n\n # update running id counter\n start_id += len(inputs)\n\n\ndef cli_main():\n parser = options.get_generation_parser(interactive=True)\n args = options.parse_args_and_arch(parser)\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n",
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n# xuehaiyang: [email protected]\n#\n\n\"\"\"\nConvert pt model to npz\n\"\"\"\nimport torch\nimport json\nimport numpy as np\nimport sys\n\nif len(sys.argv) != 2:\n sys.stderr.write('usage: %s + pt_path ' % __file__)\n print()\n sys.exit(-1)\n\npt_path = sys.argv[1]\n\nonly_in_pt = []\nmodel_dict = torch.load(pt_path)\n\nfor item in model_dict[\"model\"].items():\n if \"encoder.transformer_layers.1.fc1.weight\" in item[0]:\n print(item[0] + \" \" + str(item[1]))\n\n # print(item[0] + \" \" + str(list(item[1].size())))\nprint(\"size: \"+str(len(model_dict[\"model\"].items())))\n\n\n"
] |
[
[
"torch.cuda.is_available"
],
[
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
endrjuskr/u8timeseries
|
[
"edf167815e7c7931fe491207f831e88203589883"
] |
[
"u8timeseries/tests/test_timeseries.py"
] |
[
"import unittest\nimport pandas as pd\nimport numpy as np\n\nfrom ..timeseries import TimeSeries\n\n\nclass TimeSeriesTestCase(unittest.TestCase):\n\n times = pd.date_range('20130101', '20130110')\n pd_series1 = pd.Series(range(10), index=times)\n pd_series2 = pd.Series(range(5, 15), index=times)\n pd_series3 = pd.Series(range(15, 25), index=times)\n series1: TimeSeries = TimeSeries(pd_series1)\n series2: TimeSeries = TimeSeries(pd_series1, pd_series2, pd_series3)\n series3: TimeSeries = TimeSeries(pd_series2)\n\n def test_creation(self):\n with self.assertRaises(ValueError):\n # Index is dateTimeIndex\n TimeSeries(pd.Series(range(10), range(10)))\n\n with self.assertRaises(ValueError):\n # Conf interval must be same length as main series\n pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))\n TimeSeries(self.pd_series1, pd_lo)\n\n with self.assertRaises(ValueError):\n # Conf interval must have same time index as main series\n pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))\n TimeSeries(self.pd_series1, pd_lo)\n\n with self.assertRaises(ValueError):\n # Conf interval must be same length as main series\n pd_hi = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))\n TimeSeries(self.pd_series1, None, pd_hi)\n\n with self.assertRaises(ValueError):\n # Conf interval must have same time index as main series\n pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))\n TimeSeries(self.pd_series1, None, pd_lo)\n\n with self.assertRaises(ValueError):\n # Main series cannot have date holes\n range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))\n TimeSeries(pd.Series(range(9), index=range_))\n\n series_test = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)\n\n self.assertTrue(series_test.pd_series().equals(self.pd_series1))\n self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))\n self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))\n\n def test_alt_creation(self):\n with self.assertRaises(ValueError):\n # Series cannot be lower than three\n index = pd.date_range('20130101', '20130102')\n TimeSeries.from_times_and_values(index, self.pd_series1.values[:2])\n with self.assertRaises(ValueError):\n # all array must have same length\n TimeSeries.from_times_and_values(self.pd_series1.index,\n self.pd_series1.values[:-1],\n self.pd_series2[:-2],\n self.pd_series3[:-1])\n\n # test if reordering is correct\n rand_perm = np.random.permutation(range(1, 11))\n index = pd.to_datetime(['201301{:02d}'.format(i) for i in rand_perm])\n series_test = TimeSeries.from_times_and_values(index, self.pd_series1.values[rand_perm-1],\n self.pd_series2[rand_perm-1],\n self.pd_series3[rand_perm-1].tolist())\n\n self.assertTrue(series_test.start_time() == pd.to_datetime('20130101'))\n self.assertTrue(series_test.end_time() == pd.to_datetime('20130110'))\n self.assertTrue(series_test.pd_series().equals(self.pd_series1))\n self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))\n self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))\n self.assertTrue(series_test.freq() == self.series1.freq())\n\n # TODO test over to_dataframe when multiple features choice is decided\n\n def test_eq(self):\n seriesA: TimeSeries = TimeSeries(self.pd_series1)\n self.assertTrue(self.series1 == seriesA)\n\n # with a defined CI\n seriesB: TimeSeries = TimeSeries(self.pd_series1,\n confidence_hi=pd.Series(range(10, 20),\n 
index=pd.date_range('20130101', '20130110')))\n self.assertFalse(self.series1 == seriesB)\n self.assertTrue(self.series1 != seriesB)\n\n # with different dates\n seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))\n self.assertFalse(self.series1 == seriesC)\n\n # compare with both CI\n seriesD: TimeSeries = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)\n seriesE: TimeSeries = TimeSeries(self.pd_series1, self.pd_series3, self.pd_series2)\n self.assertTrue(self.series2 == seriesD)\n self.assertFalse(self.series2 == seriesE)\n\n def test_dates(self):\n self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))\n self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))\n self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))\n\n def test_slice(self):\n # base case\n seriesA = self.series1.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))\n self.assertEqual(seriesA.start_time(), pd.Timestamp('20130104'))\n self.assertEqual(seriesA.end_time(), pd.Timestamp('20130107'))\n\n # time stamp not in series\n seriesB = self.series1.slice(pd.Timestamp('20130104 12:00:00'), pd.Timestamp('20130107'))\n self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))\n self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))\n\n # end timestamp after series\n seriesC = self.series1.slice(pd.Timestamp('20130108'), pd.Timestamp('20130201'))\n self.assertEqual(seriesC.start_time(), pd.Timestamp('20130108'))\n self.assertEqual(seriesC.end_time(), pd.Timestamp('20130110'))\n\n # n points, base case\n seriesD = self.series1.slice_n_points_after(pd.Timestamp('20130102'), n=3)\n self.assertEqual(seriesD.start_time(), pd.Timestamp('20130102'))\n self.assertTrue(len(seriesD.values()) == 3)\n self.assertEqual(seriesD.end_time(), pd.Timestamp('20130104'))\n\n seriesE = self.series1.slice_n_points_after(pd.Timestamp('20130107 12:00:10'), n=10)\n self.assertEqual(seriesE.start_time(), pd.Timestamp('20130108'))\n self.assertEqual(seriesE.end_time(), pd.Timestamp('20130110'))\n\n seriesF = self.series1.slice_n_points_before(pd.Timestamp('20130105'), n=3)\n self.assertEqual(seriesF.end_time(), pd.Timestamp('20130105'))\n self.assertTrue(len(seriesF.values()) == 3)\n self.assertEqual(seriesF.start_time(), pd.Timestamp('20130103'))\n\n seriesG = self.series1.slice_n_points_before(pd.Timestamp('20130107 12:00:10'), n=10)\n self.assertEqual(seriesG.start_time(), pd.Timestamp('20130101'))\n self.assertEqual(seriesG.end_time(), pd.Timestamp('20130107'))\n\n # with CI\n seriesH = self.series2.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))\n self.assertEqual(seriesH.conf_lo_pd_series().index[0], pd.Timestamp('20130104'))\n self.assertEqual(seriesH.conf_lo_pd_series().index[-1], pd.Timestamp('20130107'))\n self.assertEqual(seriesH.conf_hi_pd_series().index[0], pd.Timestamp('20130104'))\n self.assertEqual(seriesH.conf_hi_pd_series().index[-1], pd.Timestamp('20130107'))\n\n def test_split(self):\n seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130104'))\n self.assertEqual(seriesA.end_time(), pd.Timestamp('20130104'))\n self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))\n\n seriesC, seriesD = self.series1.split_before(pd.Timestamp('20130104'))\n self.assertEqual(seriesC.end_time(), pd.Timestamp('20130103'))\n self.assertEqual(seriesD.start_time(), pd.Timestamp('20130104'))\n\n self.assertEqual(self.series1.freq_str(), seriesA.freq_str())\n self.assertEqual(self.series1.freq_str(), 
seriesC.freq_str())\n\n def test_drop(self):\n seriesA = self.series1.drop_after(pd.Timestamp('20130105'))\n self.assertEqual(seriesA.end_time(), pd.Timestamp('20130105') - self.series1.freq())\n self.assertTrue(np.all(seriesA.time_index() < pd.Timestamp('20130105')))\n\n seriesB = self.series1.drop_before(pd.Timestamp('20130105'))\n self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105') + self.series1.freq())\n self.assertTrue(np.all(seriesB.time_index() > pd.Timestamp('20130105')))\n\n self.assertEqual(self.series1.freq_str(), seriesA.freq_str())\n self.assertEqual(self.series1.freq_str(), seriesB.freq_str())\n\n def test_intersect(self):\n seriesA = TimeSeries(pd.Series(range(2, 8), index=pd.date_range('20130102', '20130107')))\n\n seriesB = self.series1.slice_intersect(seriesA)\n self.assertEqual(seriesB.start_time(), pd.Timestamp('20130102'))\n self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))\n\n # The same, with CI\n seriesC = self.series2.slice_intersect(seriesA)\n self.assertEqual(seriesC.conf_lo_pd_series().index[0], pd.Timestamp('20130102'))\n self.assertEqual(seriesC.conf_hi_pd_series().index[-1], pd.Timestamp('20130107'))\n\n # Outside of range\n seriesD = self.series1.slice_intersect(TimeSeries(pd.Series(range(6, 13),\n index=pd.date_range('20130106', '20130112'))))\n self.assertEqual(seriesD.start_time(), pd.Timestamp('20130106'))\n self.assertEqual(seriesD.end_time(), pd.Timestamp('20130110'))\n\n # No intersect or too small intersect\n with self.assertRaises(ValueError):\n self.series1.slice_intersect(TimeSeries(pd.Series(range(6, 13),\n index=pd.date_range('20130116', '20130122'))))\n with self.assertRaises(ValueError):\n self.series1.slice_intersect(TimeSeries(pd.Series(range(9, 13),\n index=pd.date_range('20130109', '20130112'))))\n\n def test_rescale(self):\n with self.assertRaises(ValueError):\n self.series1.rescale_with_value(1)\n\n seriesA = self.series3.rescale_with_value(0)\n self.assertTrue(np.all(seriesA.values() == 0))\n\n seriesB = self.series3.rescale_with_value(-5)\n self.assertTrue(self.series3 * -1. 
== seriesB)\n\n seriesC = self.series3.rescale_with_value(1)\n self.assertTrue(self.series3 * 0.2 == seriesC)\n\n seriesD = self.series3.rescale_with_value(1e+20) # TODO: test will fail if value > 1e24 due to num imprecision\n self.assertTrue(self.series3 * 0.2e+20 == seriesD)\n\n def test_shift(self):\n seriesA = self.series1.shift(0)\n self.assertTrue(seriesA == self.series1)\n\n seriesB = self.series1.shift(1)\n self.assertTrue(seriesB.time_index().equals(\n self.series1.time_index()[1:].append(pd.DatetimeIndex([self.series1.time_index()[-1] +\n self.series1.freq()]))))\n\n seriesC = self.series1.shift(-1)\n self.assertTrue(seriesC.time_index().equals(\n pd.DatetimeIndex([self.series1.time_index()[0] - self.series1.freq()]).append(\n self.series1.time_index()[:-1])))\n\n with self.assertRaises(OverflowError):\n self.series1.shift(1e+6)\n\n seriesM = TimeSeries.from_times_and_values(pd.date_range('20130101', '20130601', freq='m'), range(5))\n with self.assertRaises(OverflowError):\n seriesM.shift(1e+4)\n\n def test_append(self):\n # reconstruct series\n seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130106'))\n self.assertEqual(seriesA.append(seriesB), self.series1)\n self.assertEqual(seriesA.append(seriesB).freq(), self.series1.freq())\n\n # Creating a gap is not allowed\n seriesC = self.series1.drop_before(pd.Timestamp('20130107'))\n with self.assertRaises(ValueError):\n seriesA.append(seriesC)\n\n # Changing frequence is not allowed\n seriesM = TimeSeries.from_times_and_values(pd.date_range('20130107', '20130507', freq='30D'), range(5))\n with self.assertRaises(ValueError):\n seriesA.append(seriesM)\n\n # reconstruction with CI\n seriesD, seriesE = self.series2.split_after(pd.Timestamp('20130106'))\n self.assertEqual(seriesD.append(seriesE), self.series2)\n self.assertEqual(seriesD.append(seriesE).freq(), self.series2.freq())\n\n def test_append_values(self):\n # reconstruct series\n seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130106'))\n self.assertEqual(seriesA.append_values(seriesB.values(), seriesB.time_index()), self.series1)\n self.assertEqual(seriesA.append_values(seriesB.values()), self.series1)\n\n # same with CI\n seriesC, seriesD = self.series2.split_after(pd.Timestamp('20130106'))\n self.assertEqual(seriesC.append_values(seriesD.values(), seriesD.time_index(),\n seriesD.conf_lo_pd_series().values,\n seriesD.conf_hi_pd_series().values), self.series2)\n\n # add only few element\n self.assertEqual(self.series1.drop_after(pd.Timestamp('20130110')).append_values([9]), self.series1)\n self.assertEqual(seriesA.append_values([]), seriesA)\n\n # randomize order\n rd_order = np.random.permutation(range(len(seriesB.values())))\n self.assertEqual(seriesA.append_values(seriesB.values()[rd_order], seriesB.time_index()[rd_order]),\n self.series1)\n\n # add non consecutive index\n with self.assertRaises(ValueError):\n self.assertEqual(seriesA.append_values(seriesB.values(), seriesB.time_index()+seriesB.freq()), self.series1)\n\n # add existing indices\n with self.assertRaises(ValueError):\n self.assertEqual(seriesA.append_values(seriesB.values(), seriesB.time_index()-3*seriesB.freq()), self.series1)\n\n # other frequency\n with self.assertRaises(ValueError):\n self.assertEqual(seriesA.append_values(seriesB.values(), pd.date_range('20130107', '20130113', freq='2d')),\n self.series1)\n\n def test_update(self):\n seriesA: TimeSeries = TimeSeries.from_times_and_values(self.times, [0, 1, 1, 3, 4, 5, 6, 2, 8, 0])\n seriesB: TimeSeries = 
TimeSeries.from_times_and_values(self.times, range(10),\n [5, 1, 7, 3, 9, 5, 11, 2, 13, 14],\n [15, 16, 1, 18, 4, 20, 6, 22, 8, 24])\n # change nothing\n seriesC = self.series1.copy()\n with self.assertRaises(ValueError):\n seriesA.update(self.times)\n seriesC.update(self.times, range(10))\n self.assertEqual(seriesC, self.series1)\n\n # different len\n with self.assertRaises(ValueError):\n seriesA.update(self.times, [], None, None)\n with self.assertRaises(ValueError):\n seriesA.update(self.times, None, np.arange(3), None)\n with self.assertRaises(ValueError):\n seriesA.update(self.times, None, None, np.arange(4))\n\n # change outside\n seriesC = seriesA.copy()\n with self.assertRaises(ValueError):\n seriesC.update(self.times+100*seriesC.freq(), range(10))\n seriesC.update(self.times.append(pd.date_range('20140101', '20140110')), list(range(10))+[0]*10)\n self.assertEqual(seriesC, self.series1)\n\n # change random\n seriesC = seriesA.copy()\n seriesC.update(pd.DatetimeIndex(['20130108', '20130110', '20130103']), [7, 9, 2])\n self.assertEqual(seriesC, self.series1)\n\n # change one of each series\n seriesD = seriesB.copy()\n seriesD.update(self.times, seriesA.pd_series())\n seriesA.update(pd.DatetimeIndex(['20130103', '20130108', '20130110']), [2, 7, 9])\n self.assertEqual(seriesA, self.series1)\n seriesB.update(self.times[::2], conf_hi=range(15, 25, 2))\n self.assertTrue(seriesB.conf_hi_pd_series().equals(self.series2.conf_hi_pd_series()))\n self.assertNotEqual(seriesB, self.series2)\n seriesB.update(self.times[1::2], conf_lo=range(6, 15, 2))\n self.assertEqual(seriesB, self.series2)\n\n # use nan to update all series altogether\n new_series = np.empty(10)\n new_series[:] = np.nan\n new_series[[2, 7, 9]] = [2, 7, 9]\n new_lo = np.empty(10)\n new_lo[:] = np.nan\n new_lo[1::2] = np.arange(6, 15, 2)\n new_hi = np.empty(10)\n new_hi[:] = np.nan\n new_hi[::2] = np.arange(15, 25, 2)\n seriesD.update(self.times, new_series, new_lo, new_hi)\n self.assertEqual(seriesD, self.series2)\n\n # raise error when update missing CI\n with self.assertRaises(AttributeError):\n self.series1.update(self.times, conf_lo=range(5, 15))\n\n def test_drop_values(self):\n seriesA = self.series1.append_values([1])\n self.assertEqual(seriesA.drop_values(pd.Timestamp('20130111'), inplace=False), self.series1)\n seriesA.drop_values(pd.Timestamp('20130111'))\n self.assertEqual(seriesA, self.series1)\n\n with self.assertRaises(KeyError):\n seriesA.drop_values(pd.Timestamp('20130112'))\n\n def test_ops(self):\n seriesA = TimeSeries(pd.Series([2 for _ in range(10)], index=self.pd_series1.index))\n targetAdd = TimeSeries(pd.Series(range(2, 12), index=self.pd_series1.index))\n targetSub = TimeSeries(pd.Series(range(-2, 8), index=self.pd_series1.index))\n targetMul = TimeSeries(pd.Series(range(0, 20, 2), index=self.pd_series1.index))\n targetDiv = TimeSeries(pd.Series([i / 2 for i in range(10)], index=self.pd_series1.index))\n targetPow = TimeSeries(pd.Series([float(i ** 2) for i in range(10)], index=self.pd_series1.index))\n\n self.assertEqual(self.series1 + seriesA, targetAdd)\n self.assertEqual(self.series1 + 2, targetAdd)\n self.assertEqual(2 + self.series1, targetAdd)\n self.assertEqual(self.series1 - seriesA, targetSub)\n self.assertEqual(self.series1 - 2, targetSub)\n self.assertEqual(self.series1 * seriesA, targetMul)\n self.assertEqual(self.series1 * 2, targetMul)\n self.assertEqual(2 * self.series1, targetMul)\n self.assertEqual(self.series1 / seriesA, targetDiv)\n self.assertEqual(self.series1 / 2, targetDiv)\n 
self.assertEqual(self.series1 ** 2, targetPow)\n\n with self.assertRaises(ZeroDivisionError):\n # Cannot divide by a TimeSeries with a value 0.\n self.series1 / self.series1\n\n with self.assertRaises(ZeroDivisionError):\n # Cannot divide by 0.\n self.series1 / 0\n\n def test_getitem(self):\n seriesA: TimeSeries = self.series1.drop_after(pd.Timestamp(\"20130105\"))\n self.assertEqual(self.series1[pd.date_range('20130101', ' 20130104')], seriesA)\n self.assertEqual(self.series1[:4], seriesA)\n self.assertTrue(self.series1[pd.Timestamp('20130101')].equals(self.series1.pd_series()[:1]))\n self.assertEqual(self.series1[pd.Timestamp('20130101'):pd.Timestamp('20130105')], seriesA)\n\n with self.assertRaises(IndexError):\n self.series1[pd.date_range('19990101', '19990201')]\n\n with self.assertRaises(KeyError):\n self.series1['19990101']\n\n with self.assertRaises(IndexError):\n self.series1[::-1]\n\n\n"
] |
[
[
"pandas.to_datetime",
"numpy.arange",
"pandas.Timedelta",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas.Timestamp",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NunoEdgarGFlowHub/pandas |
[
"b09b84e8e0baf89e78b618cdda30af11087d2e4a"
] |
[
"pandas/tests/test_index.py"
] |
[
"# -*- coding: utf-8 -*-\n# pylint: disable=E1101,E1103,W0232\n\nfrom datetime import datetime, timedelta, time\nfrom pandas.compat import range, lrange, lzip, u, zip, PY3\nimport operator\nimport re\nimport nose\nimport warnings\nimport os\n\nimport numpy as np\n\nfrom pandas import (period_range, date_range, Categorical, Series,\n Index, Float64Index, Int64Index, MultiIndex,\n CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex)\nfrom pandas.core.index import InvalidIndexError, NumericIndex\nfrom pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,\n assert_copy)\nfrom pandas import compat\nfrom pandas.compat import long, is_platform_windows\n\nimport pandas.util.testing as tm\nimport pandas.core.config as cf\n\nfrom pandas.tseries.index import _to_m8\nimport pandas.tseries.offsets as offsets\n\nimport pandas as pd\nfrom pandas.lib import Timestamp\nfrom itertools import product\n\n\nclass Base(object):\n \"\"\" base class for index sub-class tests \"\"\"\n _holder = None\n _compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']\n\n def setup_indices(self):\n # setup the test indices in the self.indicies dict\n for name, ind in self.indices.items():\n setattr(self, name, ind)\n\n def verify_pickle(self,index):\n unpickled = self.round_trip_pickle(index)\n self.assertTrue(index.equals(unpickled))\n\n def test_pickle_compat_construction(self):\n # this is testing for pickle compat\n if self._holder is None:\n return\n\n # need an object to create with\n self.assertRaises(TypeError, self._holder)\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n tm.assertRaisesRegexp(TypeError,\n \"cannot perform __mul__\",\n lambda : idx * 1)\n tm.assertRaisesRegexp(TypeError,\n \"cannot perform __mul__\",\n lambda : 1 * idx)\n\n div_err = \"cannot perform __truediv__\" if compat.PY3 else \"cannot perform __div__\"\n tm.assertRaisesRegexp(TypeError,\n div_err,\n lambda : idx / 1)\n tm.assertRaisesRegexp(TypeError,\n div_err,\n lambda : 1 / idx)\n tm.assertRaisesRegexp(TypeError,\n \"cannot perform __floordiv__\",\n lambda : idx // 1)\n tm.assertRaisesRegexp(TypeError,\n \"cannot perform __floordiv__\",\n lambda : 1 // idx)\n\n def test_logical_compat(self):\n idx = self.create_index()\n tm.assertRaisesRegexp(TypeError,\n 'cannot perform all',\n lambda : idx.all())\n tm.assertRaisesRegexp(TypeError,\n 'cannot perform any',\n lambda : idx.any())\n\n def test_boolean_context_compat(self):\n\n # boolean context compat\n idx = self.create_index()\n def f():\n if idx:\n pass\n tm.assertRaisesRegexp(ValueError,'The truth value of a',f)\n\n def test_reindex_base(self):\n idx = self.create_index()\n expected = np.arange(idx.size)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):\n idx.get_indexer(idx, method='invalid')\n\n def test_ndarray_compat_properties(self):\n\n idx = self.create_index()\n self.assertTrue(idx.T.equals(idx))\n self.assertTrue(idx.transpose().equals(idx))\n\n values = idx.values\n for prop in self._compat_props:\n self.assertEqual(getattr(idx, prop), getattr(values, prop))\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n def test_repr_roundtrip(self):\n\n idx = self.create_index()\n tm.assert_index_equal(eval(repr(idx)),idx)\n\n def test_str(self):\n\n # test the string repr\n idx = self.create_index()\n idx.name = 'foo'\n self.assertTrue(\"'foo'\" in str(idx))\n self.assertTrue(idx.__class__.__name__ in str(idx))\n\n def 
test_dtype_str(self):\n for idx in self.indices.values():\n dtype = idx.dtype_str\n self.assertIsInstance(dtype, compat.string_types)\n if isinstance(idx, PeriodIndex):\n self.assertEqual(dtype, 'period')\n else:\n self.assertEqual(dtype, str(idx.dtype))\n\n def test_repr_max_seq_item_setting(self):\n # GH10182\n idx = self.create_index()\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n self.assertFalse('...' in str(idx))\n\n def test_wrong_number_names(self):\n def testit(ind):\n ind.names = [\"apple\", \"banana\", \"carrot\"]\n\n for ind in self.indices.values():\n assertRaisesRegexp(ValueError, \"^Length\", testit, ind)\n\n def test_set_name_methods(self):\n new_name = \"This is the new name for this index\"\n for ind in self.indices.values():\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(ind, MultiIndex):\n continue\n\n original_name = ind.name\n new_ind = ind.set_names([new_name])\n self.assertEqual(new_ind.name, new_name)\n self.assertEqual(ind.name, original_name)\n res = ind.rename(new_name, inplace=True)\n\n # should return None\n self.assertIsNone(res)\n self.assertEqual(ind.name, new_name)\n self.assertEqual(ind.names, [new_name])\n #with assertRaisesRegexp(TypeError, \"list-like\"):\n # # should still fail even if it would be the right length\n # ind.set_names(\"a\")\n with assertRaisesRegexp(ValueError, \"Level must be None\"):\n ind.set_names(\"a\", level=0)\n\n # rename in place just leaves tuples and other containers alone\n name = ('A', 'B')\n ind.rename(name, inplace=True)\n self.assertEqual(ind.name, name)\n self.assertEqual(ind.names, [name])\n\n def test_hash_error(self):\n for ind in self.indices.values():\n with tm.assertRaisesRegexp(TypeError,\n \"unhashable type: %r\" %\n type(ind).__name__):\n hash(ind)\n\n def test_copy_and_deepcopy(self):\n from copy import copy, deepcopy\n\n for ind in self.indices.values():\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(ind, MultiIndex):\n continue\n\n for func in (copy, deepcopy):\n idx_copy = func(ind)\n self.assertIsNot(idx_copy, ind)\n self.assertTrue(idx_copy.equals(ind))\n\n new_copy = ind.copy(deep=True, name=\"banana\")\n self.assertEqual(new_copy.name, \"banana\")\n\n def test_duplicates(self):\n for ind in self.indices.values():\n\n if not len(ind):\n continue\n if isinstance(ind, MultiIndex):\n continue\n idx = self._holder([ind[0]]*5)\n self.assertFalse(idx.is_unique)\n self.assertTrue(idx.has_duplicates)\n\n # GH 10115\n # preserve names\n idx.name = 'foo'\n result = idx.drop_duplicates()\n self.assertEqual(result.name, 'foo')\n self.assert_index_equal(result, Index([ind[0]],name='foo'))\n\n def test_sort(self):\n for ind in self.indices.values():\n self.assertRaises(TypeError, ind.sort)\n\n def test_order(self):\n for ind in self.indices.values():\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n ind.order()\n\n def test_mutability(self):\n for ind in self.indices.values():\n if not len(ind):\n continue\n self.assertRaises(TypeError, ind.__setitem__, 0, ind[0])\n\n def test_view(self):\n for ind in self.indices.values():\n i_view = ind.view()\n self.assertEqual(i_view.name, ind.name)\n\n def test_compat(self):\n for ind in self.indices.values():\n self.assertEqual(ind.tolist(),list(ind))\n\n def test_argsort(self):\n for k, ind in self.indices.items():\n\n # sep teststed\n if k in ['catIndex']:\n continue\n\n result = ind.argsort()\n expected = np.array(ind).argsort()\n 
tm.assert_numpy_array_equal(result, expected)\n\n def test_pickle(self):\n for ind in self.indices.values():\n self.verify_pickle(ind)\n ind.name = 'foo'\n self.verify_pickle(ind)\n\n def test_take(self):\n indexer = [4, 3, 0, 2]\n for k, ind in self.indices.items():\n\n # separate\n if k in ['boolIndex','tuples','empty']:\n continue\n\n result = ind.take(indexer)\n expected = ind[indexer]\n self.assertTrue(result.equals(expected))\n\n if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n with tm.assertRaises(AttributeError):\n ind.freq\n\n def test_setops_errorcases(self):\n for name, idx in compat.iteritems(self.indices):\n # # non-iterable input\n cases = [0.5, 'xxx']\n methods = [idx.intersection, idx.union, idx.difference, idx.sym_diff]\n\n for method in methods:\n for case in cases:\n assertRaisesRegexp(TypeError,\n \"Input must be Index or array-like\",\n method, case)\n\n def test_intersection_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[:5]\n second = idx[:3]\n intersect = first.intersection(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n self.assertTrue(tm.equalContents(intersect, second))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.intersection(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.intersection(case)\n self.assertTrue(tm.equalContents(result, second))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.intersection([1, 2, 3])\n\n def test_union_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[3:]\n second = idx[:5]\n everything = idx\n union = first.union(second)\n self.assertTrue(tm.equalContents(union, everything))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.union(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.union(case)\n self.assertTrue(tm.equalContents(result, everything))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.union([1, 2, 3])\n\n def test_difference_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[2:]\n second = idx[:4]\n answer = idx[4:]\n result = first.difference(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n self.assertTrue(tm.equalContents(result, answer))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):\n self.assertEqual(result.__class__, answer.__class__)\n tm.assert_numpy_array_equal(result.asi8, answer.asi8)\n else:\n result = first.difference(case)\n self.assertTrue(tm.equalContents(result, answer))\n\n if isinstance(idx, MultiIndex):\n msg = 
\"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.difference([1, 2, 3])\n\n def test_symmetric_diff(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[1:]\n second = idx[:-1]\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n answer = idx[[0, -1]]\n result = first.sym_diff(second)\n self.assertTrue(tm.equalContents(result, answer))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n result = first.sym_diff(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.sym_diff(case)\n self.assertTrue(tm.equalContents(result, answer))\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assertRaisesRegexp(TypeError, msg):\n result = first.sym_diff([1, 2, 3])\n\n def test_insert_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n result = idx[1:4]\n\n if not len(idx):\n continue\n\n #test 0th element\n self.assertTrue(idx[0:4].equals(\n result.insert(0, idx[0])))\n\n def test_delete_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n\n if not len(idx):\n continue\n\n expected = idx[1:]\n result = idx.delete(0)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n expected = idx[:-1]\n result = idx.delete(-1)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n with tm.assertRaises((IndexError, ValueError)):\n # either depending on numpy version\n result = idx.delete(len(idx))\n\n def test_equals_op(self):\n # GH9947, GH10637\n index_a = self.create_index()\n if isinstance(index_a, PeriodIndex):\n return\n\n n = len(index_a)\n index_b = index_a[0:-1]\n index_c = index_a[0:-1].append(index_a[-2:-1])\n index_d = index_a[0:1]\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == index_b\n expected1 = np.array([True] * n)\n expected2 = np.array([True] * (n - 1) + [False])\n tm.assert_numpy_array_equal(index_a == index_a, expected1)\n tm.assert_numpy_array_equal(index_a == index_c, expected2)\n\n # test comparisons with numpy arrays\n array_a = np.array(index_a)\n array_b = np.array(index_a[0:-1])\n array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))\n array_d = np.array(index_a[0:1])\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == array_b\n tm.assert_numpy_array_equal(index_a == array_a, expected1)\n tm.assert_numpy_array_equal(index_a == array_c, expected2)\n\n # test comparisons with Series\n series_a = Series(array_a)\n series_b = Series(array_b)\n series_c = Series(array_c)\n series_d = Series(array_d)\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == series_b\n tm.assert_numpy_array_equal(index_a == series_a, expected1)\n tm.assert_numpy_array_equal(index_a == series_c, expected2)\n\n # cases where length is 1 for one of them\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == index_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == series_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n index_a == array_d\n with tm.assertRaisesRegexp(ValueError, \"Series lengths must match\"):\n series_a == series_d\n with tm.assertRaisesRegexp(ValueError, \"Lengths must 
match\"):\n series_a == array_d\n\n # comparing with a scalar should broadcast; note that we are excluding\n # MultiIndex because in this case each item in the index is a tuple of\n # length 2, and therefore is considered an array of length 2 in the\n # comparison instead of a scalar\n if not isinstance(index_a, MultiIndex):\n expected3 = np.array([False] * (len(index_a) - 2) + [True, False])\n # assuming the 2nd to last item is unique in the data\n item = index_a[-2]\n tm.assert_numpy_array_equal(index_a == item, expected3)\n tm.assert_numpy_array_equal(series_a == item, expected3)\n\n def test_numpy_ufuncs(self):\n # test ufuncs of numpy 1.9.2. see:\n # http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n # some functions are skipped because it may return different result\n # for unicode input depending on numpy version\n\n for name, idx in compat.iteritems(self.indices):\n for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,\n np.log1p, np.sqrt, np.sin, np.cos,\n np.tan, np.arcsin, np.arccos, np.arctan,\n np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh,\n np.arctanh, np.deg2rad, np.rad2deg]:\n if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n # PeriodIndex behavior should be changed in future version\n with tm.assertRaises(Exception):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index)):\n # coerces to float (e.g. np.sin)\n result = func(idx)\n exp = Index(func(idx.values), name=idx.name)\n self.assert_index_equal(result, exp)\n self.assertIsInstance(result, pd.Float64Index)\n else:\n # raise AttributeError or TypeError\n if len(idx) == 0:\n continue\n else:\n with tm.assertRaises(Exception):\n func(idx)\n\n for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:\n if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n with tm.assertRaises(Exception):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index)):\n # results in bool array\n result = func(idx)\n exp = func(idx.values)\n self.assertIsInstance(result, np.ndarray)\n tm.assertNotIsInstance(result, Index)\n else:\n if len(idx) == 0:\n continue\n else:\n with tm.assertRaises(Exception):\n func(idx)\n\n\nclass TestIndex(Base, tm.TestCase):\n _holder = Index\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(\n unicodeIndex = tm.makeUnicodeIndex(100),\n strIndex = tm.makeStringIndex(100),\n dateIndex = tm.makeDateIndex(100),\n periodIndex = tm.makePeriodIndex(100),\n tdIndex = tm.makeTimedeltaIndex(100),\n intIndex = tm.makeIntIndex(100),\n floatIndex = tm.makeFloatIndex(100),\n boolIndex = Index([True,False]),\n catIndex = tm.makeCategoricalIndex(100),\n empty = Index([]),\n tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],\n [1, 2, 3]))\n )\n self.setup_indices()\n\n def create_index(self):\n return Index(list('abcde'))\n\n def test_new_axis(self):\n new_index = self.dateIndex[None, :]\n self.assertEqual(new_index.ndim, 2)\n tm.assertIsInstance(new_index, np.ndarray)\n\n def test_copy_and_deepcopy(self):\n super(TestIndex, self).test_copy_and_deepcopy()\n\n new_copy2 = self.intIndex.copy(dtype=int)\n self.assertEqual(new_copy2.dtype.kind, 'i')\n\n def test_constructor(self):\n # regular instance creation\n tm.assert_contains_all(self.strIndex, self.strIndex)\n tm.assert_contains_all(self.dateIndex, self.dateIndex)\n\n # casting\n arr = np.array(self.strIndex)\n index = Index(arr)\n tm.assert_contains_all(arr, index)\n 
tm.assert_numpy_array_equal(self.strIndex, index)\n\n # copy\n arr = np.array(self.strIndex)\n index = Index(arr, copy=True, name='name')\n tm.assertIsInstance(index, Index)\n self.assertEqual(index.name, 'name')\n tm.assert_numpy_array_equal(arr, index)\n arr[0] = \"SOMEBIGLONGSTRING\"\n self.assertNotEqual(index[0], \"SOMEBIGLONGSTRING\")\n\n # what to do here?\n # arr = np.array(5.)\n # self.assertRaises(Exception, arr.view, Index)\n\n def test_constructor_corner(self):\n # corner case\n self.assertRaises(TypeError, Index, 0)\n\n def test_construction_list_mixed_tuples(self):\n # 10697\n # if we are constructing from a mixed list of tuples, make sure that we\n # are independent of the sorting order\n idx1 = Index([('A',1),'B'])\n self.assertIsInstance(idx1, Index) and self.assertNotInstance(idx1, MultiIndex)\n idx2 = Index(['B',('A',1)])\n self.assertIsInstance(idx2, Index) and self.assertNotInstance(idx2, MultiIndex)\n\n def test_constructor_from_series(self):\n\n expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])\n s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])\n result = Index(s)\n self.assertTrue(result.equals(expected))\n result = DatetimeIndex(s)\n self.assertTrue(result.equals(expected))\n\n # GH 6273\n # create from a series, passing a freq\n s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))\n result = DatetimeIndex(s, freq='MS')\n expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')\n self.assertTrue(result.equals(expected))\n\n df = pd.DataFrame(np.random.rand(5,3))\n df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']\n result = DatetimeIndex(df['date'], freq='MS')\n self.assertTrue(result.equals(expected))\n self.assertEqual(df['date'].dtype, object)\n\n exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'], name='date')\n self.assert_series_equal(df['date'], exp)\n\n # GH 6274\n # infer freq of same\n result = pd.infer_freq(df['date'])\n self.assertEqual(result,'MS')\n\n def test_constructor_ndarray_like(self):\n # GH 5460#issuecomment-44474502\n # it should be possible to convert any object that satisfies the numpy\n # ndarray interface directly into an Index\n class ArrayLike(object):\n def __init__(self, array):\n self.array = array\n def __array__(self, dtype=None):\n return self.array\n\n for array in [np.arange(5),\n np.array(['a', 'b', 'c']),\n date_range('2000-01-01', periods=3).values]:\n expected = pd.Index(array)\n result = pd.Index(ArrayLike(array))\n self.assertTrue(result.equals(expected))\n\n def test_index_ctor_infer_periodindex(self):\n xp = period_range('2012-1-1', freq='M', periods=3)\n rs = Index(xp)\n tm.assert_numpy_array_equal(rs, xp)\n tm.assertIsInstance(rs, PeriodIndex)\n\n def test_constructor_simple_new(self):\n idx = Index([1, 2, 3, 4, 5], name='int')\n result = idx._simple_new(idx, 'int')\n self.assertTrue(result.equals(idx))\n\n idx = Index([1.1, np.nan, 2.2, 3.0], name='float')\n result = idx._simple_new(idx, 'float')\n self.assertTrue(result.equals(idx))\n\n idx = Index(['A', 'B', 'C', np.nan], name='obj')\n result = idx._simple_new(idx, 'obj')\n self.assertTrue(result.equals(idx))\n\n def test_constructor_dtypes(self):\n\n for idx in [Index(np.array([1, 2, 3], dtype=int)),\n Index(np.array([1, 2, 3], dtype=int), dtype=int),\n Index(np.array([1., 2., 3.], dtype=float), dtype=int),\n Index([1, 2, 3], dtype=int),\n Index([1., 2., 3.], 
dtype=int)]:\n self.assertIsInstance(idx, Int64Index)\n\n for idx in [Index(np.array([1., 2., 3.], dtype=float)),\n Index(np.array([1, 2, 3], dtype=int), dtype=float),\n Index(np.array([1., 2., 3.], dtype=float), dtype=float),\n Index([1, 2, 3], dtype=float),\n Index([1., 2., 3.], dtype=float)]:\n self.assertIsInstance(idx, Float64Index)\n\n for idx in [Index(np.array([True, False, True], dtype=bool)),\n Index([True, False, True]),\n Index(np.array([True, False, True], dtype=bool), dtype=bool),\n Index([True, False, True], dtype=bool)]:\n self.assertIsInstance(idx, Index)\n self.assertEqual(idx.dtype, object)\n\n for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),\n Index([1, 2, 3], dtype='category'),\n Index(np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-02')]), dtype='category'),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)], dtype='category')]:\n self.assertIsInstance(idx, CategoricalIndex)\n\n for idx in [Index(np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-02')])),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:\n self.assertIsInstance(idx, DatetimeIndex)\n\n for idx in [Index(np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-02')]), dtype=object),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)], dtype=object)]:\n self.assertNotIsInstance(idx, DatetimeIndex)\n self.assertIsInstance(idx, Index)\n self.assertEqual(idx.dtype, object)\n\n for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')])),\n Index([timedelta(1), timedelta(1)])]:\n self.assertIsInstance(idx, TimedeltaIndex)\n\n for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]), dtype=object),\n Index([timedelta(1), timedelta(1)], dtype=object)]:\n self.assertNotIsInstance(idx, TimedeltaIndex)\n self.assertIsInstance(idx, Index)\n self.assertEqual(idx.dtype, object)\n\n def test_view_with_args(self):\n\n restricted = ['unicodeIndex','strIndex','catIndex','boolIndex','empty']\n\n for i in restricted:\n ind = self.indices[i]\n\n # with arguments\n self.assertRaises(TypeError, lambda : ind.view('i8'))\n\n # these are ok\n for i in list(set(self.indices.keys())-set(restricted)):\n ind = self.indices[i]\n\n # with arguments\n ind.view('i8')\n\n def test_legacy_pickle_identity(self):\n\n # GH 8431\n pth = tm.get_data_path()\n s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))\n s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))\n self.assertFalse(s1.index.identical(s2.index))\n self.assertFalse(s1.index.equals(s2.index))\n\n def test_astype(self):\n casted = self.intIndex.astype('i8')\n\n # it works!\n casted.get_loc(5)\n\n # pass on name\n self.intIndex.name = 'foobar'\n casted = self.intIndex.astype('i8')\n self.assertEqual(casted.name, 'foobar')\n\n def test_equals(self):\n # same\n self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))\n\n # different length\n self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))\n\n # same length, different values\n self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))\n\n # Must also be an Index\n self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))\n\n def test_insert(self):\n\n # GH 7256\n # validate neg/pos inserts\n result = Index(['b', 'c', 'd'])\n\n #test 0th element\n self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(\n result.insert(0, 'a')))\n\n #test Nth element that follows Python list behavior\n self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(\n result.insert(-1, 'e')))\n\n #test loc +/- 
neq (0, -1)\n self.assertTrue(result.insert(1, 'z').equals(\n result.insert(-2, 'z')))\n\n #test empty\n null_index = Index([])\n self.assertTrue(Index(['a']).equals(\n null_index.insert(0, 'a')))\n\n def test_delete(self):\n idx = Index(['a', 'b', 'c', 'd'], name='idx')\n\n expected = Index(['b', 'c', 'd'], name='idx')\n result = idx.delete(0)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n expected = Index(['a', 'b', 'c'], name='idx')\n result = idx.delete(-1)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.name, expected.name)\n\n with tm.assertRaises((IndexError, ValueError)):\n # either depeidnig on numpy version\n result = idx.delete(5)\n\n def test_identical(self):\n\n # index\n i1 = Index(['a', 'b', 'c'])\n i2 = Index(['a', 'b', 'c'])\n\n self.assertTrue(i1.identical(i2))\n\n i1 = i1.rename('foo')\n self.assertTrue(i1.equals(i2))\n self.assertFalse(i1.identical(i2))\n\n i2 = i2.rename('foo')\n self.assertTrue(i1.identical(i2))\n\n i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])\n i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)\n self.assertFalse(i3.identical(i4))\n\n def test_is_(self):\n ind = Index(range(10))\n self.assertTrue(ind.is_(ind))\n self.assertTrue(ind.is_(ind.view().view().view().view()))\n self.assertFalse(ind.is_(Index(range(10))))\n self.assertFalse(ind.is_(ind.copy()))\n self.assertFalse(ind.is_(ind.copy(deep=False)))\n self.assertFalse(ind.is_(ind[:]))\n self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))\n self.assertFalse(ind.is_(np.array(range(10))))\n\n # quasi-implementation dependent\n self.assertTrue(ind.is_(ind.view()))\n ind2 = ind.view()\n ind2.name = 'bob'\n self.assertTrue(ind.is_(ind2))\n self.assertTrue(ind2.is_(ind))\n # doesn't matter if Indices are *actually* views of underlying data,\n self.assertFalse(ind.is_(Index(ind.values)))\n arr = np.array(range(1, 11))\n ind1 = Index(arr, copy=False)\n ind2 = Index(arr, copy=False)\n self.assertFalse(ind1.is_(ind2))\n\n def test_asof(self):\n d = self.dateIndex[0]\n self.assertEqual(self.dateIndex.asof(d), d)\n self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))\n\n d = self.dateIndex[-1]\n self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)\n\n d = self.dateIndex[0].to_datetime()\n tm.assertIsInstance(self.dateIndex.asof(d), Timestamp)\n\n def test_asof_datetime_partial(self):\n idx = pd.date_range('2010-01-01', periods=2, freq='m')\n expected = Timestamp('2010-02-28')\n result = idx.asof('2010-02')\n self.assertEqual(result, expected)\n self.assertFalse(isinstance(result, Index))\n\n def test_nanosecond_index_access(self):\n s = Series([Timestamp('20130101')]).values.view('i8')[0]\n r = DatetimeIndex([s + 50 + i for i in range(100)])\n x = Series(np.random.randn(100), index=r)\n\n first_value = x.asof(x.index[0])\n\n # this does not yet work, as parsing strings is done via dateutil\n #self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])\n\n self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])\n\n def test_comparators(self):\n index = self.dateIndex\n element = index[len(index) // 2]\n element = _to_m8(element)\n\n arr = np.array(index)\n\n def _check(op):\n arr_result = op(arr, element)\n index_result = op(index, element)\n\n self.assertIsInstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n _check(operator.eq)\n _check(operator.ne)\n _check(operator.gt)\n _check(operator.lt)\n 
_check(operator.ge)\n _check(operator.le)\n\n def test_booleanindex(self):\n boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)\n boolIdx[5:30:2] = False\n\n subIndex = self.strIndex[boolIdx]\n\n for i, val in enumerate(subIndex):\n self.assertEqual(subIndex.get_loc(val), i)\n\n subIndex = self.strIndex[list(boolIdx)]\n for i, val in enumerate(subIndex):\n self.assertEqual(subIndex.get_loc(val), i)\n\n def test_fancy(self):\n sl = self.strIndex[[1, 2, 3]]\n for i in sl:\n self.assertEqual(i, sl[sl.get_loc(i)])\n\n def test_empty_fancy(self):\n empty_farr = np.array([], dtype=np.float_)\n empty_iarr = np.array([], dtype=np.int_)\n empty_barr = np.array([], dtype=np.bool_)\n\n # pd.DatetimeIndex is excluded, because it overrides getitem and should\n # be tested separately.\n for idx in [self.strIndex, self.intIndex, self.floatIndex]:\n empty_idx = idx.__class__([])\n values = idx.values\n\n self.assertTrue(idx[[]].identical(empty_idx))\n self.assertTrue(idx[empty_iarr].identical(empty_idx))\n self.assertTrue(idx[empty_barr].identical(empty_idx))\n\n # np.ndarray only accepts ndarray of int & bool dtypes, so should\n # Index.\n self.assertRaises(IndexError, idx.__getitem__, empty_farr)\n\n def test_getitem(self):\n arr = np.array(self.dateIndex)\n exp = self.dateIndex[5]\n exp = _to_m8(exp)\n\n self.assertEqual(exp, arr[5])\n\n def test_shift(self):\n shifted = self.dateIndex.shift(0, timedelta(1))\n self.assertIs(shifted, self.dateIndex)\n\n shifted = self.dateIndex.shift(5, timedelta(1))\n tm.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))\n\n shifted = self.dateIndex.shift(1, 'B')\n tm.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())\n\n shifted.name = 'shifted'\n self.assertEqual(shifted.name, shifted.shift(1, 'D').name)\n\n def test_intersection(self):\n first = self.strIndex[:20]\n second = self.strIndex[:10]\n intersect = first.intersection(second)\n self.assertTrue(tm.equalContents(intersect, second))\n\n # Corner cases\n inter = first.intersection(first)\n self.assertIs(inter, first)\n\n idx1 = Index([1, 2, 3, 4, 5], name='idx')\n # if target has the same name, it is preserved\n idx2 = Index([3, 4, 5, 6, 7], name='idx')\n expected2 = Index([3, 4, 5], name='idx')\n result2 = idx1.intersection(idx2)\n self.assertTrue(result2.equals(expected2))\n self.assertEqual(result2.name, expected2.name)\n\n # if target name is different, it will be reset\n idx3 = Index([3, 4, 5, 6, 7], name='other')\n expected3 = Index([3, 4, 5], name=None)\n result3 = idx1.intersection(idx3)\n self.assertTrue(result3.equals(expected3))\n self.assertEqual(result3.name, expected3.name)\n\n # non monotonic\n idx1 = Index([5, 3, 2, 4, 1], name='idx')\n idx2 = Index([4, 7, 6, 5, 3], name='idx')\n result2 = idx1.intersection(idx2)\n self.assertTrue(tm.equalContents(result2, expected2))\n self.assertEqual(result2.name, expected2.name)\n\n idx3 = Index([4, 7, 6, 5, 3], name='other')\n result3 = idx1.intersection(idx3)\n self.assertTrue(tm.equalContents(result3, expected3))\n self.assertEqual(result3.name, expected3.name)\n\n # non-monotonic non-unique\n idx1 = Index(['A','B','A','C'])\n idx2 = Index(['B','D'])\n expected = Index(['B'], dtype='object')\n result = idx1.intersection(idx2)\n self.assertTrue(result.equals(expected))\n\n def test_union(self):\n first = self.strIndex[5:20]\n second = self.strIndex[:10]\n everything = self.strIndex[:20]\n union = first.union(second)\n self.assertTrue(tm.equalContents(union, everything))\n\n # GH 10149\n cases = [klass(second.values) for 
klass in [np.array, Series, list]]\n for case in cases:\n result = first.union(case)\n self.assertTrue(tm.equalContents(result, everything))\n\n # Corner cases\n union = first.union(first)\n self.assertIs(union, first)\n\n union = first.union([])\n self.assertIs(union, first)\n\n union = Index([]).union(first)\n self.assertIs(union, first)\n\n # preserve names\n first.name = 'A'\n second.name = 'A'\n union = first.union(second)\n self.assertEqual(union.name, 'A')\n\n second.name = 'B'\n union = first.union(second)\n self.assertIsNone(union.name)\n\n def test_add(self):\n\n # - API change GH 8226\n with tm.assert_produces_warning():\n self.strIndex + self.strIndex\n with tm.assert_produces_warning():\n self.strIndex + self.strIndex.tolist()\n with tm.assert_produces_warning():\n self.strIndex.tolist() + self.strIndex\n\n with tm.assert_produces_warning(RuntimeWarning):\n firstCat = self.strIndex.union(self.dateIndex)\n secondCat = self.strIndex.union(self.strIndex)\n\n if self.dateIndex.dtype == np.object_:\n appended = np.append(self.strIndex, self.dateIndex)\n else:\n appended = np.append(self.strIndex, self.dateIndex.astype('O'))\n\n self.assertTrue(tm.equalContents(firstCat, appended))\n self.assertTrue(tm.equalContents(secondCat, self.strIndex))\n tm.assert_contains_all(self.strIndex, firstCat)\n tm.assert_contains_all(self.strIndex, secondCat)\n tm.assert_contains_all(self.dateIndex, firstCat)\n\n # test add and radd\n idx = Index(list('abc'))\n expected = Index(['a1', 'b1', 'c1'])\n self.assert_index_equal(idx + '1', expected)\n expected = Index(['1a', '1b', '1c'])\n self.assert_index_equal('1' + idx, expected)\n\n def test_append_multiple(self):\n index = Index(['a', 'b', 'c', 'd', 'e', 'f'])\n\n foos = [index[:2], index[2:4], index[4:]]\n result = foos[0].append(foos[1:])\n self.assertTrue(result.equals(index))\n\n # empty\n result = index.append([])\n self.assertTrue(result.equals(index))\n\n def test_append_empty_preserve_name(self):\n left = Index([], name='foo')\n right = Index([1, 2, 3], name='foo')\n\n result = left.append(right)\n self.assertEqual(result.name, 'foo')\n\n left = Index([], name='foo')\n right = Index([1, 2, 3], name='bar')\n\n result = left.append(right)\n self.assertIsNone(result.name)\n\n def test_add_string(self):\n # from bug report\n index = Index(['a', 'b', 'c'])\n index2 = index + 'foo'\n\n self.assertNotIn('a', index2)\n self.assertIn('afoo', index2)\n\n def test_iadd_string(self):\n index = pd.Index(['a', 'b', 'c'])\n # doesn't fail test unless there is a check before `+=`\n self.assertIn('a', index)\n\n index += '_x'\n self.assertIn('a_x', index)\n\n def test_difference(self):\n\n first = self.strIndex[5:20]\n second = self.strIndex[:10]\n answer = self.strIndex[10:20]\n first.name = 'name'\n # different names\n result = first.difference(second)\n\n self.assertTrue(tm.equalContents(result, answer))\n self.assertEqual(result.name, None)\n\n # same names\n second.name = 'name'\n result = first.difference(second)\n self.assertEqual(result.name, 'name')\n\n # with empty\n result = first.difference([])\n self.assertTrue(tm.equalContents(result, first))\n self.assertEqual(result.name, first.name)\n\n # with everythin\n result = first.difference(first)\n self.assertEqual(len(result), 0)\n self.assertEqual(result.name, first.name)\n\n def test_symmetric_diff(self):\n # smoke\n idx1 = Index([1, 2, 3, 4], name='idx1')\n idx2 = Index([2, 3, 4, 5])\n result = idx1.sym_diff(idx2)\n expected = Index([1, 5])\n self.assertTrue(tm.equalContents(result, expected))\n 
self.assertIsNone(result.name)\n\n # __xor__ syntax\n expected = idx1 ^ idx2\n self.assertTrue(tm.equalContents(result, expected))\n self.assertIsNone(result.name)\n\n # multiIndex\n idx1 = MultiIndex.from_tuples(self.tuples)\n idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])\n result = idx1.sym_diff(idx2)\n expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])\n self.assertTrue(tm.equalContents(result, expected))\n\n # nans:\n # GH #6444, sorting of nans. Make sure the number of nans is right\n # and the correct non-nan values are there. punt on sorting.\n idx1 = Index([1, 2, 3, np.nan])\n idx2 = Index([0, 1, np.nan])\n result = idx1.sym_diff(idx2)\n # expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])\n\n nans = pd.isnull(result)\n self.assertEqual(nans.sum(), 1)\n self.assertEqual((~nans).sum(), 3)\n [self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]\n\n # other not an Index:\n idx1 = Index([1, 2, 3, 4], name='idx1')\n idx2 = np.array([2, 3, 4, 5])\n expected = Index([1, 5])\n result = idx1.sym_diff(idx2)\n self.assertTrue(tm.equalContents(result, expected))\n self.assertEqual(result.name, 'idx1')\n\n result = idx1.sym_diff(idx2, result_name='new_name')\n self.assertTrue(tm.equalContents(result, expected))\n self.assertEqual(result.name, 'new_name')\n\n def test_is_numeric(self):\n self.assertFalse(self.dateIndex.is_numeric())\n self.assertFalse(self.strIndex.is_numeric())\n self.assertTrue(self.intIndex.is_numeric())\n self.assertTrue(self.floatIndex.is_numeric())\n self.assertFalse(self.catIndex.is_numeric())\n\n def test_is_object(self):\n self.assertTrue(self.strIndex.is_object())\n self.assertTrue(self.boolIndex.is_object())\n self.assertFalse(self.catIndex.is_object())\n self.assertFalse(self.intIndex.is_object())\n self.assertFalse(self.dateIndex.is_object())\n self.assertFalse(self.floatIndex.is_object())\n\n def test_is_all_dates(self):\n self.assertTrue(self.dateIndex.is_all_dates)\n self.assertFalse(self.strIndex.is_all_dates)\n self.assertFalse(self.intIndex.is_all_dates)\n\n def test_summary(self):\n self._check_method_works(Index.summary)\n # GH3869\n ind = Index(['{other}%s', \"~:{range}:0\"], name='A')\n result = ind.summary()\n # shouldn't be formatted accidentally.\n self.assertIn('~:{range}:0', result)\n self.assertIn('{other}%s', result)\n\n def test_format(self):\n self._check_method_works(Index.format)\n\n index = Index([datetime.now()])\n\n\n # windows has different precision on datetime.datetime.now (it doesn't include us\n # since the default for Timestamp shows these but Index formating does not\n # we are skipping\n if not is_platform_windows():\n formatted = index.format()\n expected = [str(index[0])]\n self.assertEqual(formatted, expected)\n\n # 2845\n index = Index([1, 2.0+3.0j, np.nan])\n formatted = index.format()\n expected = [str(index[0]), str(index[1]), u('NaN')]\n self.assertEqual(formatted, expected)\n\n # is this really allowed?\n index = Index([1, 2.0+3.0j, None])\n formatted = index.format()\n expected = [str(index[0]), str(index[1]), u('NaN')]\n self.assertEqual(formatted, expected)\n\n self.strIndex[:0].format()\n\n def test_format_with_name_time_info(self):\n # bug I fixed 12/20/2011\n inc = timedelta(hours=4)\n dates = Index([dt + inc for dt in self.dateIndex], name='something')\n\n formatted = dates.format(name=True)\n self.assertEqual(formatted[0], 'something')\n\n def test_format_datetime_with_time(self):\n t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])\n\n result = t.format()\n expected = ['2012-02-07 
00:00:00', '2012-02-07 23:00:00']\n self.assertEqual(len(result), 2)\n self.assertEqual(result, expected)\n\n def test_format_none(self):\n values = ['a', 'b', 'c', None]\n\n idx = Index(values)\n idx.format()\n self.assertIsNone(idx[3])\n\n def test_logical_compat(self):\n idx = self.create_index()\n self.assertEqual(idx.all(), idx.values.all())\n self.assertEqual(idx.any(), idx.values.any())\n\n def _check_method_works(self, method):\n method(self.empty)\n method(self.dateIndex)\n method(self.unicodeIndex)\n method(self.strIndex)\n method(self.intIndex)\n method(self.tuples)\n method(self.catIndex)\n\n def test_get_indexer(self):\n idx1 = Index([1, 2, 3, 4, 5])\n idx2 = Index([2, 4, 6])\n\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, [1, 3, -1])\n\n r1 = idx2.get_indexer(idx1, method='pad')\n e1 = [-1, 0, 0, 1, 1]\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='pad')\n assert_almost_equal(r2, e1[::-1])\n\n rffill1 = idx2.get_indexer(idx1, method='ffill')\n assert_almost_equal(r1, rffill1)\n\n r1 = idx2.get_indexer(idx1, method='backfill')\n e1 = [0, 0, 1, 1, 2]\n assert_almost_equal(r1, e1)\n\n rbfill1 = idx2.get_indexer(idx1, method='bfill')\n assert_almost_equal(r1, rbfill1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='backfill')\n assert_almost_equal(r2, e1[::-1])\n\n def test_get_indexer_invalid(self):\n # GH10411\n idx = Index(np.arange(10))\n\n with tm.assertRaisesRegexp(ValueError, 'tolerance argument'):\n idx.get_indexer([1, 0], tolerance=1)\n\n with tm.assertRaisesRegexp(ValueError, 'limit argument'):\n idx.get_indexer([1, 0], limit=1)\n\n def test_get_indexer_nearest(self):\n idx = Index(np.arange(10))\n\n all_methods = ['pad', 'backfill', 'nearest']\n for method in all_methods:\n actual = idx.get_indexer([0, 5, 9], method=method)\n tm.assert_numpy_array_equal(actual, [0, 5, 9])\n\n actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)\n tm.assert_numpy_array_equal(actual, [0, 5, 9])\n\n for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9], [0, 2, 9]]):\n actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)\n tm.assert_numpy_array_equal(actual, expected)\n\n actual = idx.get_indexer([0.2, 1.8, 8.5], method=method, tolerance=1)\n tm.assert_numpy_array_equal(actual, expected)\n\n for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1], [0, 2, -1]]):\n actual = idx.get_indexer([0.2, 1.8, 8.5], method=method, tolerance=0.2)\n tm.assert_numpy_array_equal(actual, expected)\n\n with tm.assertRaisesRegexp(ValueError, 'limit argument'):\n idx.get_indexer([1, 0], method='nearest', limit=1)\n\n def test_get_indexer_nearest_decreasing(self):\n idx = Index(np.arange(10))[::-1]\n\n all_methods = ['pad', 'backfill', 'nearest']\n for method in all_methods:\n actual = idx.get_indexer([0, 5, 9], method=method)\n tm.assert_numpy_array_equal(actual, [9, 4, 0])\n\n for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1], [9, 7, 0]]):\n actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)\n tm.assert_numpy_array_equal(actual, expected)\n\n def test_get_indexer_strings(self):\n idx = pd.Index(['b', 'c'])\n\n actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')\n expected = [-1, 0, 1, 1]\n tm.assert_numpy_array_equal(actual, expected)\n\n actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')\n expected = [0, 0, 1, -1]\n tm.assert_numpy_array_equal(actual, expected)\n\n with tm.assertRaises(TypeError):\n idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')\n\n with tm.assertRaises(TypeError):\n 
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)\n\n def test_get_loc(self):\n idx = pd.Index([0, 1, 2])\n all_methods = [None, 'pad', 'backfill', 'nearest']\n for method in all_methods:\n self.assertEqual(idx.get_loc(1, method=method), 1)\n if method is not None:\n self.assertEqual(idx.get_loc(1, method=method, tolerance=0), 1)\n with tm.assertRaises(TypeError):\n idx.get_loc([1, 2], method=method)\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n self.assertEqual(idx.get_loc(1.1, method), loc)\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n self.assertEqual(idx.get_loc(1.1, method, tolerance=1), loc)\n\n for method in ['pad', 'backfill', 'nearest']:\n with tm.assertRaises(KeyError):\n idx.get_loc(1.1, method, tolerance=0.05)\n\n with tm.assertRaisesRegexp(ValueError, 'must be numeric'):\n idx.get_loc(1.1, 'nearest', tolerance='invalid')\n with tm.assertRaisesRegexp(ValueError, 'tolerance .* valid if'):\n idx.get_loc(1.1, tolerance=1)\n\n idx = pd.Index(['a', 'c'])\n with tm.assertRaises(TypeError):\n idx.get_loc('a', method='nearest')\n with tm.assertRaises(TypeError):\n idx.get_loc('a', method='pad', tolerance='invalid')\n\n def test_slice_locs(self):\n for dtype in [int, float]:\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))\n n = len(idx)\n\n self.assertEqual(idx.slice_locs(start=2), (2, n))\n self.assertEqual(idx.slice_locs(start=3), (3, n))\n self.assertEqual(idx.slice_locs(3, 8), (3, 6))\n self.assertEqual(idx.slice_locs(5, 10), (3, n))\n self.assertEqual(idx.slice_locs(end=8), (0, 6))\n self.assertEqual(idx.slice_locs(end=9), (0, 7))\n\n # reversed\n idx2 = idx[::-1]\n self.assertEqual(idx2.slice_locs(8, 2), (2, 6))\n self.assertEqual(idx2.slice_locs(7, 3), (2, 5))\n\n # float slicing\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))\n n = len(idx)\n self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))\n self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))\n idx2 = idx[::-1]\n self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))\n self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))\n\n # int slicing with floats\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))\n self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))\n self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))\n idx2 = idx[::-1]\n self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))\n self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))\n\n def test_slice_locs_dup(self):\n idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])\n self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))\n self.assertEqual(idx.slice_locs(end='d'), (0, 6))\n self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))\n self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))\n\n idx2 = idx[::-1]\n self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))\n self.assertEqual(idx2.slice_locs(end='a'), (0, 6))\n self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))\n self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))\n\n for dtype in [int, float]:\n idx = Index(np.array([10, 12, 12, 14], dtype=dtype))\n self.assertEqual(idx.slice_locs(12, 12), (1, 3))\n self.assertEqual(idx.slice_locs(11, 13), (1, 3))\n\n idx2 = idx[::-1]\n self.assertEqual(idx2.slice_locs(12, 12), (1, 3))\n self.assertEqual(idx2.slice_locs(13, 11), (1, 3))\n\n def test_slice_locs_na(self):\n idx = Index([np.nan, 1, 2])\n self.assertRaises(KeyError, idx.slice_locs, start=1.5)\n self.assertRaises(KeyError, idx.slice_locs, end=1.5)\n self.assertEqual(idx.slice_locs(1), (1, 3))\n self.assertEqual(idx.slice_locs(np.nan), (0, 
3))\n\n idx = Index([0, np.nan, np.nan, 1, 2])\n self.assertEqual(idx.slice_locs(np.nan), (1, 5))\n\n def test_slice_locs_negative_step(self):\n idx = Index(list('bcdxy'))\n\n SLC = pd.IndexSlice\n\n def check_slice(in_slice, expected):\n s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,\n in_slice.step)\n result = idx[s_start:s_stop:in_slice.step]\n expected = pd.Index(list(expected))\n self.assertTrue(result.equals(expected))\n\n for in_slice, expected in [\n (SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),\n (SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),\n (SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),\n (SLC['y'::-4], 'yb'),\n # absent labels\n (SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),\n (SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),\n (SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),\n (SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),\n (SLC['m':'m':-1], '')\n ]:\n check_slice(in_slice, expected)\n\n def test_drop(self):\n n = len(self.strIndex)\n\n drop = self.strIndex[lrange(5, 10)]\n dropped = self.strIndex.drop(drop)\n expected = self.strIndex[lrange(5) + lrange(10, n)]\n self.assertTrue(dropped.equals(expected))\n\n self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])\n self.assertRaises(ValueError, self.strIndex.drop, ['1', 'bar'])\n\n # errors='ignore'\n mixed = drop.tolist() + ['foo']\n dropped = self.strIndex.drop(mixed, errors='ignore')\n expected = self.strIndex[lrange(5) + lrange(10, n)]\n self.assert_index_equal(dropped, expected)\n\n dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')\n expected = self.strIndex[lrange(n)]\n self.assert_index_equal(dropped, expected)\n\n dropped = self.strIndex.drop(self.strIndex[0])\n expected = self.strIndex[1:]\n self.assert_index_equal(dropped, expected)\n\n ser = Index([1, 2, 3])\n dropped = ser.drop(1)\n expected = Index([2, 3])\n self.assert_index_equal(dropped, expected)\n\n # errors='ignore'\n self.assertRaises(ValueError, ser.drop, [3, 4])\n\n dropped = ser.drop(4, errors='ignore')\n expected = Index([1, 2, 3])\n self.assert_index_equal(dropped, expected)\n\n dropped = ser.drop([3, 4, 5], errors='ignore')\n expected = Index([1, 2])\n self.assert_index_equal(dropped, expected)\n\n def test_tuple_union_bug(self):\n import pandas\n import numpy as np\n\n aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],\n dtype=[('num', int), ('let', 'a1')])\n aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,\n 'C')], dtype=[('num', int), ('let', 'a1')])\n\n idx1 = pandas.Index(aidx1)\n idx2 = pandas.Index(aidx2)\n\n # intersection broken?\n int_idx = idx1.intersection(idx2)\n # needs to be 1d like idx1 and idx2\n expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))\n self.assertEqual(int_idx.ndim, 1)\n self.assertTrue(int_idx.equals(expected))\n\n # union broken\n union_idx = idx1.union(idx2)\n expected = idx2\n self.assertEqual(union_idx.ndim, 1)\n self.assertTrue(union_idx.equals(expected))\n\n def test_is_monotonic_incomparable(self):\n index = Index([5, datetime.now(), 7])\n self.assertFalse(index.is_monotonic)\n self.assertFalse(index.is_monotonic_decreasing)\n\n def test_get_set_value(self):\n values = np.random.randn(100)\n date = self.dateIndex[67]\n\n assert_almost_equal(self.dateIndex.get_value(values, date),\n values[67])\n\n self.dateIndex.set_value(values, date, 10)\n self.assertEqual(values[67], 10)\n\n def test_isin(self):\n values = ['foo', 'bar', 'quux']\n\n idx = Index(['qux', 'baz', 'foo', 'bar'])\n result = idx.isin(values)\n expected = np.array([False, 
False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # empty, return dtype bool\n idx = Index([])\n result = idx.isin(values)\n self.assertEqual(len(result), 0)\n self.assertEqual(result.dtype, np.bool_)\n\n def test_isin_nan(self):\n tm.assert_numpy_array_equal(\n Index(['a', np.nan]).isin([np.nan]), [False, True])\n tm.assert_numpy_array_equal(\n Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])\n tm.assert_numpy_array_equal(\n Index(['a', np.nan]).isin([float('nan')]), [False, False])\n tm.assert_numpy_array_equal(\n Index(['a', np.nan]).isin([pd.NaT]), [False, False])\n # Float64Index overrides isin, so must be checked separately\n tm.assert_numpy_array_equal(\n Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])\n tm.assert_numpy_array_equal(\n Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])\n tm.assert_numpy_array_equal(\n Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])\n\n def test_isin_level_kwarg(self):\n def check_idx(idx):\n values = idx.tolist()[-2:] + ['nonexisting']\n\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))\n tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))\n\n self.assertRaises(IndexError, idx.isin, values, level=1)\n self.assertRaises(IndexError, idx.isin, values, level=10)\n self.assertRaises(IndexError, idx.isin, values, level=-2)\n\n self.assertRaises(KeyError, idx.isin, values, level=1.0)\n self.assertRaises(KeyError, idx.isin, values, level='foobar')\n\n idx.name = 'foobar'\n tm.assert_numpy_array_equal(expected,\n idx.isin(values, level='foobar'))\n\n self.assertRaises(KeyError, idx.isin, values, level='xyzzy')\n self.assertRaises(KeyError, idx.isin, values, level=np.nan)\n\n check_idx(Index(['qux', 'baz', 'foo', 'bar']))\n # Float64Index overrides isin, so must be checked separately\n check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))\n\n def test_boolean_cmp(self):\n values = [1, 2, 3, 4]\n\n idx = Index(values)\n res = (idx == values)\n\n tm.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))\n\n def test_get_level_values(self):\n result = self.strIndex.get_level_values(0)\n self.assertTrue(result.equals(self.strIndex))\n\n def test_slice_keep_name(self):\n idx = Index(['a', 'b'], name='asdf')\n self.assertEqual(idx.name, idx[1:].name)\n\n def test_join_self(self):\n # instance attributes of the form self.<name>Index\n indices = 'unicode', 'str', 'date', 'int', 'float'\n kinds = 'outer', 'inner', 'left', 'right'\n for index_kind in indices:\n res = getattr(self, '{0}Index'.format(index_kind))\n\n for kind in kinds:\n joined = res.join(res, how=kind)\n self.assertIs(res, joined)\n def test_str_attribute(self):\n # GH9068\n methods = ['strip', 'rstrip', 'lstrip']\n idx = Index([' jack', 'jill ', ' jesse ', 'frank'])\n for method in methods:\n expected = Index([getattr(str, method)(x) for x in idx.values])\n tm.assert_index_equal(getattr(Index.str, method)(idx.str), expected)\n\n # create a few instances that are not able to use .str accessor\n indices = [Index(range(5)),\n tm.makeDateIndex(10),\n MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),\n PeriodIndex(start='2000', end='2010', freq='A')]\n for idx in indices:\n with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):\n idx.str.repeat(2)\n\n idx = Index(['a b c', 'd e', 'f'])\n expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])\n tm.assert_index_equal(idx.str.split(), expected)\n 
tm.assert_index_equal(idx.str.split(expand=False), expected)\n\n expected = MultiIndex.from_tuples([('a', 'b', 'c'),\n ('d', 'e', np.nan),\n ('f', np.nan, np.nan)])\n tm.assert_index_equal(idx.str.split(expand=True), expected)\n\n # test boolean case, should return np.array instead of boolean Index\n idx = Index(['a1', 'a2', 'b1', 'b2'])\n expected = np.array([True, True, False, False])\n tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)\n self.assertIsInstance(idx.str.startswith('a'), np.ndarray)\n s = Series(range(4), index=idx)\n expected = Series(range(2), index=['a1', 'a2'])\n tm.assert_series_equal(s[s.index.str.startswith('a')], expected)\n\n def test_tab_completion(self):\n # GH 9910\n idx = Index(list('abcd'))\n self.assertTrue('str' in dir(idx))\n\n idx = Index(range(4))\n self.assertTrue('str' not in dir(idx))\n\n def test_indexing_doesnt_change_class(self):\n idx = Index([1, 2, 3, 'a', 'b', 'c'])\n\n self.assertTrue(idx[1:3].identical(\n pd.Index([2, 3], dtype=np.object_)))\n self.assertTrue(idx[[0,1]].identical(\n pd.Index([1, 2], dtype=np.object_)))\n\n def test_outer_join_sort(self):\n left_idx = Index(np.random.permutation(15))\n right_idx = tm.makeDateIndex(10)\n\n with tm.assert_produces_warning(RuntimeWarning):\n joined = left_idx.join(right_idx, how='outer')\n\n # right_idx in this case because DatetimeIndex has join precedence over\n # Int64Index\n with tm.assert_produces_warning(RuntimeWarning):\n expected = right_idx.astype(object).union(left_idx.astype(object))\n tm.assert_index_equal(joined, expected)\n\n def test_nan_first_take_datetime(self):\n idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])\n res = idx.take([-1, 0, 1])\n exp = Index([idx[-1], idx[0], idx[1]])\n tm.assert_index_equal(res, exp)\n\n def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):\n # GH6552\n idx = pd.Index([0, 1, 2])\n\n dt_idx = pd.date_range('20130101', periods=3)\n\n idx.name = None\n self.assertEqual(idx.reindex([])[0].name, None)\n self.assertEqual(idx.reindex(np.array([]))[0].name, None)\n self.assertEqual(idx.reindex(idx.tolist())[0].name, None)\n self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)\n self.assertEqual(idx.reindex(idx.values)[0].name, None)\n self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)\n\n # Must preserve name even if dtype changes.\n self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)\n self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)\n\n idx.name = 'foobar'\n self.assertEqual(idx.reindex([])[0].name, 'foobar')\n self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')\n self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')\n self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')\n self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')\n self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')\n\n # Must preserve name even if dtype changes.\n self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')\n self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')\n\n def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):\n # GH7774\n idx = pd.Index(list('abc'))\n def get_reindex_type(target):\n return idx.reindex(target)[0].dtype.type\n\n self.assertEqual(get_reindex_type([]), np.object_)\n self.assertEqual(get_reindex_type(np.array([])), np.object_)\n self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),\n np.object_)\n\n def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):\n # GH7774\n 
idx = pd.Index(list('abc'))\n def get_reindex_type(target):\n return idx.reindex(target)[0].dtype.type\n\n self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)\n self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)\n self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)\n\n reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),\n pd.Float64Index([])],\n [[], []]))[0]\n self.assertEqual(reindexed.levels[0].dtype.type, np.int64)\n self.assertEqual(reindexed.levels[1].dtype.type, np.float64)\n\n def test_groupby(self):\n idx = Index(range(5))\n groups = idx.groupby(np.array([1,1,2,2,2]))\n exp = {1: [0, 1], 2: [2, 3, 4]}\n tm.assert_dict_equal(groups, exp)\n\n def test_equals_op_multiindex(self):\n # GH9785\n # test comparisons of multiindex\n from pandas.compat import StringIO\n df = pd.read_csv(StringIO('a,b,c\\n1,2,3\\n4,5,6'), index_col=[0, 1])\n tm.assert_numpy_array_equal(df.index == df.index, np.array([True, True]))\n\n mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])\n tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))\n mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])\n tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))\n mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n df.index == mi3\n\n index_a = Index(['foo', 'bar', 'baz'])\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n df.index == index_a\n tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False]))\n\n def test_conversion_preserves_name(self):\n #GH 10875\n i = pd.Index(['01:02:03', '01:02:04'], name='label')\n self.assertEqual(i.name, pd.to_datetime(i).name)\n self.assertEqual(i.name, pd.to_timedelta(i).name)\n\n def test_string_index_repr(self):\n # py3/py2 repr can differ because of \"u\" prefix\n # which also affects to displayed element size\n\n # short\n idx = pd.Index(['a', 'bb', 'ccc'])\n if PY3:\n expected = u\"\"\"Index(['a', 'bb', 'ccc'], dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'a', u'bb', u'ccc'], dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.Index(['a', 'bb', 'ccc'] * 10)\n if PY3:\n expected = u\"\"\"Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',\n 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',\n 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.Index(['a', 'bb', 'ccc'] * 100)\n if PY3:\n expected = u\"\"\"Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n ...\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n ...\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # short\n idx = pd.Index([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 
'ううう'], dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう'], dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # Emable Unicode option -----------------------------------------\n with cf.option_context('display.unicode.east_asian_width', True):\n\n # short\n idx = pd.Index([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう'], dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう'], dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう'],\n dtype='object')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n dtype='object')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう'],\n dtype='object', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n\nclass TestCategoricalIndex(Base, tm.TestCase):\n _holder = CategoricalIndex\n\n def setUp(self):\n self.indices = dict(catIndex = tm.makeCategoricalIndex(100))\n self.setup_indices()\n\n def create_index(self, categories=None, ordered=False):\n if categories is None:\n categories = list('cab')\n return CategoricalIndex(list('aabbca'), categories=categories, ordered=ordered)\n\n def test_construction(self):\n\n ci = 
self.create_index(categories=list('abcd'))\n categories = ci.categories\n\n result = Index(ci)\n tm.assert_index_equal(result,ci,exact=True)\n self.assertFalse(result.ordered)\n\n result = Index(ci.values)\n tm.assert_index_equal(result,ci,exact=True)\n self.assertFalse(result.ordered)\n\n # empty\n result = CategoricalIndex(categories=categories)\n self.assertTrue(result.categories.equals(Index(categories)))\n tm.assert_numpy_array_equal(result.codes, np.array([],dtype='int8'))\n self.assertFalse(result.ordered)\n\n # passing categories\n result = CategoricalIndex(list('aabbca'),categories=categories)\n self.assertTrue(result.categories.equals(Index(categories)))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))\n\n c = pd.Categorical(list('aabbca'))\n result = CategoricalIndex(c)\n self.assertTrue(result.categories.equals(Index(list('abc'))))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))\n self.assertFalse(result.ordered)\n\n result = CategoricalIndex(c,categories=categories)\n self.assertTrue(result.categories.equals(Index(categories)))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))\n self.assertFalse(result.ordered)\n\n ci = CategoricalIndex(c,categories=list('abcd'))\n result = CategoricalIndex(ci)\n self.assertTrue(result.categories.equals(Index(categories)))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))\n self.assertFalse(result.ordered)\n\n result = CategoricalIndex(ci, categories=list('ab'))\n self.assertTrue(result.categories.equals(Index(list('ab'))))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))\n self.assertFalse(result.ordered)\n\n result = CategoricalIndex(ci, categories=list('ab'), ordered=True)\n self.assertTrue(result.categories.equals(Index(list('ab'))))\n tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))\n self.assertTrue(result.ordered)\n\n # turn me to an Index\n result = Index(np.array(ci))\n self.assertIsInstance(result, Index)\n self.assertNotIsInstance(result, CategoricalIndex)\n\n def test_construction_with_dtype(self):\n\n # specify dtype\n ci = self.create_index(categories=list('abc'))\n\n result = Index(np.array(ci), dtype='category')\n tm.assert_index_equal(result,ci,exact=True)\n\n result = Index(np.array(ci).tolist(), dtype='category')\n tm.assert_index_equal(result,ci,exact=True)\n\n # these are generally only equal when the categories are reordered\n ci = self.create_index()\n\n result = Index(np.array(ci), dtype='category').reorder_categories(ci.categories)\n tm.assert_index_equal(result,ci,exact=True)\n\n # make sure indexes are handled\n expected = CategoricalIndex([0,1,2], categories=[0,1,2], ordered=True)\n idx = Index(range(3))\n result = CategoricalIndex(idx, categories=idx, ordered=True)\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_disallow_set_ops(self):\n\n # GH 10039\n # set ops (+/-) raise TypeError\n idx = pd.Index(pd.Categorical(['a', 'b']))\n\n self.assertRaises(TypeError, lambda : idx - idx)\n self.assertRaises(TypeError, lambda : idx + idx)\n self.assertRaises(TypeError, lambda : idx - ['a','b'])\n self.assertRaises(TypeError, lambda : idx + ['a','b'])\n self.assertRaises(TypeError, lambda : ['a','b'] - idx)\n self.assertRaises(TypeError, lambda : ['a','b'] + idx)\n\n def test_method_delegation(self):\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.set_categories(list('cab'))\n 
tm.assert_index_equal(result, CategoricalIndex(list('aabbca'), categories=list('cab')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.rename_categories(list('efg'))\n tm.assert_index_equal(result, CategoricalIndex(list('ffggef'), categories=list('efg')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.add_categories(['d'])\n tm.assert_index_equal(result, CategoricalIndex(list('aabbca'), categories=list('cabd')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.remove_categories(['c'])\n tm.assert_index_equal(result, CategoricalIndex(list('aabb') + [np.nan] + ['a'], categories=list('ab')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.as_unordered()\n tm.assert_index_equal(result, ci)\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.as_ordered()\n tm.assert_index_equal(result, CategoricalIndex(list('aabbca'), categories=list('cabdef'), ordered=True))\n\n # invalid\n self.assertRaises(ValueError, lambda : ci.set_categories(list('cab'), inplace=True))\n\n def test_contains(self):\n\n ci = self.create_index(categories=list('cabdef'))\n\n self.assertTrue('a' in ci)\n self.assertTrue('z' not in ci)\n self.assertTrue('e' not in ci)\n self.assertTrue(np.nan not in ci)\n\n # assert codes NOT in index\n self.assertFalse(0 in ci)\n self.assertFalse(1 in ci)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef') + [np.nan])\n self.assertFalse(np.nan in ci)\n\n ci = CategoricalIndex(list('aabbca') + [np.nan], categories=list('cabdef'))\n self.assertTrue(np.nan in ci)\n\n def test_min_max(self):\n\n ci = self.create_index(ordered=False)\n self.assertRaises(TypeError, lambda : ci.min())\n self.assertRaises(TypeError, lambda : ci.max())\n\n ci = self.create_index(ordered=True)\n\n self.assertEqual(ci.min(),'c')\n self.assertEqual(ci.max(),'b')\n\n def test_append(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n # append cats with the same categories\n result = ci[:3].append(ci[3:])\n tm.assert_index_equal(result,ci,exact=True)\n\n foos = [ci[:1], ci[1:3], ci[3:]]\n result = foos[0].append(foos[1:])\n tm.assert_index_equal(result,ci,exact=True)\n\n # empty\n result = ci.append([])\n tm.assert_index_equal(result,ci,exact=True)\n\n # appending with different categories or reoreded is not ok\n self.assertRaises(TypeError, lambda : ci.append(ci.values.set_categories(list('abcd'))))\n self.assertRaises(TypeError, lambda : ci.append(ci.values.reorder_categories(list('abc'))))\n\n # with objects\n result = ci.append(['c','a'])\n expected = CategoricalIndex(list('aabbcaca'), categories=categories)\n tm.assert_index_equal(result,expected,exact=True)\n\n # invalid objects\n self.assertRaises(TypeError, lambda : ci.append(['a','d']))\n\n def test_insert(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n #test 0th element\n result = ci.insert(0, 'a')\n expected = CategoricalIndex(list('aaabbca'),categories=categories)\n tm.assert_index_equal(result,expected,exact=True)\n\n #test Nth element that follows Python list behavior\n result = ci.insert(-1, 'a')\n expected = CategoricalIndex(list('aabbcaa'),categories=categories)\n tm.assert_index_equal(result,expected,exact=True)\n\n #test empty\n result = CategoricalIndex(categories=categories).insert(0, 'a')\n expected = CategoricalIndex(['a'],categories=categories)\n 
tm.assert_index_equal(result,expected,exact=True)\n\n # invalid\n self.assertRaises(TypeError, lambda : ci.insert(0,'d'))\n\n def test_delete(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n result = ci.delete(0)\n expected = CategoricalIndex(list('abbca'),categories=categories)\n tm.assert_index_equal(result,expected,exact=True)\n\n result = ci.delete(-1)\n expected = CategoricalIndex(list('aabbc'),categories=categories)\n tm.assert_index_equal(result,expected,exact=True)\n\n with tm.assertRaises((IndexError, ValueError)):\n # either depeidnig on numpy version\n result = ci.delete(10)\n\n def test_astype(self):\n\n ci = self.create_index()\n result = ci.astype('category')\n tm.assert_index_equal(result,ci,exact=True)\n\n result = ci.astype(object)\n self.assertTrue(result.equals(Index(np.array(ci))))\n\n # this IS equal, but not the same class\n self.assertTrue(result.equals(ci))\n self.assertIsInstance(result, Index)\n self.assertNotIsInstance(result, CategoricalIndex)\n\n def test_reindex_base(self):\n\n # determined by cat ordering\n idx = self.create_index()\n expected = np.array([4,0,1,5,2,3])\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):\n idx.get_indexer(idx, method='invalid')\n\n def test_reindexing(self):\n\n ci = self.create_index()\n oidx = Index(np.array(ci))\n\n for n in [1,2,5,len(ci)]:\n finder = oidx[np.random.randint(0,len(ci),size=n)]\n expected = oidx.get_indexer_non_unique(finder)[0]\n\n actual = ci.get_indexer(finder)\n tm.assert_numpy_array_equal(expected, actual)\n\n def test_duplicates(self):\n\n idx = CategoricalIndex([0, 0, 0], name='foo')\n self.assertFalse(idx.is_unique)\n self.assertTrue(idx.has_duplicates)\n\n expected = CategoricalIndex([0], name='foo')\n self.assert_index_equal(idx.drop_duplicates(), expected)\n\n def test_get_indexer(self):\n\n idx1 = CategoricalIndex(list('aabcde'),categories=list('edabc'))\n idx2 = CategoricalIndex(list('abf'))\n\n for indexer in [idx2, list('abf'), Index(list('abf'))]:\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, [0, 1, 2, -1])\n\n self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='pad'))\n self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='backfill'))\n self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='nearest'))\n\n def test_repr_roundtrip(self):\n\n ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)\n str(ci)\n tm.assert_index_equal(eval(repr(ci)),ci,exact=True)\n\n # formatting\n if compat.PY3:\n str(ci)\n else:\n compat.text_type(ci)\n\n # long format\n # this is not reprable\n ci = CategoricalIndex(np.random.randint(0,5,size=100))\n if compat.PY3:\n str(ci)\n else:\n compat.text_type(ci)\n\n def test_isin(self):\n\n ci = CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b'])\n tm.assert_numpy_array_equal(ci.isin(['c']),np.array([False,False,False,True,False,False]))\n tm.assert_numpy_array_equal(ci.isin(['c','a','b']),np.array([True]*5 + [False]))\n tm.assert_numpy_array_equal(ci.isin(['c','a','b',np.nan]),np.array([True]*6))\n\n # mismatched categorical -> coerced to ndarray so doesn't matter\n tm.assert_numpy_array_equal(ci.isin(ci.set_categories(list('abcdefghi'))),np.array([True]*6))\n tm.assert_numpy_array_equal(ci.isin(ci.set_categories(list('defghi'))),np.array([False]*5 + [True]))\n\n def test_identical(self):\n\n ci1 = CategoricalIndex(['a', 'b'], 
categories=['a', 'b'], ordered=True)\n ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'], ordered=True)\n self.assertTrue(ci1.identical(ci1))\n self.assertTrue(ci1.identical(ci1.copy()))\n self.assertFalse(ci1.identical(ci2))\n\n def test_equals(self):\n\n ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)\n ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'], ordered=True)\n\n self.assertTrue(ci1.equals(ci1))\n self.assertFalse(ci1.equals(ci2))\n self.assertTrue(ci1.equals(ci1.astype(object)))\n self.assertTrue(ci1.astype(object).equals(ci1))\n\n self.assertTrue((ci1 == ci1).all())\n self.assertFalse((ci1 != ci1).all())\n self.assertFalse((ci1 > ci1).all())\n self.assertFalse((ci1 < ci1).all())\n self.assertTrue((ci1 <= ci1).all())\n self.assertTrue((ci1 >= ci1).all())\n\n self.assertFalse((ci1 == 1).all())\n self.assertTrue((ci1 == Index(['a','b'])).all())\n self.assertTrue((ci1 == ci1.values).all())\n\n # invalid comparisons\n with tm.assertRaisesRegexp(ValueError, \"Lengths must match\"):\n ci1 == Index(['a','b','c'])\n self.assertRaises(TypeError, lambda : ci1 == ci2)\n self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, ordered=False))\n self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, categories=list('abc')))\n\n # tests\n # make sure that we are testing for category inclusion properly\n self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b']).equals(list('aabca')))\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b',np.nan]).equals(list('aabca')))\n\n self.assertFalse(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b']).equals(list('aabca')))\n self.assertTrue(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b']).equals(list('aabca') + [np.nan]))\n\n def test_string_categorical_index_repr(self):\n # short\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',\n u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n ...\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n 
expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a',\n ...\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n u'bb', u'ccc'],\n categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # larger categories\n idx = pd.CategoricalIndex(list('abcdefghijklmmo'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'm', 'o'],\n categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',\n u'k', u'l', u'm', u'm', u'o'],\n categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')\"\"\"\n\n self.assertEqual(unicode(idx), expected)\n\n # short\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',\n u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # larger categories\n idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',\n 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',\n u'さ', u'し', u'す', u'せ', u'そ'],\n categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # Emable Unicode option 
-----------------------------------------\n with cf.option_context('display.unicode.east_asian_width', True):\n\n # short\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # multiple lines\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # truncated\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n # larger categories\n idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',\n 'さ', 'し', 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\"\n self.assertEqual(repr(idx), expected)\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',\n u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],\n categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')\"\"\"\n self.assertEqual(unicode(idx), expected)\n\n\nclass Numeric(Base):\n\n def test_numeric_compat(self):\n\n idx = self._holder(np.arange(5,dtype='int64'))\n didx = self._holder(np.arange(5,dtype='int64')**2\n )\n result = idx * 1\n tm.assert_index_equal(result, idx)\n\n result = 1 * idx\n tm.assert_index_equal(result, idx)\n\n result = idx * idx\n tm.assert_index_equal(result, didx)\n\n result = idx / 1\n tm.assert_index_equal(result, idx)\n\n result = idx // 1\n tm.assert_index_equal(result, idx)\n\n result = idx * np.array(5,dtype='int64')\n tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))\n\n result = idx * np.arange(5,dtype='int64')\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5,dtype='int64'))\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5,dtype='float64')+0.1)\n tm.assert_index_equal(result,\n 
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))\n\n # invalid\n self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))\n self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))\n self.assertRaises(ValueError, lambda : idx * np.array([1,2]))\n\n\n def test_explicit_conversions(self):\n\n # GH 8608\n # add/sub are overriden explicity for Float/Int Index\n idx = self._holder(np.arange(5,dtype='int64'))\n\n # float conversions\n arr = np.arange(5,dtype='int64')*3.2\n expected = Float64Index(arr)\n fidx = idx * 3.2\n tm.assert_index_equal(fidx,expected)\n fidx = 3.2 * idx\n tm.assert_index_equal(fidx,expected)\n\n # interops with numpy arrays\n expected = Float64Index(arr)\n a = np.zeros(5,dtype='float64')\n result = fidx - a\n tm.assert_index_equal(result,expected)\n\n expected = Float64Index(-arr)\n a = np.zeros(5,dtype='float64')\n result = a - fidx\n tm.assert_index_equal(result,expected)\n\n def test_ufunc_compat(self):\n idx = self._holder(np.arange(5,dtype='int64'))\n result = np.sin(idx)\n expected = Float64Index(np.sin(np.arange(5,dtype='int64')))\n tm.assert_index_equal(result, expected)\n\n def test_index_groupby(self):\n int_idx = Index(range(6))\n float_idx = Index(np.arange(0, 0.6, 0.1))\n obj_idx = Index('A B C D E F'.split())\n dt_idx = pd.date_range('2013-01-01', freq='M', periods=6)\n\n for idx in [int_idx, float_idx, obj_idx, dt_idx]:\n to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1])\n self.assertEqual(idx.groupby(to_groupby),\n {1.0: [idx[0], idx[5]], 2.0: [idx[1], idx[4]]})\n\n to_groupby = Index([datetime(2011, 11, 1), datetime(2011, 12, 1),\n pd.NaT, pd.NaT,\n datetime(2011, 12, 1), datetime(2011, 11, 1)], tz='UTC').values\n\n ex_keys = pd.tslib.datetime_to_datetime64(np.array([Timestamp('2011-11-01'), Timestamp('2011-12-01')]))\n expected = {ex_keys[0][0]: [idx[0], idx[5]], ex_keys[0][1]: [idx[1], idx[4]]}\n self.assertEqual(idx.groupby(to_groupby), expected)\n\n\nclass TestFloat64Index(Numeric, tm.TestCase):\n _holder = Float64Index\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(mixed = Float64Index([1.5, 2, 3, 4, 5]),\n float = Float64Index(np.arange(5) * 2.5))\n self.setup_indices()\n\n def create_index(self):\n return Float64Index(np.arange(5, dtype='float64'))\n\n def test_repr_roundtrip(self):\n for ind in (self.mixed, self.float):\n tm.assert_index_equal(eval(repr(ind)), ind)\n\n def check_is_index(self, i):\n self.assertIsInstance(i, Index)\n self.assertNotIsInstance(i, Float64Index)\n\n def check_coerce(self, a, b, is_float_index=True):\n self.assertTrue(a.equals(b))\n if is_float_index:\n self.assertIsInstance(b, Float64Index)\n else:\n self.check_is_index(b)\n\n def test_constructor(self):\n\n # explicit construction\n index = Float64Index([1,2,3,4,5])\n self.assertIsInstance(index, Float64Index)\n self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())\n index = Float64Index(np.array([1,2,3,4,5]))\n self.assertIsInstance(index, Float64Index)\n index = Float64Index([1.,2,3,4,5])\n self.assertIsInstance(index, Float64Index)\n index = Float64Index(np.array([1.,2,3,4,5]))\n self.assertIsInstance(index, Float64Index)\n self.assertEqual(index.dtype, float)\n\n index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)\n self.assertIsInstance(index, Float64Index)\n self.assertEqual(index.dtype, np.float64)\n\n index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)\n self.assertIsInstance(index, Float64Index)\n 
self.assertEqual(index.dtype, np.float64)\n\n # nan handling\n result = Float64Index([np.nan, np.nan])\n self.assertTrue(pd.isnull(result.values).all())\n result = Float64Index(np.array([np.nan]))\n self.assertTrue(pd.isnull(result.values).all())\n result = Index(np.array([np.nan]))\n self.assertTrue(pd.isnull(result.values).all())\n\n def test_constructor_invalid(self):\n\n # invalid\n self.assertRaises(TypeError, Float64Index, 0.)\n self.assertRaises(TypeError, Float64Index, ['a','b',0.])\n self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])\n\n def test_constructor_coerce(self):\n\n self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))\n self.check_coerce(self.float,Index(np.arange(5) * 2.5))\n self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))\n\n def test_constructor_explicit(self):\n\n # these don't auto convert\n self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),\n is_float_index=False)\n self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),\n is_float_index=False)\n\n def test_astype(self):\n\n result = self.float.astype(object)\n self.assertTrue(result.equals(self.float))\n self.assertTrue(self.float.equals(result))\n self.check_is_index(result)\n\n i = self.mixed.copy()\n i.name = 'foo'\n result = i.astype(object)\n self.assertTrue(result.equals(i))\n self.assertTrue(i.equals(result))\n self.check_is_index(result)\n\n def test_equals(self):\n\n i = Float64Index([1.0,2.0])\n self.assertTrue(i.equals(i))\n self.assertTrue(i.identical(i))\n\n i2 = Float64Index([1.0,2.0])\n self.assertTrue(i.equals(i2))\n\n i = Float64Index([1.0,np.nan])\n self.assertTrue(i.equals(i))\n self.assertTrue(i.identical(i))\n\n i2 = Float64Index([1.0,np.nan])\n self.assertTrue(i.equals(i2))\n\n def test_get_indexer(self):\n idx = Float64Index([0.0, 1.0, 2.0])\n tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])\n\n target = [-0.1, 0.5, 1.1]\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])\n\n def test_get_loc(self):\n idx = Float64Index([0.0, 1.0, 2.0])\n for method in [None, 'pad', 'backfill', 'nearest']:\n self.assertEqual(idx.get_loc(1, method), 1)\n if method is not None:\n self.assertEqual(idx.get_loc(1, method, tolerance=0), 1)\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n self.assertEqual(idx.get_loc(1.1, method), loc)\n self.assertEqual(idx.get_loc(1.1, method, tolerance=0.9), loc)\n\n self.assertRaises(KeyError, idx.get_loc, 'foo')\n self.assertRaises(KeyError, idx.get_loc, 1.5)\n self.assertRaises(KeyError, idx.get_loc, 1.5,\n method='pad', tolerance=0.1)\n\n with tm.assertRaisesRegexp(ValueError, 'must be numeric'):\n idx.get_loc(1.4, method='nearest', tolerance='foo')\n\n def test_get_loc_na(self):\n idx = Float64Index([np.nan, 1, 2])\n self.assertEqual(idx.get_loc(1), 1)\n self.assertEqual(idx.get_loc(np.nan), 0)\n\n idx = Float64Index([np.nan, 1, np.nan])\n self.assertEqual(idx.get_loc(1), 1)\n\n # representable by slice [0:2:2]\n # self.assertRaises(KeyError, idx.slice_locs, np.nan)\n sliced = idx.slice_locs(np.nan)\n self.assertTrue(isinstance(sliced, tuple))\n self.assertEqual(sliced, (0, 3))\n\n # not representable by slice\n idx = Float64Index([np.nan, 1, np.nan, np.nan])\n self.assertEqual(idx.get_loc(1), 1)\n self.assertRaises(KeyError, idx.slice_locs, np.nan)\n\n def 
test_contains_nans(self):\n i = Float64Index([1.0, 2.0, np.nan])\n self.assertTrue(np.nan in i)\n\n def test_contains_not_nans(self):\n i = Float64Index([1.0, 2.0, np.nan])\n self.assertTrue(1.0 in i)\n\n def test_doesnt_contain_all_the_things(self):\n i = Float64Index([np.nan])\n self.assertFalse(i.isin([0]).item())\n self.assertFalse(i.isin([1]).item())\n self.assertTrue(i.isin([np.nan]).item())\n\n def test_nan_multiple_containment(self):\n i = Float64Index([1.0, np.nan])\n tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False]))\n tm.assert_numpy_array_equal(i.isin([2.0, np.pi]),\n np.array([False, False]))\n tm.assert_numpy_array_equal(i.isin([np.nan]),\n np.array([False, True]))\n tm.assert_numpy_array_equal(i.isin([1.0, np.nan]),\n np.array([True, True]))\n i = Float64Index([1.0, 2.0])\n tm.assert_numpy_array_equal(i.isin([np.nan]),\n np.array([False, False]))\n\n def test_astype_from_object(self):\n index = Index([1.0, np.nan, 0.2], dtype='object')\n result = index.astype(float)\n expected = Float64Index([1.0, np.nan, 0.2])\n tm.assert_equal(result.dtype, expected.dtype)\n tm.assert_index_equal(result, expected)\n\n\nclass TestInt64Index(Numeric, tm.TestCase):\n _holder = Int64Index\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(index = Int64Index(np.arange(0, 20, 2)))\n self.setup_indices()\n\n def create_index(self):\n return Int64Index(np.arange(5, dtype='int64'))\n\n def test_too_many_names(self):\n def testit():\n self.index.names = [\"roger\", \"harold\"]\n assertRaisesRegexp(ValueError, \"^Length\", testit)\n\n def test_constructor(self):\n # pass list, coerce fine\n index = Int64Index([-5, 0, 1, 2])\n expected = np.array([-5, 0, 1, 2], dtype=np.int64)\n tm.assert_numpy_array_equal(index, expected)\n\n # from iterable\n index = Int64Index(iter([-5, 0, 1, 2]))\n tm.assert_numpy_array_equal(index, expected)\n\n # scalar raise Exception\n self.assertRaises(TypeError, Int64Index, 5)\n\n # copy\n arr = self.index.values\n new_index = Int64Index(arr, copy=True)\n tm.assert_numpy_array_equal(new_index, self.index)\n val = arr[0] + 3000\n # this should not change index\n arr[0] = val\n self.assertNotEqual(new_index[0], val)\n\n def test_constructor_corner(self):\n arr = np.array([1, 2, 3, 4], dtype=object)\n index = Int64Index(arr)\n self.assertEqual(index.values.dtype, np.int64)\n self.assertTrue(index.equals(arr))\n\n # preventing casting\n arr = np.array([1, '2', 3, '4'], dtype=object)\n with tm.assertRaisesRegexp(TypeError, 'casting'):\n Int64Index(arr)\n\n arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]\n with tm.assertRaisesRegexp(TypeError, 'casting'):\n Int64Index(arr_with_floats)\n\n def test_copy(self):\n i = Int64Index([], name='Foo')\n i_copy = i.copy()\n self.assertEqual(i_copy.name, 'Foo')\n\n def test_view(self):\n super(TestInt64Index, self).test_view()\n\n i = Int64Index([], name='Foo')\n i_view = i.view()\n self.assertEqual(i_view.name, 'Foo')\n\n i_view = i.view('i8')\n tm.assert_index_equal(i, Int64Index(i_view, name='Foo'))\n\n i_view = i.view(Int64Index)\n tm.assert_index_equal(i, Int64Index(i_view, name='Foo'))\n\n def test_coerce_list(self):\n # coerce things\n arr = Index([1, 2, 3, 4])\n tm.assertIsInstance(arr, Int64Index)\n\n # but not if explicit dtype passed\n arr = Index([1, 2, 3, 4], dtype=object)\n tm.assertIsInstance(arr, Index)\n\n def test_dtype(self):\n self.assertEqual(self.index.dtype, np.int64)\n\n def test_is_monotonic(self):\n self.assertTrue(self.index.is_monotonic)\n 
self.assertTrue(self.index.is_monotonic_increasing)\n self.assertFalse(self.index.is_monotonic_decreasing)\n\n index = Int64Index([4, 3, 2, 1])\n self.assertFalse(index.is_monotonic)\n self.assertTrue(index.is_monotonic_decreasing)\n\n index = Int64Index([1])\n self.assertTrue(index.is_monotonic)\n self.assertTrue(index.is_monotonic_increasing)\n self.assertTrue(index.is_monotonic_decreasing)\n\n def test_is_monotonic_na(self):\n examples = [Index([np.nan]),\n Index([np.nan, 1]),\n Index([1, 2, np.nan]),\n Index(['a', 'b', np.nan]),\n pd.to_datetime(['NaT']),\n pd.to_datetime(['NaT', '2000-01-01']),\n pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),\n pd.to_timedelta(['1 day', 'NaT']),\n ]\n for index in examples:\n self.assertFalse(index.is_monotonic_increasing)\n self.assertFalse(index.is_monotonic_decreasing)\n\n def test_equals(self):\n same_values = Index(self.index, dtype=object)\n self.assertTrue(self.index.equals(same_values))\n self.assertTrue(same_values.equals(self.index))\n\n def test_logical_compat(self):\n idx = self.create_index()\n self.assertEqual(idx.all(), idx.values.all())\n self.assertEqual(idx.any(), idx.values.any())\n\n def test_identical(self):\n i = Index(self.index.copy())\n self.assertTrue(i.identical(self.index))\n\n same_values_different_type = Index(i, dtype=object)\n self.assertFalse(i.identical(same_values_different_type))\n\n i = self.index.copy(dtype=object)\n i = i.rename('foo')\n same_values = Index(i, dtype=object)\n self.assertTrue(same_values.identical(self.index.copy(dtype=object)))\n\n self.assertFalse(i.identical(self.index))\n self.assertTrue(Index(same_values, name='foo', dtype=object\n ).identical(i))\n\n self.assertFalse(\n self.index.copy(dtype=object)\n .identical(self.index.copy(dtype='int64')))\n\n def test_get_indexer(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target)\n expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_get_indexer_pad(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target, method='pad')\n expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_get_indexer_backfill(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target, method='backfill')\n expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_join_outer(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n # guarantee of sortedness\n res, lidx, ridx = self.index.join(other, how='outer',\n return_indexers=True)\n noidx_res = self.index.join(other, how='outer')\n self.assertTrue(res.equals(noidx_res))\n\n eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])\n elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],\n dtype=np.int64)\n eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],\n dtype=np.int64)\n\n tm.assertIsInstance(res, Int64Index)\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='outer',\n return_indexers=True)\n noidx_res = self.index.join(other_mono, how='outer')\n self.assertTrue(res.equals(noidx_res))\n\n eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],\n dtype=np.int64)\n tm.assertIsInstance(res, Int64Index)\n 
self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_inner(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='inner',\n return_indexers=True)\n\n # no guarantee of sortedness, so sort for comparison purposes\n ind = res.argsort()\n res = res.take(ind)\n lidx = lidx.take(ind)\n ridx = ridx.take(ind)\n\n eres = Int64Index([2, 12])\n elidx = np.array([1, 6])\n eridx = np.array([4, 1])\n\n tm.assertIsInstance(res, Int64Index)\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='inner',\n return_indexers=True)\n\n res2 = self.index.intersection(other_mono)\n self.assertTrue(res.equals(res2))\n\n eridx = np.array([1, 4])\n tm.assertIsInstance(res, Int64Index)\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_left(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='left',\n return_indexers=True)\n eres = self.index\n eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],\n dtype=np.int64)\n\n tm.assertIsInstance(res, Int64Index)\n self.assertTrue(res.equals(eres))\n self.assertIsNone(lidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='left',\n return_indexers=True)\n eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],\n dtype=np.int64)\n tm.assertIsInstance(res, Int64Index)\n self.assertTrue(res.equals(eres))\n self.assertIsNone(lidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # non-unique\n \"\"\"\n idx = Index([1,1,2,5])\n idx2 = Index([1,2,5,7,9])\n res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)\n eres = idx2\n eridx = np.array([0, 2, 3, -1, -1])\n elidx = np.array([0, 1, 2, 3, 4])\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n \"\"\"\n\n def test_join_right(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='right',\n return_indexers=True)\n eres = other\n elidx = np.array([-1, 6, -1, -1, 1, -1],\n dtype=np.int64)\n\n tm.assertIsInstance(other, Int64Index)\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n self.assertIsNone(ridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='right',\n return_indexers=True)\n eres = other_mono\n elidx = np.array([-1, 1, -1, -1, 6, -1],\n dtype=np.int64)\n tm.assertIsInstance(other, Int64Index)\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n self.assertIsNone(ridx)\n\n # non-unique\n \"\"\"\n idx = Index([1,1,2,5])\n idx2 = Index([1,2,5,7,9])\n res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)\n eres = idx2\n elidx = np.array([0, 2, 3, -1, -1])\n eridx = np.array([0, 1, 2, 3, 4])\n self.assertTrue(res.equals(eres))\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n idx = Index([1,1,2,5])\n idx2 = Index([1,2,5,9,7])\n res = idx.join(idx2, how='right', return_indexers=False)\n eres = idx2\n 
self.assert(res.equals(eres))\n \"\"\"\n\n def test_join_non_int_index(self):\n other = Index([3, 6, 7, 8, 10], dtype=object)\n\n outer = self.index.join(other, how='outer')\n outer2 = other.join(self.index, how='outer')\n expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,\n 16, 18], dtype=object)\n self.assertTrue(outer.equals(outer2))\n self.assertTrue(outer.equals(expected))\n\n inner = self.index.join(other, how='inner')\n inner2 = other.join(self.index, how='inner')\n expected = Index([6, 8, 10], dtype=object)\n self.assertTrue(inner.equals(inner2))\n self.assertTrue(inner.equals(expected))\n\n left = self.index.join(other, how='left')\n self.assertTrue(left.equals(self.index))\n\n left2 = other.join(self.index, how='left')\n self.assertTrue(left2.equals(other))\n\n right = self.index.join(other, how='right')\n self.assertTrue(right.equals(other))\n\n right2 = other.join(self.index, how='right')\n self.assertTrue(right2.equals(self.index))\n\n def test_join_non_unique(self):\n left = Index([4, 4, 3, 3])\n\n joined, lidx, ridx = left.join(left, return_indexers=True)\n\n exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])\n self.assertTrue(joined.equals(exp_joined))\n\n exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)\n tm.assert_numpy_array_equal(lidx, exp_lidx)\n\n exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n\n def test_join_self(self):\n kinds = 'outer', 'inner', 'left', 'right'\n for kind in kinds:\n joined = self.index.join(self.index, how=kind)\n self.assertIs(self.index, joined)\n\n def test_intersection(self):\n other = Index([1, 2, 3, 4, 5])\n result = self.index.intersection(other)\n expected = np.sort(np.intersect1d(self.index.values, other.values))\n tm.assert_numpy_array_equal(result, expected)\n\n result = other.intersection(self.index)\n expected = np.sort(np.asarray(np.intersect1d(self.index.values,\n other.values)))\n tm.assert_numpy_array_equal(result, expected)\n\n def test_intersect_str_dates(self):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n i1 = Index(dt_dates, dtype=object)\n i2 = Index(['aa'], dtype=object)\n res = i2.intersection(i1)\n\n self.assertEqual(len(res), 0)\n\n def test_union_noncomparable(self):\n from datetime import datetime, timedelta\n # corner case, non-Int64Index\n now = datetime.now()\n other = Index([now + timedelta(i) for i in range(4)], dtype=object)\n result = self.index.union(other)\n expected = np.concatenate((self.index, other))\n tm.assert_numpy_array_equal(result, expected)\n\n result = other.union(self.index)\n expected = np.concatenate((other, self.index))\n tm.assert_numpy_array_equal(result, expected)\n\n def test_cant_or_shouldnt_cast(self):\n # can't\n data = ['foo', 'bar', 'baz']\n self.assertRaises(TypeError, Int64Index, data)\n\n # shouldn't\n data = ['0', '1', '2']\n self.assertRaises(TypeError, Int64Index, data)\n\n def test_view_Index(self):\n self.index.view(Index)\n\n def test_prevent_casting(self):\n result = self.index.astype('O')\n self.assertEqual(result.dtype, np.object_)\n\n def test_take_preserve_name(self):\n index = Int64Index([1, 2, 3, 4], name='foo')\n taken = index.take([3, 0, 1])\n self.assertEqual(index.name, taken.name)\n\n def test_int_name_format(self):\n from pandas import Series, DataFrame\n index = Index(['a', 'b', 'c'], name=0)\n s = Series(lrange(3), index)\n df = DataFrame(lrange(3), index=index)\n repr(s)\n repr(df)\n\n def test_print_unicode_columns(self):\n df = pd.DataFrame(\n {u(\"\\u05d0\"): [1, 2, 3], 
\"\\u05d1\": [4, 5, 6], \"c\": [7, 8, 9]})\n repr(df.columns) # should not raise UnicodeDecodeError\n\n def test_repr_summary(self):\n with cf.option_context('display.max_seq_items', 10):\n r = repr(pd.Index(np.arange(1000)))\n self.assertTrue(len(r) < 200)\n self.assertTrue(\"...\" in r)\n\n def test_repr_roundtrip(self):\n tm.assert_index_equal(eval(repr(self.index)), self.index)\n\n def test_unicode_string_with_unicode(self):\n idx = Index(lrange(1000))\n\n if compat.PY3:\n str(idx)\n else:\n compat.text_type(idx)\n\n def test_bytestring_with_unicode(self):\n idx = Index(lrange(1000))\n if compat.PY3:\n bytes(idx)\n else:\n str(idx)\n\n def test_slice_keep_name(self):\n idx = Int64Index([1, 2], name='asdf')\n self.assertEqual(idx.name, idx[1:].name)\n\n def test_ufunc_coercions(self):\n idx = pd.Int64Index([1, 2, 3, 4, 5], name='x')\n\n result = np.sqrt(idx)\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')\n tm.assert_index_equal(result, exp)\n\n result = np.divide(idx, 2.)\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')\n tm.assert_index_equal(result, exp)\n\n # _evaluate_numeric_binop\n result = idx + 2.\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index([3., 4., 5., 6., 7.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx - 2.\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index([-1., 0., 1., 2., 3.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx * 1.\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index([1., 2., 3., 4., 5.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx / 2.\n tm.assertIsInstance(result, Float64Index)\n exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')\n tm.assert_index_equal(result, exp)\n\n\nclass DatetimeLike(Base):\n\n def test_str(self):\n\n # test the string repr\n idx = self.create_index()\n idx.name = 'foo'\n self.assertFalse(\"length=%s\" % len(idx) in str(idx))\n self.assertTrue(\"'foo'\" in str(idx))\n self.assertTrue(idx.__class__.__name__ in str(idx))\n\n if hasattr(idx,'tz'):\n if idx.tz is not None:\n self.assertTrue(idx.tz in str(idx))\n if hasattr(idx,'freq'):\n self.assertTrue(\"freq='%s'\" % idx.freqstr in str(idx))\n\n def test_view(self):\n super(DatetimeLike, self).test_view()\n\n i = self.create_index()\n\n i_view = i.view('i8')\n result = self._holder(i)\n tm.assert_index_equal(result, i)\n\n i_view = i.view(self._holder)\n result = self._holder(i)\n tm.assert_index_equal(result, i)\n\nclass TestDatetimeIndex(DatetimeLike, tm.TestCase):\n _holder = DatetimeIndex\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(index = tm.makeDateIndex(10))\n self.setup_indices()\n\n def create_index(self):\n return date_range('20130101', periods=5)\n\n def test_construction_with_alt(self):\n\n i = pd.date_range('20130101',periods=5,freq='H',tz='US/Eastern')\n i2 = DatetimeIndex(i, dtype=i.dtype)\n self.assert_index_equal(i, i2)\n\n i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)\n self.assert_index_equal(i, i2)\n\n i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)\n self.assert_index_equal(i, i2)\n\n i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)\n self.assert_index_equal(i, i2)\n\n # localize into the provided tz\n i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')\n expected = i.tz_localize(None).tz_localize('UTC')\n self.assert_index_equal(i2, 
expected)\n\n i2 = DatetimeIndex(i, tz='UTC')\n expected = i.tz_convert('UTC')\n self.assert_index_equal(i2, expected)\n\n # incompat tz/dtype\n self.assertRaises(ValueError, lambda : DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_get_loc(self):\n idx = pd.date_range('2000-01-01', periods=3)\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n self.assertEqual(idx.get_loc(idx[1], method), 1)\n self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)\n self.assertEqual(idx.get_loc(str(idx[1]), method), 1)\n if method is not None:\n self.assertEqual(idx.get_loc(idx[1], method,\n tolerance=pd.Timedelta('0 days')),\n 1)\n\n self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)\n self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)\n\n self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',\n tolerance='1 day'), 1)\n self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=pd.Timedelta('1D')), 1)\n self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=np.timedelta64(1, 'D')), 1)\n self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=timedelta(1)), 1)\n with tm.assertRaisesRegexp(ValueError, 'must be convertible'):\n idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')\n with tm.assertRaises(KeyError):\n idx.get_loc('2000-01-01T03', method='nearest',\n tolerance='2 hours')\n\n self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))\n self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))\n\n self.assertEqual(idx.get_loc('1999', method='nearest'), 0)\n self.assertEqual(idx.get_loc('2001', method='nearest'), 2)\n\n with tm.assertRaises(KeyError):\n idx.get_loc('1999', method='pad')\n with tm.assertRaises(KeyError):\n idx.get_loc('2001', method='backfill')\n\n with tm.assertRaises(KeyError):\n idx.get_loc('foobar')\n with tm.assertRaises(TypeError):\n idx.get_loc(slice(2))\n\n idx = pd.to_datetime(['2000-01-01', '2000-01-04'])\n self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)\n self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)\n self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))\n\n # time indexing\n idx = pd.date_range('2000-01-01', periods=24, freq='H')\n tm.assert_numpy_array_equal(idx.get_loc(time(12)), [12])\n tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), [])\n with tm.assertRaises(NotImplementedError):\n idx.get_loc(time(12, 30), method='pad')\n\n def test_get_indexer(self):\n idx = pd.date_range('2000-01-01', periods=3)\n tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])\n\n target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest', tolerance=pd.Timedelta('1 hour')),\n [0, -1, 1])\n with tm.assertRaises(ValueError):\n idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')\n\n def test_roundtrip_pickle_with_tz(self):\n\n # GH 8367\n # round-trip of timezone\n index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')\n unpickled = self.round_trip_pickle(index)\n self.assertTrue(index.equals(unpickled))\n\n def 
test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):\n # GH7774\n index = date_range('20130101', periods=3, tz='US/Eastern')\n self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')\n self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')\n\n def test_time_loc(self): # GH8667\n from datetime import time\n from pandas.index import _SIZE_CUTOFF\n\n ns = _SIZE_CUTOFF + np.array([-100, 100],dtype=np.int64)\n key = time(15, 11, 30)\n start = key.hour * 3600 + key.minute * 60 + key.second\n step = 24 * 3600\n\n for n in ns:\n idx = pd.date_range('2014-11-26', periods=n, freq='S')\n ts = pd.Series(np.random.randn(n), index=idx)\n i = np.arange(start, n, step)\n\n tm.assert_numpy_array_equal(ts.index.get_loc(key), i)\n tm.assert_series_equal(ts[key], ts.iloc[i])\n\n left, right = ts.copy(), ts.copy()\n left[key] *= -10\n right.iloc[i] *= -10\n tm.assert_series_equal(left, right)\n\n def test_time_overflow_for_32bit_machines(self):\n # GH8943. On some machines NumPy defaults to np.int32 (for example,\n # 32-bit Linux machines). In the function _generate_regular_range\n # found in tseries/index.py, `periods` gets multiplied by `strides`\n # (which has value 1e9) and since the max value for np.int32 is ~2e9,\n # and since those machines won't promote np.int32 to np.int64, we get\n # overflow.\n periods = np.int_(1000)\n\n idx1 = pd.date_range(start='2000', periods=periods, freq='S')\n self.assertEqual(len(idx1), periods)\n\n idx2 = pd.date_range(end='2000', periods=periods, freq='S')\n self.assertEqual(len(idx2), periods)\n\n def test_intersection(self):\n first = self.index\n second = self.index[5:]\n intersect = first.intersection(second)\n self.assertTrue(tm.equalContents(intersect, second))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.intersection(case)\n self.assertTrue(tm.equalContents(result, second))\n\n third = Index(['a', 'b', 'c'])\n result = first.intersection(third)\n expected = pd.Index([], dtype=object)\n self.assert_index_equal(result, expected)\n\n def test_union(self):\n first = self.index[:5]\n second = self.index[5:]\n everything = self.index\n union = first.union(second)\n self.assertTrue(tm.equalContents(union, everything))\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.union(case)\n self.assertTrue(tm.equalContents(result, everything))\n\n def test_nat(self):\n self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)\n\n\n def test_ufunc_coercions(self):\n idx = date_range('2011-01-01', periods=3, freq='2D', name='x')\n\n delta = np.timedelta64(1, 'D')\n for result in [idx + delta, np.add(idx, delta)]:\n tm.assertIsInstance(result, DatetimeIndex)\n exp = date_range('2011-01-02', periods=3, freq='2D', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, '2D')\n\n for result in [idx - delta, np.subtract(idx, delta)]:\n tm.assertIsInstance(result, DatetimeIndex)\n exp = date_range('2010-12-31', periods=3, freq='2D', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, '2D')\n\n delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),\n np.timedelta64(3, 'D')])\n for result in [idx + delta, np.add(idx, delta)]:\n tm.assertIsInstance(result, DatetimeIndex)\n exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],\n freq='3D', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, '3D')\n\n for result in [idx - 
delta, np.subtract(idx, delta)]:\n tm.assertIsInstance(result, DatetimeIndex)\n exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],\n freq='D', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, 'D')\n\n\nclass TestPeriodIndex(DatetimeLike, tm.TestCase):\n _holder = PeriodIndex\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(index = tm.makePeriodIndex(10))\n self.setup_indices()\n\n def create_index(self):\n return period_range('20130101', periods=5, freq='D')\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_get_loc(self):\n idx = pd.period_range('2000-01-01', periods=3)\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n self.assertEqual(idx.get_loc(idx[1], method), 1)\n self.assertEqual(idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)\n self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)\n self.assertEqual(idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)\n self.assertEqual(idx.get_loc(str(idx[1]), method), 1)\n\n idx = pd.period_range('2000-01-01', periods=5)[::2]\n self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',\n tolerance='1 day'), 1)\n self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',\n tolerance=pd.Timedelta('1D')), 1)\n self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',\n tolerance=np.timedelta64(1, 'D')), 1)\n self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',\n tolerance=timedelta(1)), 1)\n with tm.assertRaisesRegexp(ValueError, 'must be convertible'):\n idx.get_loc('2000-01-10', method='nearest', tolerance='foo')\n\n msg = 'Input has different freq from PeriodIndex\\\\(freq=D\\\\)'\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')\n with tm.assertRaises(KeyError):\n idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')\n\n def test_get_indexer(self):\n idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')\n tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])\n\n target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',\n '2000-01-02T01'], freq='H')\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest', tolerance='1 hour'),\n [0, -1, 1])\n\n msg = 'Input has different freq from PeriodIndex\\\\(freq=H\\\\)'\n with self.assertRaisesRegexp(ValueError, msg):\n idx.get_indexer(target, 'nearest', tolerance='1 minute')\n\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest', tolerance='1 day'), [0, 1, 1])\n\n def test_repeat(self):\n # GH10183\n idx = pd.period_range('2000-01-01', periods=3, freq='D')\n res = idx.repeat(3)\n exp = PeriodIndex(idx.values.repeat(3), freq='D')\n self.assert_index_equal(res, exp)\n self.assertEqual(res.freqstr, 'D')\n\n def test_period_index_indexer(self):\n\n #GH4125\n idx = pd.period_range('2002-01','2003-12', freq='M')\n df = pd.DataFrame(pd.np.random.randn(24,10), index=idx)\n self.assert_frame_equal(df, df.ix[idx])\n self.assert_frame_equal(df, df.ix[list(idx)])\n self.assert_frame_equal(df, df.loc[list(idx)])\n self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])\n self.assert_frame_equal(df, df.loc[list(idx)])\n\nclass TestTimedeltaIndex(DatetimeLike, tm.TestCase):\n _holder = TimedeltaIndex\n _multiprocess_can_split_ = 
True\n\n def setUp(self):\n self.indices = dict(index = tm.makeTimedeltaIndex(10))\n self.setup_indices()\n\n def create_index(self):\n return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n\n def test_get_loc(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n self.assertEqual(idx.get_loc(idx[1], method), 1)\n self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)\n self.assertEqual(idx.get_loc(str(idx[1]), method), 1)\n\n self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)\n self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)\n self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)\n\n with tm.assertRaisesRegexp(ValueError, 'must be convertible'):\n idx.get_loc(idx[1], method='nearest', tolerance='foo')\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)\n\n def test_get_indexer(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])\n\n target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest',\n tolerance=pd.Timedelta('1 hour')),\n [0, -1, 1])\n\n def test_numeric_compat(self):\n\n idx = self._holder(np.arange(5,dtype='int64'))\n didx = self._holder(np.arange(5,dtype='int64')**2\n )\n result = idx * 1\n tm.assert_index_equal(result, idx)\n\n result = 1 * idx\n tm.assert_index_equal(result, idx)\n\n result = idx / 1\n tm.assert_index_equal(result, idx)\n\n result = idx // 1\n tm.assert_index_equal(result, idx)\n\n result = idx * np.array(5,dtype='int64')\n tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))\n\n result = idx * np.arange(5,dtype='int64')\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5,dtype='int64'))\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5,dtype='float64')+0.1)\n tm.assert_index_equal(result,\n Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))\n\n\n # invalid\n self.assertRaises(TypeError, lambda : idx * idx)\n self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))\n self.assertRaises(ValueError, lambda : idx * np.array([1,2]))\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_ufunc_coercions(self):\n # normal ops are also tested in tseries/test_timedeltas.py\n idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],\n freq='2H', name='x')\n\n for result in [idx * 2, np.multiply(idx, 2)]:\n tm.assertIsInstance(result, TimedeltaIndex)\n exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],\n freq='4H', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, '4H')\n\n for result in [idx / 2, np.divide(idx, 2)]:\n tm.assertIsInstance(result, TimedeltaIndex)\n exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],\n freq='H', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, 'H')\n\n idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],\n freq='2H', name='x')\n for result in [ - idx, np.negative(idx)]:\n tm.assertIsInstance(result, TimedeltaIndex)\n exp = TimedeltaIndex(['-2H', 
'-4H', '-6H', '-8H', '-10H'],\n freq='-2H', name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, '-2H')\n\n idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],\n freq='H', name='x')\n for result in [ abs(idx), np.absolute(idx)]:\n tm.assertIsInstance(result, TimedeltaIndex)\n exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],\n freq=None, name='x')\n tm.assert_index_equal(result, exp)\n self.assertEqual(result.freq, None)\n\n\nclass TestMultiIndex(Base, tm.TestCase):\n _holder = MultiIndex\n _multiprocess_can_split_ = True\n _compat_props = ['shape', 'ndim', 'size', 'itemsize']\n\n def setUp(self):\n major_axis = Index(['foo', 'bar', 'baz', 'qux'])\n minor_axis = Index(['one', 'two'])\n\n major_labels = np.array([0, 0, 1, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n self.index_names = ['first', 'second']\n self.indices = dict(index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels],\n names=self.index_names, verify_integrity=False))\n self.setup_indices()\n\n def create_index(self):\n return self.index\n\n def test_boolean_context_compat2(self):\n\n # boolean context compat\n # GH7897\n i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])\n common = i1.intersection(i2)\n\n def f():\n if common:\n pass\n tm.assertRaisesRegexp(ValueError,'The truth value of a',f)\n\n def test_labels_dtypes(self):\n\n # GH 8456\n i = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n self.assertTrue(i.labels[0].dtype == 'int8')\n self.assertTrue(i.labels[1].dtype == 'int8')\n\n i = MultiIndex.from_product([['a'],range(40)])\n self.assertTrue(i.labels[1].dtype == 'int8')\n i = MultiIndex.from_product([['a'],range(400)])\n self.assertTrue(i.labels[1].dtype == 'int16')\n i = MultiIndex.from_product([['a'],range(40000)])\n self.assertTrue(i.labels[1].dtype == 'int32')\n\n i = pd.MultiIndex.from_product([['a'],range(1000)])\n self.assertTrue((i.labels[0]>=0).all())\n self.assertTrue((i.labels[1]>=0).all())\n\n def test_set_name_methods(self):\n # so long as these are synonyms, we don't need to test set_names\n self.assertEqual(self.index.rename, self.index.set_names)\n new_names = [name + \"SUFFIX\" for name in self.index_names]\n ind = self.index.set_names(new_names)\n self.assertEqual(self.index.names, self.index_names)\n self.assertEqual(ind.names, new_names)\n with assertRaisesRegexp(ValueError, \"^Length\"):\n ind.set_names(new_names + new_names)\n new_names2 = [name + \"SUFFIX2\" for name in new_names]\n res = ind.set_names(new_names2, inplace=True)\n self.assertIsNone(res)\n self.assertEqual(ind.names, new_names2)\n\n # set names for specific level (# GH7792)\n ind = self.index.set_names(new_names[0], level=0)\n self.assertEqual(self.index.names, self.index_names)\n self.assertEqual(ind.names, [new_names[0], self.index_names[1]])\n\n res = ind.set_names(new_names2[0], level=0, inplace=True)\n self.assertIsNone(res)\n self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])\n\n # set names for multiple levels\n ind = self.index.set_names(new_names, level=[0, 1])\n self.assertEqual(self.index.names, self.index_names)\n self.assertEqual(ind.names, new_names)\n\n res = ind.set_names(new_names2, level=[0, 1], inplace=True)\n self.assertIsNone(res)\n self.assertEqual(ind.names, new_names2)\n\n\n def test_set_levels(self):\n\n # side note - you probably wouldn't want to use levels and labels\n # directly like this - but it is possible.\n levels, labels = self.index.levels, 
self.index.labels\n new_levels = [[lev + 'a' for lev in level] for level in levels]\n\n def assert_matching(actual, expected):\n # avoid specifying internal representation\n # as much as possible\n self.assertEqual(len(actual), len(expected))\n for act, exp in zip(actual, expected):\n act = np.asarray(act)\n exp = np.asarray(exp)\n assert_almost_equal(act, exp)\n\n # level changing [w/o mutation]\n ind2 = self.index.set_levels(new_levels)\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n # level changing [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.levels, new_levels)\n\n # level changing specific level [w/o mutation]\n ind2 = self.index.set_levels(new_levels[0], level=0)\n assert_matching(ind2.levels, [new_levels[0], levels[1]])\n assert_matching(self.index.levels, levels)\n\n ind2 = self.index.set_levels(new_levels[1], level=1)\n assert_matching(ind2.levels, [levels[0], new_levels[1]])\n assert_matching(self.index.levels, levels)\n\n # level changing multiple levels [w/o mutation]\n ind2 = self.index.set_levels(new_levels, level=[0, 1])\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n # level changing specific level [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.levels, [new_levels[0], levels[1]])\n assert_matching(self.index.levels, levels)\n\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.levels, [levels[0], new_levels[1]])\n assert_matching(self.index.levels, levels)\n\n # level changing multiple levels [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n def test_set_labels(self):\n # side note - you probably wouldn't want to use levels and labels\n # directly like this - but it is possible.\n levels, labels = self.index.levels, self.index.labels\n major_labels, minor_labels = labels\n major_labels = [(x + 1) % 3 for x in major_labels]\n minor_labels = [(x + 1) % 1 for x in minor_labels]\n new_labels = [major_labels, minor_labels]\n\n def assert_matching(actual, expected):\n # avoid specifying internal representation\n # as much as possible\n self.assertEqual(len(actual), len(expected))\n for act, exp in zip(actual, expected):\n act = np.asarray(act)\n exp = np.asarray(exp)\n assert_almost_equal(act, exp)\n\n # label changing [w/o mutation]\n ind2 = self.index.set_labels(new_labels)\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n # label changing [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.labels, new_labels)\n\n # label changing specific level [w/o mutation]\n ind2 = self.index.set_labels(new_labels[0], level=0)\n assert_matching(ind2.labels, [new_labels[0], labels[1]])\n assert_matching(self.index.labels, labels)\n\n ind2 = self.index.set_labels(new_labels[1], level=1)\n assert_matching(ind2.labels, [labels[0], new_labels[1]])\n assert_matching(self.index.labels, labels)\n\n # label changing multiple levels 
[w/o mutation]\n ind2 = self.index.set_labels(new_labels, level=[0, 1])\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n # label changing specific level [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.labels, [new_labels[0], labels[1]])\n assert_matching(self.index.labels, labels)\n\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.labels, [labels[0], new_labels[1]])\n assert_matching(self.index.labels, labels)\n\n # label changing multiple levels [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)\n self.assertIsNone(inplace_return)\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n def test_set_levels_labels_names_bad_input(self):\n levels, labels = self.index.levels, self.index.labels\n names = self.index.names\n\n with tm.assertRaisesRegexp(ValueError, 'Length of levels'):\n self.index.set_levels([levels[0]])\n\n with tm.assertRaisesRegexp(ValueError, 'Length of labels'):\n self.index.set_labels([labels[0]])\n\n with tm.assertRaisesRegexp(ValueError, 'Length of names'):\n self.index.set_names([names[0]])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):\n self.index.set_levels(levels[0])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):\n self.index.set_labels(labels[0])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assertRaisesRegexp(TypeError, 'list-like'):\n self.index.set_names(names[0])\n\n # should have equal lengths\n with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):\n self.index.set_levels(levels[0], level=[0, 1])\n\n with tm.assertRaisesRegexp(TypeError, 'list-like'):\n self.index.set_levels(levels, level=0)\n\n # should have equal lengths\n with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):\n self.index.set_labels(labels[0], level=[0, 1])\n\n with tm.assertRaisesRegexp(TypeError, 'list-like'):\n self.index.set_labels(labels, level=0)\n\n # should have equal lengths\n with tm.assertRaisesRegexp(ValueError, 'Length of names'):\n self.index.set_names(names[0], level=[0, 1])\n\n with tm.assertRaisesRegexp(TypeError, 'string'):\n self.index.set_names(names, level=0)\n\n def test_metadata_immutable(self):\n levels, labels = self.index.levels, self.index.labels\n # shouldn't be able to set at either the top level or base level\n mutable_regex = re.compile('does not support mutable operations')\n with assertRaisesRegexp(TypeError, mutable_regex):\n levels[0] = levels[0]\n with assertRaisesRegexp(TypeError, mutable_regex):\n levels[0][0] = levels[0][0]\n # ditto for labels\n with assertRaisesRegexp(TypeError, mutable_regex):\n labels[0] = labels[0]\n with assertRaisesRegexp(TypeError, mutable_regex):\n labels[0][0] = labels[0][0]\n # and for names\n names = self.index.names\n with assertRaisesRegexp(TypeError, mutable_regex):\n names[0] = names[0]\n\n def test_inplace_mutation_resets_values(self):\n levels = [['a', 'b', 'c'], [4]]\n levels2 = [[1, 2, 3], ['a']]\n labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]\n mi1 = MultiIndex(levels=levels, labels=labels)\n mi2 = MultiIndex(levels=levels2, 
labels=labels)\n vals = mi1.values.copy()\n vals2 = mi2.values.copy()\n self.assertIsNotNone(mi1._tuples)\n\n # make sure level setting works\n new_vals = mi1.set_levels(levels2).values\n assert_almost_equal(vals2, new_vals)\n # non-inplace doesn't kill _tuples [implementation detail]\n assert_almost_equal(mi1._tuples, vals)\n # and values is still same too\n assert_almost_equal(mi1.values, vals)\n\n # inplace should kill _tuples\n mi1.set_levels(levels2, inplace=True)\n assert_almost_equal(mi1.values, vals2)\n\n # make sure label setting works too\n labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]\n exp_values = np.empty((6, ), dtype=object)\n exp_values[:] = [(long(1), 'a')] * 6\n # must be 1d array of tuples\n self.assertEqual(exp_values.shape, (6, ))\n new_values = mi2.set_labels(labels2).values\n # not inplace shouldn't change\n assert_almost_equal(mi2._tuples, vals2)\n # should have correct values\n assert_almost_equal(exp_values, new_values)\n\n # and again setting inplace should kill _tuples, etc\n mi2.set_labels(labels2, inplace=True)\n assert_almost_equal(mi2.values, new_values)\n\n def test_copy_in_constructor(self):\n levels = np.array([\"a\", \"b\", \"c\"])\n labels = np.array([1, 1, 2, 0, 0, 1, 1])\n val = labels[0]\n mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],\n copy=True)\n self.assertEqual(mi.labels[0][0], val)\n labels[0] = 15\n self.assertEqual(mi.labels[0][0], val)\n val = levels[0]\n levels[0] = \"PANDA\"\n self.assertEqual(mi.levels[0][0], val)\n\n def test_set_value_keeps_names(self):\n # motivating example from #3742\n lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']\n lev2 = ['1', '2', '3'] * 2\n idx = pd.MultiIndex.from_arrays(\n [lev1, lev2],\n names=['Name', 'Number'])\n df = pd.DataFrame(\n np.random.randn(6, 4),\n columns=['one', 'two', 'three', 'four'],\n index=idx)\n df = df.sortlevel()\n self.assertIsNone(df.is_copy)\n self.assertEqual(df.index.names, ('Name', 'Number'))\n df = df.set_value(('grethe', '4'), 'one', 99.34)\n self.assertIsNone(df.is_copy)\n self.assertEqual(df.index.names, ('Name', 'Number'))\n\n def test_names(self):\n\n # names are assigned in __init__\n names = self.index_names\n level_names = [level.name for level in self.index.levels]\n self.assertEqual(names, level_names)\n\n # setting bad names on existing\n index = self.index\n assertRaisesRegexp(ValueError, \"^Length of names\", setattr, index,\n \"names\", list(index.names) + [\"third\"])\n assertRaisesRegexp(ValueError, \"^Length of names\", setattr, index,\n \"names\", [])\n\n # initializing with bad names (should always be equivalent)\n major_axis, minor_axis = self.index.levels\n major_labels, minor_labels = self.index.labels\n assertRaisesRegexp(ValueError, \"^Length of names\", MultiIndex,\n levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels],\n names=['first'])\n assertRaisesRegexp(ValueError, \"^Length of names\", MultiIndex,\n levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels],\n names=['first', 'second', 'third'])\n\n # names are assigned\n index.names = [\"a\", \"b\"]\n ind_names = list(index.names)\n level_names = [level.name for level in index.levels]\n self.assertEqual(ind_names, level_names)\n\n def test_reference_duplicate_name(self):\n idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])\n self.assertTrue(idx._reference_duplicate_name('x'))\n\n idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])\n 
self.assertFalse(idx._reference_duplicate_name('x'))\n\n def test_astype(self):\n expected = self.index.copy()\n actual = self.index.astype('O')\n assert_copy(actual.levels, expected.levels)\n assert_copy(actual.labels, expected.labels)\n self.check_level_names(actual, expected.names)\n\n with assertRaisesRegexp(TypeError, \"^Setting.*dtype.*object\"):\n self.index.astype(np.dtype(int))\n\n def test_constructor_single_level(self):\n single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],\n labels=[[0, 1, 2, 3]],\n names=['first'])\n tm.assertIsInstance(single_level, Index)\n self.assertNotIsInstance(single_level, MultiIndex)\n self.assertEqual(single_level.name, 'first')\n\n single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],\n labels=[[0, 1, 2, 3]])\n self.assertIsNone(single_level.name)\n\n def test_constructor_no_levels(self):\n assertRaisesRegexp(ValueError, \"non-zero number of levels/labels\",\n MultiIndex, levels=[], labels=[])\n both_re = re.compile('Must pass both levels and labels')\n with tm.assertRaisesRegexp(TypeError, both_re):\n MultiIndex(levels=[])\n with tm.assertRaisesRegexp(TypeError, both_re):\n MultiIndex(labels=[])\n\n def test_constructor_mismatched_label_levels(self):\n labels = [np.array([1]), np.array([2]), np.array([3])]\n levels = [\"a\"]\n assertRaisesRegexp(ValueError, \"Length of levels and labels must be\"\n \" the same\", MultiIndex, levels=levels,\n labels=labels)\n length_error = re.compile('>= length of level')\n label_error = re.compile(r'Unequal label lengths: \\[4, 2\\]')\n\n # important to check that it's looking at the right thing.\n with tm.assertRaisesRegexp(ValueError, length_error):\n MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])\n\n with tm.assertRaisesRegexp(ValueError, label_error):\n MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])\n\n # external API\n with tm.assertRaisesRegexp(ValueError, length_error):\n self.index.copy().set_levels([['a'], ['b']])\n\n with tm.assertRaisesRegexp(ValueError, label_error):\n self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])\n\n # deprecated properties\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n with tm.assertRaisesRegexp(ValueError, length_error):\n self.index.copy().levels = [['a'], ['b']]\n\n with tm.assertRaisesRegexp(ValueError, label_error):\n self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]\n\n\n def assert_multiindex_copied(self, copy, original):\n # levels shoudl be (at least, shallow copied)\n assert_copy(copy.levels, original.levels)\n\n assert_almost_equal(copy.labels, original.labels)\n\n # labels doesn't matter which way copied\n assert_almost_equal(copy.labels, original.labels)\n self.assertIsNot(copy.labels, original.labels)\n\n # names doesn't matter which way copied\n self.assertEqual(copy.names, original.names)\n self.assertIsNot(copy.names, original.names)\n\n # sort order should be copied\n self.assertEqual(copy.sortorder, original.sortorder)\n\n def test_copy(self):\n i_copy = self.index.copy()\n\n self.assert_multiindex_copied(i_copy, self.index)\n\n def test_shallow_copy(self):\n i_copy = self.index._shallow_copy()\n\n self.assert_multiindex_copied(i_copy, self.index)\n\n def test_view(self):\n i_view = self.index.view()\n\n self.assert_multiindex_copied(i_view, self.index)\n\n def check_level_names(self, index, names):\n self.assertEqual([level.name for level in index.levels], list(names))\n\n def test_changing_names(self):\n\n # names should be applied to levels\n level_names = 
[level.name for level in self.index.levels]\n self.check_level_names(self.index, self.index.names)\n\n view = self.index.view()\n copy = self.index.copy()\n shallow_copy = self.index._shallow_copy()\n\n # changing names should change level names on object\n new_names = [name + \"a\" for name in self.index.names]\n self.index.names = new_names\n self.check_level_names(self.index, new_names)\n\n # but not on copies\n self.check_level_names(view, level_names)\n self.check_level_names(copy, level_names)\n self.check_level_names(shallow_copy, level_names)\n\n # and copies shouldn't change original\n shallow_copy.names = [name + \"c\" for name in shallow_copy.names]\n self.check_level_names(self.index, new_names)\n\n def test_duplicate_names(self):\n self.index.names = ['foo', 'foo']\n assertRaisesRegexp(KeyError, 'Level foo not found',\n self.index._get_level_number, 'foo')\n\n def test_get_level_number_integer(self):\n self.index.names = [1, 0]\n self.assertEqual(self.index._get_level_number(1), 0)\n self.assertEqual(self.index._get_level_number(0), 1)\n self.assertRaises(IndexError, self.index._get_level_number, 2)\n assertRaisesRegexp(KeyError, 'Level fourth not found',\n self.index._get_level_number, 'fourth')\n\n def test_from_arrays(self):\n arrays = []\n for lev, lab in zip(self.index.levels, self.index.labels):\n arrays.append(np.asarray(lev).take(lab))\n\n result = MultiIndex.from_arrays(arrays)\n self.assertEqual(list(result), list(self.index))\n\n # infer correctly\n result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])\n self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))\n self.assertTrue(result.levels[1].equals(Index(['a','b'])))\n\n def test_from_product(self):\n\n first = ['foo', 'bar', 'buz']\n second = ['a', 'b', 'c']\n names = ['first', 'second']\n result = MultiIndex.from_product([first, second], names=names)\n\n tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),\n ('bar', 'a'), ('bar', 'b'), ('bar', 'c'),\n ('buz', 'a'), ('buz', 'b'), ('buz', 'c')]\n expected = MultiIndex.from_tuples(tuples, names=names)\n\n tm.assert_numpy_array_equal(result, expected)\n self.assertEqual(result.names, names)\n\n def test_from_product_datetimeindex(self):\n dt_index = date_range('2000-01-01', periods=2)\n mi = pd.MultiIndex.from_product([[1, 2], dt_index])\n etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),\n (1, pd.Timestamp('2000-01-02')),\n (2, pd.Timestamp('2000-01-01')),\n (2, pd.Timestamp('2000-01-02'))])\n tm.assert_numpy_array_equal(mi.values, etalon)\n\n def test_values_boxed(self):\n tuples = [(1, pd.Timestamp('2000-01-01')),\n (2, pd.NaT),\n (3, pd.Timestamp('2000-01-03')),\n (1, pd.Timestamp('2000-01-04')),\n (2, pd.Timestamp('2000-01-02')),\n (3, pd.Timestamp('2000-01-03'))]\n mi = pd.MultiIndex.from_tuples(tuples)\n tm.assert_numpy_array_equal(mi.values, pd.lib.list_to_object_array(tuples))\n # Check that code branches for boxed values produce identical results\n tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values)\n\n def test_append(self):\n result = self.index[:3].append(self.index[3:])\n self.assertTrue(result.equals(self.index))\n\n foos = [self.index[:1], self.index[1:3], self.index[3:]]\n result = foos[0].append(foos[1:])\n self.assertTrue(result.equals(self.index))\n\n # empty\n result = self.index.append([])\n self.assertTrue(result.equals(self.index))\n\n def test_get_level_values(self):\n result = self.index.get_level_values(0)\n expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']\n 
tm.assert_numpy_array_equal(result, expected)\n\n self.assertEqual(result.name, 'first')\n\n result = self.index.get_level_values('first')\n expected = self.index.get_level_values(0)\n tm.assert_numpy_array_equal(result, expected)\n\n # GH 10460\n index = MultiIndex(levels=[CategoricalIndex(['A', 'B']),\n CategoricalIndex([1, 2, 3])],\n labels=[np.array([0, 0, 0, 1, 1, 1]),\n np.array([0, 1, 2, 0, 1, 2])])\n exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])\n self.assert_index_equal(index.get_level_values(0), exp)\n exp = CategoricalIndex([1, 2 ,3, 1, 2, 3])\n self.assert_index_equal(index.get_level_values(1), exp)\n\n def test_get_level_values_na(self):\n arrays = [['a', 'b', 'b'], [1, np.nan, 2]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = [1, np.nan, 2]\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n\n arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = [np.nan, np.nan, 2]\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n\n arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(0)\n expected = [np.nan, np.nan, np.nan]\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n values = index.get_level_values(1)\n expected = np.array(['a', np.nan, 1],dtype=object)\n tm.assert_numpy_array_equal(values.values, expected)\n\n arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = pd.DatetimeIndex([0, 1, pd.NaT])\n tm.assert_numpy_array_equal(values.values, expected.values)\n\n arrays = [[], []]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(0)\n self.assertEqual(values.shape, (0,))\n\n def test_reorder_levels(self):\n # this blows up\n assertRaisesRegexp(IndexError, '^Too many levels',\n self.index.reorder_levels, [2, 1, 0])\n\n def test_nlevels(self):\n self.assertEqual(self.index.nlevels, 2)\n\n def test_iter(self):\n result = list(self.index)\n expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),\n ('baz', 'two'), ('qux', 'one'), ('qux', 'two')]\n self.assertEqual(result, expected)\n\n def test_legacy_pickle(self):\n if compat.PY3:\n raise nose.SkipTest(\"testing for legacy pickles not support on py3\")\n\n path = tm.get_data_path('multiindex_v1.pickle')\n obj = pd.read_pickle(path)\n\n obj2 = MultiIndex.from_tuples(obj.values)\n self.assertTrue(obj.equals(obj2))\n\n res = obj.get_indexer(obj)\n exp = np.arange(len(obj))\n assert_almost_equal(res, exp)\n\n res = obj.get_indexer(obj2[::-1])\n exp = obj.get_indexer(obj[::-1])\n exp2 = obj2.get_indexer(obj2[::-1])\n assert_almost_equal(res, exp)\n assert_almost_equal(exp, exp2)\n\n def test_legacy_v2_unpickle(self):\n\n # 0.7.3 -> 0.8.0 format manage\n path = tm.get_data_path('mindex_073.pickle')\n obj = pd.read_pickle(path)\n\n obj2 = MultiIndex.from_tuples(obj.values)\n self.assertTrue(obj.equals(obj2))\n\n res = obj.get_indexer(obj)\n exp = np.arange(len(obj))\n assert_almost_equal(res, exp)\n\n res = obj.get_indexer(obj2[::-1])\n exp = obj.get_indexer(obj[::-1])\n exp2 = obj2.get_indexer(obj2[::-1])\n assert_almost_equal(res, exp)\n assert_almost_equal(exp, exp2)\n\n def test_roundtrip_pickle_with_tz(self):\n\n # GH 8367\n # round-trip of timezone\n 
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])\n unpickled = self.round_trip_pickle(index)\n self.assertTrue(index.equal_levels(unpickled))\n\n def test_from_tuples_index_values(self):\n result = MultiIndex.from_tuples(self.index)\n self.assertTrue((result.values == self.index.values).all())\n\n def test_contains(self):\n self.assertIn(('foo', 'two'), self.index)\n self.assertNotIn(('bar', 'two'), self.index)\n self.assertNotIn(None, self.index)\n\n def test_is_all_dates(self):\n self.assertFalse(self.index.is_all_dates)\n\n def test_is_numeric(self):\n # MultiIndex is never numeric\n self.assertFalse(self.index.is_numeric())\n\n def test_getitem(self):\n # scalar\n self.assertEqual(self.index[2], ('bar', 'one'))\n\n # slice\n result = self.index[2:5]\n expected = self.index[[2, 3, 4]]\n self.assertTrue(result.equals(expected))\n\n # boolean\n result = self.index[[True, False, True, False, True, True]]\n result2 = self.index[np.array([True, False, True, False, True, True])]\n expected = self.index[[0, 2, 4, 5]]\n self.assertTrue(result.equals(expected))\n self.assertTrue(result2.equals(expected))\n\n def test_getitem_group_select(self):\n sorted_idx, _ = self.index.sortlevel(0)\n self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))\n self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))\n\n def test_get_loc(self):\n self.assertEqual(self.index.get_loc(('foo', 'two')), 1)\n self.assertEqual(self.index.get_loc(('baz', 'two')), 3)\n self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))\n self.assertRaises(KeyError, self.index.get_loc, 'quux')\n\n self.assertRaises(NotImplementedError, self.index.get_loc, 'foo',\n method='nearest')\n\n # 3 levels\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n self.assertRaises(KeyError, index.get_loc, (1, 1))\n self.assertEqual(index.get_loc((2, 0)), slice(3, 5))\n\n def test_get_loc_duplicates(self):\n index = Index([2, 2, 2, 2])\n result = index.get_loc(2)\n expected = slice(0, 4)\n self.assertEqual(result, expected)\n # self.assertRaises(Exception, index.get_loc, 2)\n\n index = Index(['c', 'a', 'a', 'b', 'b'])\n rs = index.get_loc('c')\n xp = 0\n assert(rs == xp)\n\n def test_get_loc_level(self):\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n loc, new_index = index.get_loc_level((0, 1))\n expected = slice(1, 2)\n exp_index = index[expected].droplevel(0).droplevel(0)\n self.assertEqual(loc, expected)\n self.assertTrue(new_index.equals(exp_index))\n\n loc, new_index = index.get_loc_level((0, 1, 0))\n expected = 1\n self.assertEqual(loc, expected)\n self.assertIsNone(new_index)\n\n self.assertRaises(KeyError, index.get_loc_level, (2, 2))\n\n index = MultiIndex(levels=[[2000], lrange(4)],\n labels=[np.array([0, 0, 0, 0]),\n np.array([0, 1, 2, 3])])\n result, new_index = index.get_loc_level((2000, slice(None, None)))\n expected = slice(None, None)\n self.assertEqual(result, expected)\n self.assertTrue(new_index.equals(index.droplevel(0)))\n\n def test_slice_locs(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n\n slob = slice(*idx.slice_locs(df.index[5], df.index[15]))\n sliced = stacked[slob]\n expected = 
df[5:16].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),\n df.index[15] - timedelta(seconds=30)))\n sliced = stacked[slob]\n expected = df[6:15].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n def test_slice_locs_with_type_mismatch(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,\n (1, 3))\n assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,\n df.index[5] + timedelta(seconds=30), (5, 2))\n df = tm.makeCustomDataframe(5, 5)\n stacked = df.stack()\n idx = stacked.index\n with assertRaisesRegexp(TypeError, '^Level type mismatch'):\n idx.slice_locs(timedelta(seconds=30))\n # TODO: Try creating a UnicodeDecodeError in exception message\n with assertRaisesRegexp(TypeError, '^Level type mismatch'):\n idx.slice_locs(df.index[1], (16, \"a\"))\n\n def test_slice_locs_not_sorted(self):\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n assertRaisesRegexp(KeyError, \"[Kk]ey length.*greater than MultiIndex\"\n \" lexsort depth\", index.slice_locs, (1, 0, 1),\n (2, 1, 0))\n\n # works\n sorted_index, _ = index.sortlevel(0)\n # should there be a test case here???\n sorted_index.slice_locs((1, 0, 1), (2, 1, 0))\n\n def test_slice_locs_partial(self):\n sorted_idx, _ = self.index.sortlevel(0)\n\n result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))\n self.assertEqual(result, (1, 5))\n\n result = sorted_idx.slice_locs(None, ('qux', 'one'))\n self.assertEqual(result, (0, 5))\n\n result = sorted_idx.slice_locs(('foo', 'two'), None)\n self.assertEqual(result, (1, len(sorted_idx)))\n\n result = sorted_idx.slice_locs('bar', 'baz')\n self.assertEqual(result, (2, 4))\n\n def test_slice_locs_not_contained(self):\n # some searchsorted action\n\n index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],\n labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],\n [0, 1, 2, 1, 2, 2, 0, 1, 2]],\n sortorder=0)\n\n result = index.slice_locs((1, 0), (5, 2))\n self.assertEqual(result, (3, 6))\n\n result = index.slice_locs(1, 5)\n self.assertEqual(result, (3, 6))\n\n result = index.slice_locs((2, 2), (5, 2))\n self.assertEqual(result, (3, 6))\n\n result = index.slice_locs(2, 5)\n self.assertEqual(result, (3, 6))\n\n result = index.slice_locs((1, 0), (6, 3))\n self.assertEqual(result, (3, 8))\n\n result = index.slice_locs(-1, 10)\n self.assertEqual(result, (0, len(index)))\n\n def test_consistency(self):\n # need to construct an overflow\n major_axis = lrange(70000)\n minor_axis = lrange(10)\n\n major_labels = np.arange(70000)\n minor_labels = np.repeat(lrange(10), 7000)\n\n # the fact that is works means it's consistent\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n # inconsistent\n major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n self.assertFalse(index.is_unique)\n\n def test_truncate(self):\n major_axis = Index(lrange(4))\n minor_axis = Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n 
result = index.truncate(before=1)\n self.assertNotIn('foo', result.levels[0])\n self.assertIn(1, result.levels[0])\n\n result = index.truncate(after=1)\n self.assertNotIn(2, result.levels[0])\n self.assertIn(1, result.levels[0])\n\n result = index.truncate(before=1, after=2)\n self.assertEqual(len(result.levels[0]), 2)\n\n # after < before\n self.assertRaises(ValueError, index.truncate, 3, 1)\n\n def test_get_indexer(self):\n major_axis = Index(lrange(4))\n minor_axis = Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 0, 1, 0, 1])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n idx1 = index[:5]\n idx2 = index[[1, 3, 5]]\n\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, [1, 3, -1])\n\n r1 = idx2.get_indexer(idx1, method='pad')\n e1 = [-1, 0, 0, 1, 1]\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='pad')\n assert_almost_equal(r2, e1[::-1])\n\n rffill1 = idx2.get_indexer(idx1, method='ffill')\n assert_almost_equal(r1, rffill1)\n\n r1 = idx2.get_indexer(idx1, method='backfill')\n e1 = [0, 0, 1, 1, 2]\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='backfill')\n assert_almost_equal(r2, e1[::-1])\n\n rbfill1 = idx2.get_indexer(idx1, method='bfill')\n assert_almost_equal(r1, rbfill1)\n\n # pass non-MultiIndex\n r1 = idx1.get_indexer(idx2._tuple_index)\n rexp1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, rexp1)\n\n r1 = idx1.get_indexer([1, 2, 3])\n self.assertTrue((r1 == [-1, -1, -1]).all())\n\n # create index with duplicates\n idx1 = Index(lrange(10) + lrange(10))\n idx2 = Index(lrange(20))\n assertRaisesRegexp(InvalidIndexError, \"Reindexing only valid with\"\n \" uniquely valued Index objects\",\n idx1.get_indexer, idx2)\n\n def test_get_indexer_nearest(self):\n midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])\n with tm.assertRaises(NotImplementedError):\n midx.get_indexer(['a'], method='nearest')\n with tm.assertRaises(NotImplementedError):\n midx.get_indexer(['a'], method='pad', tolerance=2)\n\n def test_format(self):\n self.index.format()\n self.index[:0].format()\n\n def test_format_integer_names(self):\n index = MultiIndex(levels=[[0, 1], [0, 1]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[0, 1])\n index.format(names=True)\n\n def test_format_sparse_display(self):\n index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],\n labels=[[0, 0, 0, 1, 1, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0]])\n\n result = index.format()\n self.assertEqual(result[3], '1 0 0 0')\n\n def test_format_sparse_config(self):\n warn_filters = warnings.filters\n warnings.filterwarnings('ignore',\n category=FutureWarning,\n module=\".*format\")\n # GH1538\n pd.set_option('display.multi_sparse', False)\n\n result = self.index.format()\n self.assertEqual(result[1], 'foo two')\n\n self.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_to_hierarchical(self):\n index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),\n (2, 'one'), (2, 'two')])\n result = index.to_hierarchical(3)\n expected = MultiIndex(levels=[[1, 2], ['one', 'two']],\n labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])\n tm.assert_index_equal(result, expected)\n self.assertEqual(result.names, index.names)\n\n # K > 1\n result = index.to_hierarchical(3, 2)\n expected = MultiIndex(levels=[[1, 2], ['one', 'two']],\n labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 
1]])\n tm.assert_index_equal(result, expected)\n self.assertEqual(result.names, index.names)\n\n # non-sorted\n index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),\n (2, 'a'), (2, 'b')],\n names=['N1', 'N2'])\n\n result = index.to_hierarchical(2)\n expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), (1, 'b'),\n (2, 'a'), (2, 'a'), (2, 'b'), (2, 'b')],\n names=['N1', 'N2'])\n tm.assert_index_equal(result, expected)\n self.assertEqual(result.names, index.names)\n\n def test_bounds(self):\n self.index._bounds\n\n def test_equals(self):\n self.assertTrue(self.index.equals(self.index))\n self.assertTrue(self.index.equal_levels(self.index))\n\n self.assertFalse(self.index.equals(self.index[:-1]))\n\n self.assertTrue(self.index.equals(self.index._tuple_index))\n\n # different number of levels\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n index2 = MultiIndex(levels=index.levels[:-1],\n labels=index.labels[:-1])\n self.assertFalse(index.equals(index2))\n self.assertFalse(index.equal_levels(index2))\n\n # levels are different\n major_axis = Index(lrange(4))\n minor_axis = Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 2, 3])\n minor_labels = np.array([0, 1, 0, 0, 1, 0])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n self.assertFalse(self.index.equals(index))\n self.assertFalse(self.index.equal_levels(index))\n\n # some of the labels are different\n major_axis = Index(['foo', 'bar', 'baz', 'qux'])\n minor_axis = Index(['one', 'two'])\n\n major_labels = np.array([0, 0, 2, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n self.assertFalse(self.index.equals(index))\n\n def test_identical(self):\n mi = self.index.copy()\n mi2 = self.index.copy()\n self.assertTrue(mi.identical(mi2))\n\n mi = mi.set_names(['new1', 'new2'])\n self.assertTrue(mi.equals(mi2))\n self.assertFalse(mi.identical(mi2))\n\n mi2 = mi2.set_names(['new1', 'new2'])\n self.assertTrue(mi.identical(mi2))\n\n mi3 = Index(mi.tolist(), names=mi.names)\n mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)\n self.assertTrue(mi.identical(mi3))\n self.assertFalse(mi.identical(mi4))\n self.assertTrue(mi.equals(mi4))\n\n def test_is_(self):\n\n mi = MultiIndex.from_tuples(lzip(range(10), range(10)))\n self.assertTrue(mi.is_(mi))\n self.assertTrue(mi.is_(mi.view()))\n self.assertTrue(mi.is_(mi.view().view().view().view()))\n mi2 = mi.view()\n # names are metadata, they don't change id\n mi2.names = [\"A\", \"B\"]\n self.assertTrue(mi2.is_(mi))\n self.assertTrue(mi.is_(mi2))\n\n self.assertTrue(mi.is_(mi.set_names([\"C\", \"D\"])))\n mi2 = mi.view()\n mi2.set_names([\"E\", \"F\"], inplace=True)\n self.assertTrue(mi.is_(mi2))\n # levels are inherent properties, they change identity\n mi3 = mi2.set_levels([lrange(10), lrange(10)])\n self.assertFalse(mi3.is_(mi2))\n # shouldn't change\n self.assertTrue(mi2.is_(mi))\n mi4 = mi3.view()\n mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)\n self.assertFalse(mi4.is_(mi3))\n mi5 = mi.view()\n mi5.set_levels(mi5.levels, inplace=True)\n self.assertFalse(mi5.is_(mi))\n\n def test_union(self):\n piece1 = self.index[:5][::-1]\n piece2 = self.index[3:]\n\n the_union = piece1 | piece2\n\n tups = sorted(self.index._tuple_index)\n expected = 
MultiIndex.from_tuples(tups)\n\n self.assertTrue(the_union.equals(expected))\n\n # corner case, pass self or empty thing:\n the_union = self.index.union(self.index)\n self.assertIs(the_union, self.index)\n\n the_union = self.index.union(self.index[:0])\n self.assertIs(the_union, self.index)\n\n # won't work in python 3\n # tuples = self.index._tuple_index\n # result = self.index[:4] | tuples[4:]\n # self.assertTrue(result.equals(tuples))\n\n # not valid for python 3\n # def test_union_with_regular_index(self):\n # other = Index(['A', 'B', 'C'])\n\n # result = other.union(self.index)\n # self.assertIn(('foo', 'one'), result)\n # self.assertIn('B', result)\n\n # result2 = self.index.union(other)\n # self.assertTrue(result.equals(result2))\n\n def test_intersection(self):\n piece1 = self.index[:5][::-1]\n piece2 = self.index[3:]\n\n the_int = piece1 & piece2\n tups = sorted(self.index[3:5]._tuple_index)\n expected = MultiIndex.from_tuples(tups)\n self.assertTrue(the_int.equals(expected))\n\n # corner case, pass self\n the_int = self.index.intersection(self.index)\n self.assertIs(the_int, self.index)\n\n # empty intersection: disjoint\n empty = self.index[:2] & self.index[2:]\n expected = self.index[:0]\n self.assertTrue(empty.equals(expected))\n\n # can't do in python 3\n # tuples = self.index._tuple_index\n # result = self.index & tuples\n # self.assertTrue(result.equals(tuples))\n\n def test_difference(self):\n\n first = self.index\n result = first.difference(self.index[-3:])\n\n # - API change GH 8226\n with tm.assert_produces_warning():\n first - self.index[-3:]\n with tm.assert_produces_warning():\n self.index[-3:] - first\n with tm.assert_produces_warning():\n self.index[-3:] - first.tolist()\n\n self.assertRaises(TypeError, lambda : first.tolist() - self.index[-3:])\n\n expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),\n sortorder=0,\n names=self.index.names)\n\n tm.assertIsInstance(result, MultiIndex)\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.names, self.index.names)\n\n # empty difference: reflexive\n result = self.index.difference(self.index)\n expected = self.index[:0]\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.names, self.index.names)\n\n # empty difference: superset\n result = self.index[-3:].difference(self.index)\n expected = self.index[:0]\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.names, self.index.names)\n\n # empty difference: degenerate\n result = self.index[:0].difference(self.index)\n expected = self.index[:0]\n self.assertTrue(result.equals(expected))\n self.assertEqual(result.names, self.index.names)\n\n # names not the same\n chunklet = self.index[-3:]\n chunklet.names = ['foo', 'baz']\n result = first.difference(chunklet)\n self.assertEqual(result.names, (None, None))\n\n # empty, but non-equal\n result = self.index.difference(self.index.sortlevel(1)[0])\n self.assertEqual(len(result), 0)\n\n # raise Exception called with non-MultiIndex\n result = first.difference(first._tuple_index)\n self.assertTrue(result.equals(first[:0]))\n\n # name from empty array\n result = first.difference([])\n self.assertTrue(first.equals(result))\n self.assertEqual(first.names, result.names)\n\n # name from non-empty array\n result = first.difference([('foo', 'one')])\n expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),\n ('foo', 'two'), ('qux', 'one'),\n ('qux', 'two')])\n expected.names = first.names\n self.assertEqual(first.names, result.names)\n 
assertRaisesRegexp(TypeError, \"other must be a MultiIndex or a list\"\n \" of tuples\", first.difference, [1, 2, 3, 4, 5])\n\n def test_from_tuples(self):\n assertRaisesRegexp(TypeError, 'Cannot infer number of levels from'\n ' empty list', MultiIndex.from_tuples, [])\n\n idx = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])\n self.assertEqual(len(idx), 2)\n\n def test_argsort(self):\n result = self.index.argsort()\n expected = self.index._tuple_index.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_sortlevel(self):\n import random\n\n tuples = list(self.index)\n random.shuffle(tuples)\n\n index = MultiIndex.from_tuples(tuples)\n\n sorted_idx, _ = index.sortlevel(0)\n expected = MultiIndex.from_tuples(sorted(tuples))\n self.assertTrue(sorted_idx.equals(expected))\n\n sorted_idx, _ = index.sortlevel(0, ascending=False)\n self.assertTrue(sorted_idx.equals(expected[::-1]))\n\n sorted_idx, _ = index.sortlevel(1)\n by1 = sorted(tuples, key=lambda x: (x[1], x[0]))\n expected = MultiIndex.from_tuples(by1)\n self.assertTrue(sorted_idx.equals(expected))\n\n sorted_idx, _ = index.sortlevel(1, ascending=False)\n self.assertTrue(sorted_idx.equals(expected[::-1]))\n\n def test_sortlevel_not_sort_remaining(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)\n self.assertTrue(sorted_idx.equals(mi))\n\n def test_sortlevel_deterministic(self):\n tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),\n ('foo', 'one'), ('baz', 'two'), ('qux', 'one')]\n\n index = MultiIndex.from_tuples(tuples)\n\n sorted_idx, _ = index.sortlevel(0)\n expected = MultiIndex.from_tuples(sorted(tuples))\n self.assertTrue(sorted_idx.equals(expected))\n\n sorted_idx, _ = index.sortlevel(0, ascending=False)\n self.assertTrue(sorted_idx.equals(expected[::-1]))\n\n sorted_idx, _ = index.sortlevel(1)\n by1 = sorted(tuples, key=lambda x: (x[1], x[0]))\n expected = MultiIndex.from_tuples(by1)\n self.assertTrue(sorted_idx.equals(expected))\n\n sorted_idx, _ = index.sortlevel(1, ascending=False)\n self.assertTrue(sorted_idx.equals(expected[::-1]))\n\n def test_dims(self):\n pass\n\n def test_drop(self):\n dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])\n\n index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])\n dropped2 = self.index.drop(index)\n\n expected = self.index[[0, 2, 3, 5]]\n self.assert_index_equal(dropped, expected)\n self.assert_index_equal(dropped2, expected)\n\n dropped = self.index.drop(['bar'])\n expected = self.index[[0, 1, 3, 4, 5]]\n self.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop('foo')\n expected = self.index[[2, 3, 4, 5]]\n self.assert_index_equal(dropped, expected)\n\n index = MultiIndex.from_tuples([('bar', 'two')])\n self.assertRaises(KeyError, self.index.drop, [('bar', 'two')])\n self.assertRaises(KeyError, self.index.drop, index)\n self.assertRaises(KeyError, self.index.drop, ['foo', 'two'])\n\n # partially correct argument\n mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])\n self.assertRaises(KeyError, self.index.drop, mixed_index)\n\n # error='ignore'\n dropped = self.index.drop(index, errors='ignore')\n expected = self.index[[0, 1, 2, 3, 4, 5]]\n self.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop(mixed_index, errors='ignore')\n expected = self.index[[0, 1, 2, 3, 5]]\n self.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop(['foo', 'two'], errors='ignore')\n expected = 
self.index[[2, 3, 4, 5]]\n self.assert_index_equal(dropped, expected)\n\n # mixed partial / full drop\n dropped = self.index.drop(['foo', ('qux', 'one')])\n expected = self.index[[2, 3, 5]]\n self.assert_index_equal(dropped, expected)\n\n # mixed partial / full drop / error='ignore'\n mixed_index = ['foo', ('qux', 'one'), 'two']\n self.assertRaises(KeyError, self.index.drop, mixed_index)\n dropped = self.index.drop(mixed_index, errors='ignore')\n expected = self.index[[2, 3, 5]]\n self.assert_index_equal(dropped, expected)\n\n def test_droplevel_with_names(self):\n index = self.index[self.index.get_loc('foo')]\n dropped = index.droplevel(0)\n self.assertEqual(dropped.name, 'second')\n\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])],\n names=['one', 'two', 'three'])\n dropped = index.droplevel(0)\n self.assertEqual(dropped.names, ('two', 'three'))\n\n dropped = index.droplevel('two')\n expected = index.droplevel(1)\n self.assertTrue(dropped.equals(expected))\n\n def test_droplevel_multiple(self):\n index = MultiIndex(levels=[Index(lrange(4)),\n Index(lrange(4)),\n Index(lrange(4))],\n labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0])],\n names=['one', 'two', 'three'])\n\n dropped = index[:2].droplevel(['three', 'one'])\n expected = index[:2].droplevel(2).droplevel(0)\n self.assertTrue(dropped.equals(expected))\n\n def test_insert(self):\n # key contained in all levels\n new_index = self.index.insert(0, ('bar', 'two'))\n self.assertTrue(new_index.equal_levels(self.index))\n self.assertEqual(new_index[0], ('bar', 'two'))\n\n # key not contained in all levels\n new_index = self.index.insert(0, ('abc', 'three'))\n tm.assert_numpy_array_equal(new_index.levels[0],\n list(self.index.levels[0]) + ['abc'])\n tm.assert_numpy_array_equal(new_index.levels[1],\n list(self.index.levels[1]) + ['three'])\n self.assertEqual(new_index[0], ('abc', 'three'))\n\n # key wrong length\n assertRaisesRegexp(ValueError, \"Item must have length equal to number\"\n \" of levels\", self.index.insert, 0, ('foo2',))\n\n left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],\n columns=['1st', '2nd', '3rd'])\n left.set_index(['1st', '2nd'], inplace=True)\n ts = left['3rd'].copy(deep=True)\n\n left.loc[('b', 'x'), '3rd'] = 2\n left.loc[('b', 'a'), '3rd'] = -1\n left.loc[('b', 'b'), '3rd'] = 3\n left.loc[('a', 'x'), '3rd'] = 4\n left.loc[('a', 'w'), '3rd'] = 5\n left.loc[('a', 'a'), '3rd'] = 6\n\n ts.loc[('b', 'x')] = 2\n ts.loc['b', 'a'] = -1\n ts.loc[('b', 'b')] = 3\n ts.loc['a', 'x'] = 4\n ts.loc[('a', 'w')] = 5\n ts.loc['a', 'a'] = 6\n\n right = pd.DataFrame([['a', 'b', 0],\n ['b', 'd', 1],\n ['b', 'x', 2],\n ['b', 'a', -1],\n ['b', 'b', 3],\n ['a', 'x', 4],\n ['a', 'w', 5],\n ['a', 'a', 6]],\n columns=['1st', '2nd', '3rd'])\n right.set_index(['1st', '2nd'], inplace=True)\n # FIXME data types changes to float because\n # of intermediate nan insertion;\n tm.assert_frame_equal(left, right, check_dtype=False)\n tm.assert_series_equal(ts, right['3rd'])\n\n # GH9250\n idx = [('test1', i) for i in range(5)] + \\\n [('test2', i) for i in range(6)] + \\\n [('test', 17), ('test', 18)]\n\n left = pd.Series(np.linspace(0, 10, 11),\n pd.MultiIndex.from_tuples(idx[:-2]))\n\n left.loc[('test', 17)] = 11\n left.ix[('test', 18)] = 12\n\n right = pd.Series(np.linspace(0, 12, 13),\n 
pd.MultiIndex.from_tuples(idx))\n\n tm.assert_series_equal(left, right)\n\n def test_take_preserve_name(self):\n taken = self.index.take([3, 0, 1])\n self.assertEqual(taken.names, self.index.names)\n\n def test_join_level(self):\n def _check_how(other, how):\n join_index, lidx, ridx = other.join(self.index, how=how,\n level='second',\n return_indexers=True)\n\n exp_level = other.join(self.index.levels[1], how=how)\n self.assertTrue(join_index.levels[0].equals(self.index.levels[0]))\n self.assertTrue(join_index.levels[1].equals(exp_level))\n\n # pare down levels\n mask = np.array(\n [x[1] in exp_level for x in self.index], dtype=bool)\n exp_values = self.index.values[mask]\n tm.assert_numpy_array_equal(join_index.values, exp_values)\n\n if how in ('outer', 'inner'):\n join_index2, ridx2, lidx2 = \\\n self.index.join(other, how=how, level='second',\n return_indexers=True)\n\n self.assertTrue(join_index.equals(join_index2))\n tm.assert_numpy_array_equal(lidx, lidx2)\n tm.assert_numpy_array_equal(ridx, ridx2)\n tm.assert_numpy_array_equal(join_index2.values, exp_values)\n\n def _check_all(other):\n _check_how(other, 'outer')\n _check_how(other, 'inner')\n _check_how(other, 'left')\n _check_how(other, 'right')\n\n _check_all(Index(['three', 'one', 'two']))\n _check_all(Index(['one']))\n _check_all(Index(['one', 'three']))\n\n # some corner cases\n idx = Index(['three', 'one', 'two'])\n result = idx.join(self.index, level='second')\n tm.assertIsInstance(result, MultiIndex)\n\n assertRaisesRegexp(TypeError, \"Join.*MultiIndex.*ambiguous\",\n self.index.join, self.index, level=1)\n\n def test_join_self(self):\n kinds = 'outer', 'inner', 'left', 'right'\n for kind in kinds:\n res = self.index\n joined = res.join(res, how=kind)\n self.assertIs(res, joined)\n\n def test_join_multi(self):\n # GH 10665\n midx = pd.MultiIndex.from_product([np.arange(4), np.arange(4)], names=['a', 'b'])\n idx = pd.Index([1, 2, 5], name='b')\n\n # inner\n jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)\n exp_idx = pd.MultiIndex.from_product([np.arange(4), [1, 2]], names=['a', 'b'])\n exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14])\n exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1])\n self.assert_index_equal(jidx, exp_idx)\n self.assert_numpy_array_equal(lidx, exp_lidx)\n self.assert_numpy_array_equal(ridx, exp_ridx)\n # flip\n jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)\n self.assert_index_equal(jidx, exp_idx)\n self.assert_numpy_array_equal(lidx, exp_lidx)\n self.assert_numpy_array_equal(ridx, exp_ridx)\n\n # keep MultiIndex\n jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)\n exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1])\n self.assert_index_equal(jidx, midx)\n self.assertIsNone(lidx)\n self.assert_numpy_array_equal(ridx, exp_ridx)\n # flip\n jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)\n self.assert_index_equal(jidx, midx)\n self.assertIsNone(lidx)\n self.assert_numpy_array_equal(ridx, exp_ridx)\n\n def test_reindex(self):\n result, indexer = self.index.reindex(list(self.index[:4]))\n tm.assertIsInstance(result, MultiIndex)\n self.check_level_names(result, self.index[:4].names)\n\n result, indexer = self.index.reindex(list(self.index))\n tm.assertIsInstance(result, MultiIndex)\n self.assertIsNone(indexer)\n self.check_level_names(result, self.index.names)\n\n def test_reindex_level(self):\n idx = Index(['one'])\n\n target, indexer = self.index.reindex(idx, level='second')\n target2, indexer2 = 
idx.reindex(self.index, level='second')\n\n exp_index = self.index.join(idx, level='second', how='right')\n exp_index2 = self.index.join(idx, level='second', how='left')\n\n self.assertTrue(target.equals(exp_index))\n exp_indexer = np.array([0, 2, 4])\n tm.assert_numpy_array_equal(indexer, exp_indexer)\n\n self.assertTrue(target2.equals(exp_index2))\n exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])\n tm.assert_numpy_array_equal(indexer2, exp_indexer2)\n\n assertRaisesRegexp(TypeError, \"Fill method not supported\",\n self.index.reindex, self.index, method='pad',\n level='second')\n\n assertRaisesRegexp(TypeError, \"Fill method not supported\",\n idx.reindex, idx, method='bfill', level='first')\n\n def test_duplicates(self):\n self.assertFalse(self.index.has_duplicates)\n self.assertTrue(self.index.append(self.index).has_duplicates)\n\n index = MultiIndex(levels=[[0, 1], [0, 1, 2]],\n labels=[[0, 0, 0, 0, 1, 1, 1],\n [0, 1, 2, 0, 0, 1, 2]])\n self.assertTrue(index.has_duplicates)\n\n # GH 9075\n t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),\n (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),\n (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),\n (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),\n (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),\n (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),\n (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),\n (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),\n (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),\n (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),\n (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),\n (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),\n (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),\n (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),\n (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),\n (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),\n (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),\n (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]\n\n index = pd.MultiIndex.from_tuples(t)\n self.assertFalse(index.has_duplicates)\n\n # handle int64 overflow if possible\n def check(nlevels, with_nulls):\n labels = np.tile(np.arange(500), 2)\n level = np.arange(500)\n\n if with_nulls: # inject some null values\n labels[500] = -1 # common nan value\n labels = list(labels.copy() for i in range(nlevels))\n for i in range(nlevels):\n labels[i][500 + i - nlevels // 2 ] = -1\n\n labels += [np.array([-1, 1]).repeat(500)]\n else:\n labels = [labels] * nlevels + [np.arange(2).repeat(500)]\n\n levels = [level] * nlevels + [[0, 1]]\n\n # no dups\n index = MultiIndex(levels=levels, labels=labels)\n self.assertFalse(index.has_duplicates)\n\n # with a dup\n if with_nulls:\n f = lambda a: np.insert(a, 1000, a[0])\n labels = list(map(f, labels))\n index = MultiIndex(levels=levels, labels=labels)\n else:\n values = index.values.tolist()\n index = MultiIndex.from_tuples(values + [values[0]])\n\n self.assertTrue(index.has_duplicates)\n\n # no overflow\n check(4, False)\n check(4, True)\n\n # overflow possible\n check(8, False)\n check(8, True)\n\n # GH 9125\n n, k = 200, 5000\n levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]\n labels = [np.random.choice(n, k * n) for lev in levels]\n mi = MultiIndex(levels=levels, labels=labels)\n\n for keep in ['first', 'last', False]:\n left = mi.duplicated(keep=keep)\n right = 
pd.lib.duplicated(mi.values, keep=keep)\n tm.assert_numpy_array_equal(left, right)\n\n # GH5873\n for a in [101, 102]:\n mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])\n self.assertFalse(mi.has_duplicates)\n self.assertEqual(mi.get_duplicates(), [])\n tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype='bool'))\n\n for n in range(1, 6): # 1st level shape\n for m in range(1, 5): # 2nd level shape\n # all possible unique combinations, including nan\n lab = product(range(-1, n), range(-1, m))\n mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],\n labels=np.random.permutation(list(lab)).T)\n self.assertEqual(len(mi), (n + 1) * (m + 1))\n self.assertFalse(mi.has_duplicates)\n self.assertEqual(mi.get_duplicates(), [])\n tm.assert_numpy_array_equal(mi.duplicated(),\n np.zeros(len(mi), dtype='bool'))\n\n def test_duplicate_meta_data(self):\n # GH 10115\n index = MultiIndex(levels=[[0, 1], [0, 1, 2]],\n labels=[[0, 0, 0, 0, 1, 1, 1],\n [0, 1, 2, 0, 0, 1, 2]])\n for idx in [index,\n index.set_names([None, None]),\n index.set_names([None, 'Num']),\n index.set_names(['Upper','Num']),\n ]:\n self.assertTrue(idx.has_duplicates)\n self.assertEqual(idx.drop_duplicates().names, idx.names)\n\n def test_tolist(self):\n result = self.index.tolist()\n exp = list(self.index.values)\n self.assertEqual(result, exp)\n\n def test_repr_with_unicode_data(self):\n with pd.core.config.option_context(\"display.encoding\",'UTF-8'):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n index = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n self.assertFalse(\"\\\\u\" in repr(index)) # we don't want unicode-escaped\n\n def test_repr_roundtrip(self):\n\n mi = MultiIndex.from_product([list('ab'),range(3)],names=['first','second'])\n str(mi)\n\n if compat.PY3:\n tm.assert_index_equal(eval(repr(mi)), mi, exact=True)\n else:\n result = eval(repr(mi))\n # string coerces to unicode\n tm.assert_index_equal(result, mi, exact=False)\n self.assertEqual(mi.get_level_values('first').inferred_type, 'string')\n self.assertEqual(result.get_level_values('first').inferred_type, 'unicode')\n\n mi_u = MultiIndex.from_product([list(u'ab'),range(3)],names=['first','second'])\n result = eval(repr(mi_u))\n tm.assert_index_equal(result, mi_u, exact=True)\n\n # formatting\n if compat.PY3:\n str(mi)\n else:\n compat.text_type(mi)\n\n # long format\n mi = MultiIndex.from_product([list('abcdefg'),range(10)],names=['first','second'])\n result = str(mi)\n\n if compat.PY3:\n tm.assert_index_equal(eval(repr(mi)), mi, exact=True)\n else:\n result = eval(repr(mi))\n # string coerces to unicode\n tm.assert_index_equal(result, mi, exact=False)\n self.assertEqual(mi.get_level_values('first').inferred_type, 'string')\n self.assertEqual(result.get_level_values('first').inferred_type, 'unicode')\n\n mi = MultiIndex.from_product([list(u'abcdefg'),range(10)],names=['first','second'])\n result = eval(repr(mi_u))\n tm.assert_index_equal(result, mi_u, exact=True)\n\n def test_str(self):\n # tested elsewhere\n pass\n\n def test_unicode_string_with_unicode(self):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n\n if compat.PY3:\n str(idx)\n else:\n compat.text_type(idx)\n\n def test_bytestring_with_unicode(self):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n\n if compat.PY3:\n bytes(idx)\n else:\n str(idx)\n\n def test_slice_keep_name(self):\n x = 
MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],\n names=['x', 'y'])\n self.assertEqual(x[1:].names, x.names)\n\n def test_isnull_behavior(self):\n # should not segfault GH5123\n # NOTE: if MI representation changes, may make sense to allow\n # isnull(MI)\n with tm.assertRaises(NotImplementedError):\n pd.isnull(self.index)\n\n def test_level_setting_resets_attributes(self):\n ind = MultiIndex.from_arrays([\n ['A', 'A', 'B', 'B', 'B'],\n [1, 2, 1, 2, 3]])\n assert ind.is_monotonic\n ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],\n inplace=True)\n # if this fails, probably didn't reset the cache correctly.\n assert not ind.is_monotonic\n\n def test_isin(self):\n values = [('foo', 2), ('bar', 3), ('quux', 4)]\n\n idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'],\n np.arange(4)])\n result = idx.isin(values)\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # empty, return dtype bool\n idx = MultiIndex.from_arrays([[], []])\n result = idx.isin(values)\n self.assertEqual(len(result), 0)\n self.assertEqual(result.dtype, np.bool_)\n\n def test_isin_nan(self):\n idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])\n tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),\n [False, False])\n tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),\n [False, False])\n\n def test_isin_level_kwarg(self):\n idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'],\n np.arange(4)])\n\n vals_0 = ['foo', 'bar', 'quux']\n vals_1 = [2, 3, 10]\n\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))\n\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))\n\n self.assertRaises(IndexError, idx.isin, vals_0, level=5)\n self.assertRaises(IndexError, idx.isin, vals_0, level=-5)\n\n self.assertRaises(KeyError, idx.isin, vals_0, level=1.0)\n self.assertRaises(KeyError, idx.isin, vals_1, level=-1.0)\n self.assertRaises(KeyError, idx.isin, vals_1, level='A')\n\n idx.names = ['A', 'B']\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))\n\n self.assertRaises(KeyError, idx.isin, vals_1, level='C')\n\n def test_reindex_preserves_names_when_target_is_list_or_ndarray(self):\n # GH6552\n idx = self.index.copy()\n target = idx.copy()\n idx.names = target.names = [None, None]\n\n other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])\n\n # list & ndarray cases\n self.assertEqual(idx.reindex([])[0].names, [None, None])\n self.assertEqual(idx.reindex(np.array([]))[0].names, [None, None])\n self.assertEqual(idx.reindex(target.tolist())[0].names, [None, None])\n self.assertEqual(idx.reindex(target.values)[0].names, [None, None])\n self.assertEqual(idx.reindex(other_dtype.tolist())[0].names, [None, None])\n self.assertEqual(idx.reindex(other_dtype.values)[0].names, [None, None])\n\n idx.names = ['foo', 'bar']\n self.assertEqual(idx.reindex([])[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex(np.array([]))[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex(target.tolist())[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex(target.values)[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex(other_dtype.tolist())[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex(other_dtype.values)[0].names, ['foo', 
'bar'])\n\n def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self):\n # GH7774\n idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],\n names=['foo', 'bar'])\n self.assertEqual(idx.reindex([], level=0)[0].names, ['foo', 'bar'])\n self.assertEqual(idx.reindex([], level=1)[0].names, ['foo', 'bar'])\n\n def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self):\n # GH7774\n idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])\n self.assertEqual(idx.reindex([], level=0)[0].levels[0].dtype.type,\n np.int64)\n self.assertEqual(idx.reindex([], level=1)[0].levels[1].dtype.type,\n np.object_)\n\n def test_groupby(self):\n groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2]))\n labels = self.index.get_values().tolist()\n exp = {1: labels[:3], 2: labels[3:]}\n tm.assert_dict_equal(groups, exp)\n\n # GH5620\n groups = self.index.groupby(self.index)\n exp = dict((key, [key]) for key in self.index)\n tm.assert_dict_equal(groups, exp)\n\n def test_index_name_retained(self):\n # GH9857\n result = pd.DataFrame({'x': [1, 2, 6],\n 'y': [2, 2, 8],\n 'z': [-5, 0, 5]})\n result = result.set_index('z')\n result.loc[10] = [9, 10]\n df_expected = pd.DataFrame({'x': [1, 2, 6, 9],\n 'y': [2, 2, 8, 10],\n 'z': [-5, 0, 5, 10]})\n df_expected = df_expected.set_index('z')\n tm.assert_frame_equal(result, df_expected)\n\n def test_equals_operator(self):\n # GH9785\n self.assertTrue((self.index == self.index).all())\n\n\ndef test_get_combined_index():\n from pandas.core.index import _get_combined_index\n result = _get_combined_index([])\n assert(result.equals(Index([])))\n\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] |
[
[
"numpy.sqrt",
"pandas.PeriodIndex",
"pandas.util.testing.assert_contains_all",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.makeUnicodeIndex",
"pandas.compat.lzip",
"pandas.compat.text_type",
"numpy.sin",
"numpy.insert",
"pandas.np.random.randn",
"numpy.zeros",
"numpy.multiply",
"pandas.MultiIndex",
"numpy.random.choice",
"numpy.append",
"pandas.util.testing.assert_equal",
"pandas.date_range",
"numpy.array",
"pandas.CategoricalIndex",
"numpy.absolute",
"pandas.TimedeltaIndex",
"pandas.period_range",
"pandas.util.testing.get_data_path",
"numpy.datetime64",
"pandas.lib.Timestamp",
"numpy.random.permutation",
"numpy.add",
"pandas.compat.range",
"pandas.Series",
"numpy.asarray",
"numpy.concatenate",
"numpy.divide",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.compat.StringIO",
"numpy.intersect1d",
"pandas.core.config.option_context",
"pandas.set_option",
"pandas.util.testing.equalContents",
"pandas.lib.duplicated",
"pandas.compat.u",
"pandas.util.testing.makeStringIndex",
"numpy.timedelta64",
"numpy.random.rand",
"pandas.infer_freq",
"pandas.util.testing.assertRaisesRegexp",
"pandas.to_timedelta",
"pandas.util.testing.makeFloatIndex",
"numpy.empty",
"pandas.util.testing.assert_dict_equal",
"pandas.util.testing.makePeriodIndex",
"numpy.linspace",
"pandas.DataFrame",
"pandas.compat.iteritems",
"numpy.negative",
"numpy.random.randint",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_series_equal",
"pandas.Int64Index",
"pandas.tseries.index._to_m8",
"pandas.compat.long",
"pandas.core.index._get_combined_index",
"pandas.util.testing.makeCategoricalIndex",
"pandas.util.testing.assert_almost_equal",
"pandas.Float64Index",
"pandas.Timedelta",
"pandas.lib.list_to_object_array",
"pandas.util.testing.makeCustomDataframe",
"pandas.isnull",
"pandas.util.testing.assertRaises",
"pandas.util.testing.makeIntIndex",
"pandas.compat.zip",
"pandas.offsets.Hour",
"pandas.read_pickle",
"pandas.to_datetime",
"pandas.util.testing.assertIsInstance",
"pandas.util.testing.assertNotIsInstance",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.util.testing.assert_copy",
"pandas.tseries.offsets.BDay",
"pandas.util.testing.makeTimeDataFrame",
"numpy.arange",
"numpy.subtract",
"pandas.util.testing.makeDateIndex",
"pandas.compat.is_platform_windows",
"pandas.Categorical",
"pandas.option_context",
"numpy.int_",
"pandas.MultiIndex.from_product",
"pandas.util.testing.makeTimedeltaIndex",
"pandas.MultiIndex.from_arrays",
"pandas.Timestamp",
"pandas.compat.lrange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
}
] |
tblxio/sinfo
|
[
"57e4b38fc4a643a4218c472526b7fdc03b61415e",
"57e4b38fc4a643a4218c472526b7fdc03b61415e"
] |
[
"ds/model_utils.py",
"ds/features.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import (make_scorer, f1_score, precision_score, \n recall_score, confusion_matrix)\nfrom sklearn.model_selection import RandomizedSearchCV\n\n\ndef randomized_hyperparameter_search(X_train, y_train, X_val, y_val, \n pipeline, params, n_iter, \n score_func, n_jobs, seed):\n \"\"\" \n Performs Randomized Search over the hyperparameters of a pipeline.\n Then, chooses the best parameters and trains the pipeline in the\n complete training set.\n\n Parameters\n ----------\n X_train: numpy array\n The training set in the form [n_train_samples, n_features].\n\n y_train: numpy array\n The true labels of the training set.\n \n X_train: column of Dataframe\n The validation set in the form [n_val_samples, n_features].\n\n y_val: numpy array\n The true labels of the validation set.\n \n pipeline: instantiated Pipeline\n The ML pipeline.\n \n params: dict\n The parameters and ranges.\n \n n_iter: int\n The number of iterations.\n \n score_func: function\n The function that computes the metric to be optimized.\n \n n_jobs: int\n The number of parallel jobs.\n \n seed: int\n The random seed.\n\n Returns\n -------\n pipeline: sklearn Pipeline\n The pipeline after being trained in the complete Training set\n together with validation.\n \"\"\"\n def train_val_iter(y_train, y_val): \n yield np.arange(0, y_train.size), \\\n np.arange(y_train.size, y_train.size + y_val.size)\n \n data_iter = iter(train_val_iter(y_train, y_val))\n \n random_search = RandomizedSearchCV(pipeline,\n param_distributions=params,\n n_iter=n_iter,\n scoring=make_scorer(score_func),\n n_jobs=n_jobs,\n random_state=seed,\n cv=data_iter, refit=False)\n \n X = np.append(X_train, X_val, axis=0)\n y = np.append(y_train, y_val)\n \n random_search.fit(X, y)\n best_params = random_search.best_params_\n print('Random search best score:', \n round(random_search.best_score_, 3))\n print('Best parameters:')\n print(best_params)\n \n pipeline.set_params(**best_params)\n pipeline.fit(X, y)\n \n return pipeline\n\n\ndef binarize_prob(prediction_prob, threshold=0.5):\n \"\"\" \n Given probabilistic predictions, returns binary predictions.\n\n Parameters\n ----------\n prediction_prob: numpy array\n A vector containing the probabilistic predictions.\n\n threshold : float\n The probabilities threshold to binarize the predictions.\n\n Returns\n -------\n numpy array\n The binarized hard predictions.\n \"\"\"\n assert prediction_prob.ndim in (1, 2)\n \n if prediction_prob.ndim == 2:\n hard_prediction = prediction_prob[:, 1] >= threshold\n elif prediction_prob.ndim == 1:\n hard_prediction = prediction_prob >= threshold\n else:\n raise ValueError\n \n return hard_prediction.astype(np.int)\n\n\ndef get_predictions(pipeline, X, prob_threshold):\n \"\"\" \n Given a trained pipeline and dataset, performs prediction.\n\n Parameters\n ----------\n pipeline: sklearn Pipeline\n The trained pipeline.\n \n X: column of Dataframe\n The samples.\n \n prob_threshold: float\n The probabilities threshold for binarization\n\n Returns\n -------\n numpy array\n The hard classifications.\n numpy array\n The probabilistic predictions.\n \"\"\"\n prediction_prob = pipeline.predict_proba(X)\n prediction = binarize_prob(prediction_prob=prediction_prob, \n threshold=prob_threshold)\n \n return prediction, prediction_prob[:, 1]\n\n\ndef metrics_report(prediction, y):\n \"\"\" \n Prints the Recall, Precision, F1 score, and plots \n a confusion matrix.\n\n Parameters\n ----------\n prediction: numpy array\n The hard 
predictions.\n\n y : numpy array\n The true labels.\n \"\"\"\n recall = round(recall_score(y, prediction), 3)\n precision = round(precision_score(y, prediction), 3)\n f1 = round(f1_score(y, prediction), 3)\n print('Recall:', recall)\n print('Precision:', precision)\n print('F1-score:', f1)\n \n print('\\nConfusion Matrix')\n cm = confusion_matrix(y_true=y, y_pred=prediction, \n labels=[0, 1])\n cm = pd.DataFrame(cm)\n cm = cm.rename({0: 'True no event', 1: 'True event'}, axis='index')\n cm = cm.rename({0: 'Pred no event', 1: 'Pred event'}, axis='columns')\n print(cm)\n \n return recall, precision, f1\n",
"import numpy as np\n\n\ndef get_mean(array, axis=None):\n \"\"\" \n Computes the mean of an array, along a given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return np.mean(array, axis=axis)\n\n\ndef get_min(array, axis=None):\n \"\"\" \n Computes the minimum of an array, along a given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return np.min(array, axis=axis)\n\n\ndef get_max(array, axis=None):\n \"\"\" \n Computes the maximum of an array, along a given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return np.max(array, axis=axis)\n\n\ndef get_std(array, axis=None):\n \"\"\" \n Computes the standard deviation of an array, along a \n given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return np.std(array, axis=axis)\n\n\ndef get_range(array, axis=None):\n \"\"\" \n Computes the range of an array, along a given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return array.max(axis=axis) - np.abs(array.min(axis=axis))\n\n\ndef get_quantile(array, quantile, axis=None):\n \"\"\" \n Computes the value of a given quantile of an array, \n along a given axis.\n\n Parameters\n ----------\n array: numpy array\n The array over which the operation will be done.\n\n axis : None, int\n Axis along which the operation will be done.\n\n Returns\n -------\n numpy array\n The array after the computing operation.\n \"\"\"\n return np.quantile(array, q=quantile, axis=axis)\n\n\ndef get_window_lims(window_size):\n \"\"\"Computes the limits of the window.\n\n This is half of the window size, and it differs if the\n window size is odd or pair.\n\n Parameters\n ----------\n window_size : int\n The size of the window, i.e., the number of samples\n that it considers.\n\n Returns\n -------\n int\n Considering the window is centered on the point,\n how many elements to consider backwards.\n int\n Considering the window is centered on the point,\n how many elements to consider forward.\n \"\"\"\n if window_size % 2 == 0:\n w_min, w_max = int(window_size / 2), int(window_size / 2)\n else:\n w_min, w_max = int(window_size / 2), int(window_size / 2) + 1\n \n return w_min, w_max\n\n\ndef get_features(signals, labels, window_size, \n feature_list, idx,\n label_pos='center'):\n \"\"\"Computes the features.\n\n Given an array of indexes where windows should be extracted, and a\n list of functions for computing features, returns the features\n for all the selected samples.\n\n Parameters\n ----------\n signals : numpy array\n Array with shape 
[n_signal_points, n_signals] with the signals\n of interest acquisitions.\n labels: numpy array\n Array of shape [n_signal_points] with the labels of the event\n of interest, where 0 is non-event and 1 is event.\n window_size: int\n The size of the considered window.\n feature_list: list\n A list of functions, where each function computes one feature.\n The function is expected to receive an array of shape\n [n_selected_windows, window_size].\n idx: numpy array\n Array with the index of the signal samples around which the\n window will be extracted.\n label_pos: string\n The point from which to extract the label. If center, it is the point\n in the middle of the window. Otherwise, with end it is the last\n point.\n\n Returns\n -------\n numpy array\n 2D array with shape [n_samples, n_features] with the features.\n numpy array\n The array with the label of the samples.\n \"\"\"\n assert label_pos in ('center', 'end')\n \n n_samples = idx.size\n \n n_features = len(feature_list)\n n_signals = signals.shape[1]\n feats = np.zeros((n_samples, n_features * n_signals))\n y = np.zeros(n_samples, dtype=np.int)\n \n w_min, w_max = get_window_lims(window_size=window_size)\n if label_pos == 'center':\n pad = int(np.max([w_min, w_max]))\n else:\n pad = window_size\n idx = idx + pad\n \n pad_labels = np.pad(labels.copy(), pad, 'constant', \n constant_values=0)\n \n for signal_i in range(signals.shape[1]):\n signal_windows = np.zeros((n_samples, window_size))\n signal = signals[:, signal_i]\n signal = np.pad(signal, pad, mode='reflect')\n \n for i in range(n_samples):\n if label_pos == 'center':\n y[i] = pad_labels[idx[i]]\n signal_windows[i, :] = \\\n signal[idx[i] - w_min: idx[i] + w_max]\n else:\n y[i] = pad_labels[idx[i]]\n signal_windows[i, :] = \\\n signal[idx[i] - window_size + 1: idx[i] + 1]\n \n # vectorized computation\n for feature_i, feature in enumerate(feature_list):\n feats[:, signal_i * n_features + feature_i] = \\\n feature(array=signal_windows, axis=1)\n \n return feats, y"
] |
[
[
"numpy.arange",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"numpy.append",
"sklearn.metrics.make_scorer",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score"
],
[
"numpy.pad",
"numpy.min",
"numpy.quantile",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rubysown/equities
|
[
"aa113e2001be7c338fcd069490e252144b25c9b5"
] |
[
"equities/api.py"
] |
[
"import concurrent.futures\nimport pandas as pd \nfrom equities import static as STATIC \nfrom solaris.api import Client as SolarisClient\nfrom pytrends.request import TrendReq as GoogleTrendClient\nimport yfinance as YahooFinanceClient\n\n__version__ = STATIC.__version__\n__author__ = STATIC.__author__\n\nclass Client(object):\n \"\"\" equities composed clients \"\"\"\n\n def __init__(self,verbose=False):\n try:\n # sets verbosity level and prints begin init text\n self.verbose = verbose; STATIC.initialize(self.verbose)\n\n # connects clients \n self._sol = SolarisClient(verbose=self.verbose)\n self._pytrends = GoogleTrendClient(hl='en-US', tz=360)\n\n # sets stock identification variables \n self.tickers = list(set(self._sol.cik_to_ticker.values()))\n self.names = list(set(self._sol.cik_to_name.values()))\n self.ciks = list(set(self._sol.cik_to_name.keys()))\n\n # prints end init text\n messages = self._sol._fetch_equities_messages()\n STATIC.initialized(self.verbose,messages,len(self.ciks))\n\n except Exception as e:\n STATIC.failed(e)\n\n def __len__(self):\n return len(self.ciks)\n\n def __str__(self):\n return str(self._sol.name_to_cik)\n\n def _query_y_finance(self,cik_or_ticker):\n \"\"\" returns a yfinance Ticker object by quering YahooFinance\n for a given cik or ticker\n \"\"\"\n try:\n cik = self._convert_to_cik(cik_or_ticker)\n ticker = self._sol.cik_to_ticker[cik]\n except:\n ticker = cik_or_ticker\n return YahooFinanceClient.Ticker(ticker)\n\n\n def _convert_to_cik(self,cik_or_ticker):\n \"\"\" returns a cik from a query/ticker into the corresponding \n cik number with cleaning.\n \"\"\"\n symbol = cik_or_ticker.lower().replace(' ','')\n try:\n if symbol in self.tickers:\n return self._sol.ticker_to_cik[symbol]\n elif str(int(cik_or_ticker)) in self.ciks:\n return str(int(cik_or_ticker))\n else: \n print('Error Could not Convert: %s'%cik_or_ticker)\n return cik_or_ticker\n except: \n return cik_or_ticker\n\n def _set_verbose(self,verbose):\n \"\"\"sets universes' stdout level of verbosity\"\"\"\n self.verbose = verbose\n self._sol.verbose = verbose \n\n def _invert_dict(self,to_invert):\n \"\"\"inverts an arbitrary python dictionary\"\"\"\n return {v:k for k,v in to_invert.items()}\n\n def cik_to_name(self):\n \"\"\"returns a dict mapping ciks to company names\"\"\"\n return dict(zip(\n self._sol.cik_to_name.keys(),self._sol.cik_to_name.values()\n ))\n\n def cik_to_ticker(self):\n \"\"\"returns a dict mapping ciks to tickers\"\"\"\n return dict(zip(\n self._sol.cik_to_ticker.keys(),self._sol.cik_to_ticker.values()\n ))\n\n def ticker_to_cik(self):\n \"\"\"returns a dict mapping tickers to ciks\"\"\"\n return self._invert_dict(self.cik_to_name)\n\n def name_to_cik(self):\n \"\"\"returns a dict mapping names to ciks\"\"\"\n return self._invert_dict(self.cik_to_name)\n\n def prices(self,cik_or_ticker,period='max'):\n \"\"\"returns a price dataframe for the given cik or tickers and period.\n period must be a string contained in the following list:\n ['1d','5d','1mo','3mo','6mo','1y','2y','5y','10y','ytd','max'].\n \"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).history(period=period)\n except Exception as e:\n return pd.DataFrame()\n\n def actions(self,cik_or_ticker):\n \"\"\"returns a corporate actions dataframe for the given cik or tickers\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).actions\n except Exception as e:\n return pd.DataFrame()\n\n def dividends(self,cik_or_ticker):\n \"\"\"returns a dividends dataframe for the given cik or tickers\"\"\"\n 
try:\n return self._query_y_finance(cik_or_ticker).dividends\n except Exception as e:\n return pd.DataFrame()\n\n def splits(self,cik_or_ticker):\n \"\"\"returns a splits dataframe for the given cik or tickers\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).splits\n except Exception as e:\n return pd.DataFrame()\n\n def major_holders(self,cik_or_ticker):\n \"\"\"returns a dataframe of major holders for the given cik or ticker\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).major_holders\n except Exception as e:\n return pd.DataFrame()\n \n def institutional_holders(self,cik_or_ticker):\n \"\"\"returns a dataframe of instiutional holders for the given cik or ticker\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).institutional_holders\n except Exception as e:\n return pd.DataFrame()\n\n def events(self,cik_or_ticker):\n \"\"\"returns a dataframe of earnings events for the given cik or ticker\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).calendar\n except Exception as e:\n return pd.DataFrame()\n\n def recommendations(self,cik_or_ticker):\n \"\"\"returns a dataframe buy/sell side recommendations for the given cik or ticker\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).recommendations\n except Exception as e:\n return pd.DataFrame()\n\n def esg(self,cik_or_ticker):\n \"\"\"returns a dataframe esg metrics for the given cik or ticker\"\"\"\n try:\n return self._query_y_finance(cik_or_ticker).sustainability\n except Exception as e:\n return pd.DataFrame()\n\n def financial_statement(self,cik_or_ticker,kind):\n \"\"\"returns a financial statement dataframe for the given cik or ticker\n and kind of statement. kind must be a string contained in the following \n list: ['income','balance','cash','equity'].\n \"\"\"\n try:\n cik = self._convert_to_cik(cik_or_ticker)\n return self._sol.financial_statement(cik,kind,df=True)\n except Exception as e:\n #print(e)\n return pd.DataFrame()\n\n def interest(self,name):\n \"\"\"returns a dataframe of google search interest for a particular name\"\"\"\n try:\n self._pytrends.build_payload(\n [name.replace('/','')],\n cat=0,\n timeframe='today 5-y',\n geo='',\n gprop='')\n return self._pytrends.interest_over_time()\n except Exception as e:\n print(e)\n return pd.DataFrame()\n\n def search(self,query):\n \"\"\" returns a dict mapping ciks to names containing \n matched companies to a given query.\n \"\"\"\n matches = []\n for name in self.names:\n if query.lower() in name.lower():\n matches.append(self._sol.name_to_cik[name])\n for ticker in self.tickers:\n if query.lower() in ticker.lower():\n matches.append(self._sol.ticker_to_cik[ticker])\n for cik in self.ciks:\n if query in cik:\n matches.append(cik)\n STATIC.search(self.verbose,query,str(len(matches)))\n return {self._sol.cik_to_name[match]:match for match in matches}\n\n def company(self,query,search=False):\n \"\"\"returns a dictionary of data for a given ticker/cik/search query\"\"\"\n\n def execute_request(cik):\n \"\"\"executes data request\"\"\"\n try:\n sol_data = self._sol.company(cik,df=True)\n except:\n sol_data = {'name' : cik }\n\n yfinance_data = {\n 'prices' : self.prices(cik),\n 'actions' : self.actions(cik),\n 'dividends' : self.dividends(cik),\n 'splits' : self.splits(cik),\n 'major_holders' : self.major_holders(cik),\n 'institutional_holders': self.institutional_holders(cik),\n 'events': self.events(cik),\n 'recommendations': self.recommendations(cik),\n 'esg': self.esg(cik),\n 'interest': self.interest(sol_data['name'])\n }\n 
sol_data.update(yfinance_data)\n return sol_data\n\n if type(query) == str:\n if search: # singleticker case (searches if specified)\n cik = list(self.search(query).values())[0]\n else:\n cik = self._convert_to_cik(query)\n return execute_request(query)\n elif type(query) == list:\n results = None # multiticker case (multithreaded,recursive)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n args = (ticker for ticker in query)\n saved_verbose = self.verbose; self._set_verbose(True)\n future = executor.map(self.company, args)\n results = dict(zip(query,future))\n self._set_verbose(saved_verbose)\n return results \n else: \n print('Error with arguments: %s'%str(query))\n quit()\n \n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
OshanIvantha/tf-encrypted
|
[
"538e4857fa7adaa024a03c532ba3b5d78d89d1b9"
] |
[
"tensorflow_encrypted/layers/convolution.py"
] |
[
"import numpy as np\nfrom . import core\n\n\nclass Conv2D(core.Layer):\n def __init__(self, filter_shape, strides=1, padding=\"SAME\",\n filter_init=lambda shp: np.random.normal(scale = 0.1, size = shp),\n l2reg_lambda=0.0, channels_first=True):\n \"\"\" 2 Dimensional convolutional layer, expects NCHW data format\n filter_shape: tuple of rank 4\n strides: int with stride size\n filter init: lambda function with shape parameter\n Example:\n Conv2D((4, 4, 1, 20), strides=2, filter_init=lambda shp:\n np.random.normal(scale=0.01, size=shp))\n \"\"\"\n self.fshape = filter_shape\n self.strides = strides\n self.padding = padding\n self.filter_init = filter_init\n self.l2reg_lambda = l2reg_lambda\n self.cache = None\n self.cached_x_col = None\n self.cached_input_shape = None\n self.initializer = None\n self.weights = None\n self.bias = None\n self.model = None\n assert channels_first\n\n def initialize(self, input_shape, initial_weights=None):\n\n h_filter, w_filter, d_filters, n_filters = self.fshape\n n_x, d_x, h_x, w_x = input_shape\n\n if self.padding == \"SAME\":\n h_out = int(np.ceil(float(h_x) / float(self.strides)))\n w_out = int(np.ceil(float(w_x) / float(self.strides)))\n if self.padding == \"VALID\":\n h_out = int(np.ceil(float(h_x - h_filter + 1) / float(self.strides)))\n w_out = int(np.ceil(float(w_x - w_filter + 1) / float(self.strides)))\n\n if initial_weights is None:\n initial_weights = self.filter_init(self.fshape)\n self.weights = self.prot.define_private_variable(initial_weights)\n self.bias = self.prot.define_private_variable(np.zeros((n_filters, h_out, w_out)))\n\n return [n_x, n_filters, h_out, w_out]\n\n def forward(self, x):\n self.cached_input_shape = x.shape\n self.cache = x\n out = self.prot.conv2d(x, self.weights, self.strides, self.padding)\n\n return out + self.bias\n\n def backward(self, d_y, learning_rate):\n x = self.cache\n h_filter, w_filter, d_filter, n_filter = map(int, self.weights.shape)\n\n if self.model.layers.index(self) != 0:\n W_reshaped = self.weights.reshape(n_filter, -1).transpose()\n dout_reshaped = d_y.transpose(1, 2, 3, 0).reshape(n_filter, -1)\n dx = W_reshaped.dot(dout_reshaped).col2im(imshape=self.cached_input_shape,\n field_height=h_filter,\n field_width=w_filter,\n padding=self.padding,\n stride=self.strides)\n\n d_w = self.prot.conv2d_bw(x, d_y, self.weights.shape, self.strides, self.padding)\n d_bias = d_y.sum(axis=0)\n\n self.weights.assign((d_w * learning_rate).neg() + self.weights)\n self.bias.assign((d_bias * learning_rate).neg() + self.bias)\n\n return dx\n\n\ndef set_protocol(new_prot):\n core.Layer.prot = new_prot\n"
] |
[
[
"numpy.random.normal",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sarvex/privacy
|
[
"7c4f5bab0964bd32b7ceafa009d9488920856440",
"7c4f5bab0964bd32b7ceafa009d9488920856440",
"7c4f5bab0964bd32b7ceafa009d9488920856440"
] |
[
"tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/membership_inference_attack_test.py",
"tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/models.py",
"tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation.py"
] |
[
"# Copyright 2020, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.utils.\"\"\"\nfrom absl.testing import absltest\nimport numpy as np\n\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import membership_inference_attack as mia\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import DataSize\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SingleSliceSpec\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingFeature\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec\n\n\ndef get_test_input(n_train, n_test):\n \"\"\"Get example inputs for attacks.\"\"\"\n rng = np.random.RandomState(4)\n return AttackInputData(\n logits_train=rng.randn(n_train, 5) + 0.2,\n logits_test=rng.randn(n_test, 5) + 0.2,\n labels_train=np.array([i % 5 for i in range(n_train)]),\n labels_test=np.array([i % 5 for i in range(n_test)]))\n\n\ndef get_test_input_logits_only(n_train, n_test):\n \"\"\"Get example input logits for attacks.\"\"\"\n rng = np.random.RandomState(4)\n return AttackInputData(\n logits_train=rng.randn(n_train, 5) + 0.2,\n logits_test=rng.randn(n_test, 5) + 0.2)\n\n\nclass RunAttacksTest(absltest.TestCase):\n\n def test_run_attacks_size(self):\n result = mia.run_attacks(\n get_test_input(100, 100), SlicingSpec(),\n (AttackType.THRESHOLD_ATTACK, AttackType.LOGISTIC_REGRESSION))\n\n self.assertLen(result.single_attack_results, 2)\n\n def test_trained_attacks_logits_only_size(self):\n result = mia.run_attacks(\n get_test_input_logits_only(100, 100), SlicingSpec(),\n (AttackType.LOGISTIC_REGRESSION,))\n\n self.assertLen(result.single_attack_results, 1)\n\n def test_run_attack_trained_sets_attack_type(self):\n result = mia._run_attack(\n get_test_input(100, 100), AttackType.LOGISTIC_REGRESSION)\n\n self.assertEqual(result.attack_type, AttackType.LOGISTIC_REGRESSION)\n\n def test_run_attack_threshold_sets_attack_type(self):\n result = mia._run_attack(\n get_test_input(100, 100), AttackType.THRESHOLD_ATTACK)\n\n self.assertEqual(result.attack_type, AttackType.THRESHOLD_ATTACK)\n\n def test_run_attack_threshold_entropy_sets_attack_type(self):\n result = mia._run_attack(\n get_test_input(100, 100), AttackType.THRESHOLD_ENTROPY_ATTACK)\n\n self.assertEqual(result.attack_type, AttackType.THRESHOLD_ENTROPY_ATTACK)\n\n def test_run_attack_threshold_sets_membership_scores(self):\n result = mia._run_attack(\n get_test_input(100, 50), AttackType.THRESHOLD_ATTACK)\n\n self.assertLen(result.membership_scores_train, 100)\n 
self.assertLen(result.membership_scores_test, 50)\n\n def test_run_attack_threshold_entropy_sets_membership_scores(self):\n result = mia._run_attack(\n get_test_input(100, 50), AttackType.THRESHOLD_ENTROPY_ATTACK)\n\n self.assertLen(result.membership_scores_train, 100)\n self.assertLen(result.membership_scores_test, 50)\n\n def test_run_attack_threshold_calculates_correct_auc(self):\n result = mia._run_attack(\n AttackInputData(\n loss_train=np.array([0.1, 0.2, 1.3, 0.4, 0.5, 0.6]),\n loss_test=np.array([1.1, 1.2, 1.3, 0.4, 1.5, 1.6])),\n AttackType.THRESHOLD_ATTACK)\n\n np.testing.assert_almost_equal(result.roc_curve.get_auc(), 0.83, decimal=2)\n\n def test_run_attack_threshold_entropy_calculates_correct_auc(self):\n result = mia._run_attack(\n AttackInputData(\n entropy_train=np.array([0.1, 0.2, 1.3, 0.4, 0.5, 0.6]),\n entropy_test=np.array([1.1, 1.2, 1.3, 0.4, 1.5, 1.6])),\n AttackType.THRESHOLD_ENTROPY_ATTACK)\n\n np.testing.assert_almost_equal(result.roc_curve.get_auc(), 0.83, decimal=2)\n\n def test_run_attack_by_slice(self):\n result = mia.run_attacks(\n get_test_input(100, 100), SlicingSpec(by_class=True),\n (AttackType.THRESHOLD_ATTACK,))\n\n self.assertLen(result.single_attack_results, 6)\n expected_slice = SingleSliceSpec(SlicingFeature.CLASS, 2)\n self.assertEqual(result.single_attack_results[3].slice_spec, expected_slice)\n\n def test_accuracy(self):\n predictions = [[0.5, 0.2, 0.3], [0.1, 0.6, 0.3], [0.5, 0.2, 0.3]]\n logits = [[1, -1, -3], [-3, -1, -2], [9, 8, 8.5]]\n labels = [0, 1, 2]\n self.assertEqual(mia._get_accuracy(predictions, labels), 2 / 3)\n self.assertEqual(mia._get_accuracy(logits, labels), 2 / 3)\n # If accuracy is already present, simply return it.\n self.assertIsNone(mia._get_accuracy(None, labels))\n\n def test_run_compute_membership_probability_correct_probs(self):\n result = mia._compute_membership_probability(\n AttackInputData(\n loss_train=np.array([1, 1, 1, 10, 100]),\n loss_test=np.array([10, 100, 100, 1000, 10000])))\n\n np.testing.assert_almost_equal(\n result.train_membership_probs, [1, 1, 1, 0.5, 0.33], decimal=2)\n np.testing.assert_almost_equal(\n result.test_membership_probs, [0.5, 0.33, 0.33, 0, 0], decimal=2)\n\n def test_run_attack_data_size(self):\n result = mia.run_attacks(\n get_test_input(100, 80), SlicingSpec(by_class=True),\n (AttackType.THRESHOLD_ATTACK,))\n self.assertEqual(result.single_attack_results[0].data_size,\n DataSize(ntrain=100, ntest=80))\n self.assertEqual(result.single_attack_results[3].data_size,\n DataSize(ntrain=20, ntest=16))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# Copyright 2020, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Trained models for membership inference attacks.\"\"\"\n\nfrom dataclasses import dataclass\nimport numpy as np\nfrom sklearn import ensemble\nfrom sklearn import linear_model\nfrom sklearn import model_selection\nfrom sklearn import neighbors\nfrom sklearn import neural_network\n\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import DataSize\n\n\n@dataclass\nclass AttackerData:\n \"\"\"Input data for an ML classifier attack.\n\n This includes only the data, and not configuration.\n \"\"\"\n\n features_train: np.ndarray = None\n # element-wise boolean array denoting if the example was part of training.\n is_training_labels_train: np.ndarray = None\n\n features_test: np.ndarray = None\n # element-wise boolean array denoting if the example was part of training.\n is_training_labels_test: np.ndarray = None\n\n data_size: DataSize = None\n\n\ndef create_attacker_data(attack_input_data: AttackInputData,\n test_fraction: float = 0.25,\n balance: bool = True) -> AttackerData:\n \"\"\"Prepare AttackInputData to train ML attackers.\n\n Combines logits and losses and performs a random train-test split.\n\n Args:\n attack_input_data: Original AttackInputData\n test_fraction: Fraction of the dataset to include in the test split.\n balance: Whether the training and test sets for the membership inference\n attacker should have a balanced (roughly equal) number of samples\n from the training and test sets used to develop the model\n under attack.\n\n Returns:\n AttackerData.\n \"\"\"\n attack_input_train = _column_stack(attack_input_data.logits_or_probs_train,\n attack_input_data.get_loss_train())\n attack_input_test = _column_stack(attack_input_data.logits_or_probs_test,\n attack_input_data.get_loss_test())\n\n if balance:\n min_size = min(attack_input_data.get_train_size(),\n attack_input_data.get_test_size())\n attack_input_train = _sample_multidimensional_array(attack_input_train,\n min_size)\n attack_input_test = _sample_multidimensional_array(attack_input_test,\n min_size)\n ntrain, ntest = attack_input_train.shape[0], attack_input_test.shape[0]\n\n features_all = np.concatenate((attack_input_train, attack_input_test))\n\n labels_all = np.concatenate(((np.zeros(ntrain)), (np.ones(ntest))))\n\n # Perform a train-test split\n features_train, features_test, is_training_labels_train, is_training_labels_test = model_selection.train_test_split(\n features_all, labels_all, test_size=test_fraction, stratify=labels_all)\n return AttackerData(features_train, is_training_labels_train, features_test,\n is_training_labels_test,\n DataSize(ntrain=ntrain, ntest=ntest))\n\n\ndef _sample_multidimensional_array(array, size):\n indices = np.random.choice(len(array), size, replace=False)\n return array[indices]\n\n\ndef _column_stack(logits, 
loss):\n \"\"\"Stacks logits and losses.\n\n In case that only one exists, returns that one.\n Args:\n logits: logits array\n loss: loss array\n\n Returns:\n stacked logits and losses (or only one if both do not exist).\n \"\"\"\n if logits is None:\n return np.expand_dims(loss, axis=-1)\n if loss is None:\n return logits\n return np.column_stack((logits, loss))\n\n\nclass TrainedAttacker:\n \"\"\"Base class for training attack models.\"\"\"\n model = None\n\n def train_model(self, input_features, is_training_labels):\n \"\"\"Train an attacker model.\n\n This is trained on examples from train and test datasets.\n Args:\n input_features : array-like of shape (n_samples, n_features) Training\n vector, where n_samples is the number of samples and n_features is the\n number of features.\n is_training_labels : a vector of booleans of shape (n_samples, )\n representing whether the sample is in the training set or not.\n \"\"\"\n raise NotImplementedError()\n\n def predict(self, input_features):\n \"\"\"Predicts whether input_features belongs to train or test.\n\n Args:\n input_features : A vector of features with the same semantics as x_train\n passed to train_model.\n Returns:\n An array of probabilities denoting whether the example belongs to test.\n \"\"\"\n if self.model is None:\n raise AssertionError(\n 'Model not trained yet. Please call train_model first.')\n return self.model.predict_proba(input_features)[:, 1]\n\n\nclass LogisticRegressionAttacker(TrainedAttacker):\n \"\"\"Logistic regression attacker.\"\"\"\n\n def train_model(self, input_features, is_training_labels):\n lr = linear_model.LogisticRegression(solver='lbfgs')\n param_grid = {\n 'C': np.logspace(-4, 2, 10),\n }\n model = model_selection.GridSearchCV(\n lr, param_grid=param_grid, cv=3, n_jobs=1, verbose=0)\n model.fit(input_features, is_training_labels)\n self.model = model\n\n\nclass MultilayerPerceptronAttacker(TrainedAttacker):\n \"\"\"Multilayer perceptron attacker.\"\"\"\n\n def train_model(self, input_features, is_training_labels):\n mlp_model = neural_network.MLPClassifier()\n param_grid = {\n 'hidden_layer_sizes': [(64,), (32, 32)],\n 'solver': ['adam'],\n 'alpha': [0.0001, 0.001, 0.01],\n }\n n_jobs = -1\n model = model_selection.GridSearchCV(\n mlp_model, param_grid=param_grid, cv=3, n_jobs=n_jobs, verbose=0)\n model.fit(input_features, is_training_labels)\n self.model = model\n\n\nclass RandomForestAttacker(TrainedAttacker):\n \"\"\"Random forest attacker.\"\"\"\n\n def train_model(self, input_features, is_training_labels):\n \"\"\"Setup a random forest pipeline with cross-validation.\"\"\"\n rf_model = ensemble.RandomForestClassifier()\n\n param_grid = {\n 'n_estimators': [100],\n 'max_features': ['auto', 'sqrt'],\n 'max_depth': [5, 10, 20, None],\n 'min_samples_split': [2, 5, 10],\n 'min_samples_leaf': [1, 2, 4]\n }\n n_jobs = -1\n model = model_selection.GridSearchCV(\n rf_model, param_grid=param_grid, cv=3, n_jobs=n_jobs, verbose=0)\n model.fit(input_features, is_training_labels)\n self.model = model\n\n\nclass KNearestNeighborsAttacker(TrainedAttacker):\n \"\"\"K nearest neighbor attacker.\"\"\"\n\n def train_model(self, input_features, is_training_labels):\n knn_model = neighbors.KNeighborsClassifier()\n param_grid = {\n 'n_neighbors': [3, 5, 7],\n }\n model = model_selection.GridSearchCV(\n knn_model, param_grid=param_grid, cv=3, n_jobs=1, verbose=0)\n model.fit(input_features, is_training_labels)\n self.model = model\n",
"# Copyright 2020, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"A hook and a function in tf estimator for membership inference attack.\"\"\"\n\nimport os\nfrom typing import Iterable\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import membership_inference_attack as mia\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.utils import log_loss\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.utils_tensorboard import write_results_to_tensorboard\n\n\ndef calculate_losses(estimator, input_fn, labels):\n \"\"\"Get predictions and losses for samples.\n\n The assumptions are 1) the loss is cross-entropy loss, and 2) user have\n specified prediction mode to return predictions, e.g.,\n when mode == tf.estimator.ModeKeys.PREDICT, the model function returns\n tf.estimator.EstimatorSpec(mode=mode, predictions=tf.nn.softmax(logits)).\n\n Args:\n estimator: model to make prediction\n input_fn: input function to be used in estimator.predict\n labels: array of size (n_samples, ), true labels of samples (integer valued)\n\n Returns:\n preds: probability vector of each sample\n loss: cross entropy loss of each sample\n \"\"\"\n pred = np.array(list(estimator.predict(input_fn=input_fn)))\n loss = log_loss(labels, pred)\n return pred, loss\n\n\nclass MembershipInferenceTrainingHook(tf.estimator.SessionRunHook):\n \"\"\"Training hook to perform membership inference attack on epoch end.\"\"\"\n\n def __init__(\n self,\n estimator,\n in_train, out_train,\n input_fn_constructor,\n slicing_spec: SlicingSpec = None,\n attack_types: Iterable[AttackType] = (AttackType.THRESHOLD_ATTACK,),\n tensorboard_dir=None,\n tensorboard_merge_classifiers=False):\n \"\"\"Initialize the hook.\n\n Args:\n estimator: model to be tested\n in_train: (in_training samples, in_training labels)\n out_train: (out_training samples, out_training labels)\n input_fn_constructor: a function that receives sample, label and construct\n the input_fn for model prediction\n slicing_spec: slicing specification of the attack\n attack_types: a list of attacks, each of type AttackType\n tensorboard_dir: directory for tensorboard summary\n tensorboard_merge_classifiers: if true, plot different classifiers with\n the same slicing_spec and metric in the same figure\n \"\"\"\n in_train_data, self._in_train_labels = in_train\n out_train_data, self._out_train_labels = out_train\n\n # Define the 
input functions for both in and out-training samples.\n self._in_train_input_fn = input_fn_constructor(in_train_data,\n self._in_train_labels)\n self._out_train_input_fn = input_fn_constructor(out_train_data,\n self._out_train_labels)\n self._estimator = estimator\n self._slicing_spec = slicing_spec\n self._attack_types = attack_types\n self._tensorboard_merge_classifiers = tensorboard_merge_classifiers\n if tensorboard_dir:\n if tensorboard_merge_classifiers:\n self._writers = {}\n with tf.Graph().as_default():\n for attack_type in attack_types:\n self._writers[attack_type.name] = tf.summary.FileWriter(\n os.path.join(tensorboard_dir, 'MI', attack_type.name))\n else:\n with tf.Graph().as_default():\n self._writers = tf.summary.FileWriter(\n os.path.join(tensorboard_dir, 'MI'))\n logging.info('Will write to tensorboard.')\n else:\n self._writers = None\n\n def end(self, session):\n results = run_attack_helper(self._estimator,\n self._in_train_input_fn,\n self._out_train_input_fn,\n self._in_train_labels, self._out_train_labels,\n self._slicing_spec,\n self._attack_types)\n logging.info(results)\n\n att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(\n results)\n print('Attack result:')\n print('\\n'.join([' %s: %.4f' % (', '.join([s, t, m]), v) for t, s, m, v in\n zip(att_types, att_slices, att_metrics, att_values)]))\n\n # Write to tensorboard if tensorboard_dir is specified\n global_step = self._estimator.get_variable_value('global_step')\n if self._writers is not None:\n write_results_to_tensorboard(results, self._writers, global_step,\n self._tensorboard_merge_classifiers)\n\n\ndef run_attack_on_tf_estimator_model(\n estimator, in_train, out_train,\n input_fn_constructor,\n slicing_spec: SlicingSpec = None,\n attack_types: Iterable[AttackType] = (AttackType.THRESHOLD_ATTACK,)):\n \"\"\"Performs the attack in the end of training.\n\n Args:\n estimator: model to be tested\n in_train: (in_training samples, in_training labels)\n out_train: (out_training samples, out_training labels)\n input_fn_constructor: a function that receives sample, label and construct\n the input_fn for model prediction\n slicing_spec: slicing specification of the attack\n attack_types: a list of attacks, each of type AttackType\n Returns:\n Results of the attack\n \"\"\"\n in_train_data, in_train_labels = in_train\n out_train_data, out_train_labels = out_train\n\n # Define the input functions for both in and out-training samples.\n in_train_input_fn = input_fn_constructor(in_train_data, in_train_labels)\n out_train_input_fn = input_fn_constructor(out_train_data, out_train_labels)\n\n # Call the helper to run the attack.\n results = run_attack_helper(estimator,\n in_train_input_fn, out_train_input_fn,\n in_train_labels, out_train_labels,\n slicing_spec,\n attack_types)\n logging.info('End of training attack:')\n logging.info(results)\n return results\n\n\ndef run_attack_helper(\n estimator,\n in_train_input_fn, out_train_input_fn,\n in_train_labels, out_train_labels,\n slicing_spec: SlicingSpec = None,\n attack_types: Iterable[AttackType] = (AttackType.THRESHOLD_ATTACK,)):\n \"\"\"A helper function to perform attack.\n\n Args:\n estimator: model to be tested\n in_train_input_fn: input_fn for in training data\n out_train_input_fn: input_fn for out of training data\n in_train_labels: in training labels\n out_train_labels: out of training labels\n slicing_spec: slicing specification of the attack\n attack_types: a list of attacks, each of type AttackType\n Returns:\n Results of the attack\n 
\"\"\"\n # Compute predictions and losses\n in_train_pred, in_train_loss = calculate_losses(estimator,\n in_train_input_fn,\n in_train_labels)\n out_train_pred, out_train_loss = calculate_losses(estimator,\n out_train_input_fn,\n out_train_labels)\n attack_input = AttackInputData(\n logits_train=in_train_pred, logits_test=out_train_pred,\n labels_train=in_train_labels, labels_test=out_train_labels,\n loss_train=in_train_loss, loss_test=out_train_loss\n )\n results = mia.run_attacks(attack_input,\n slicing_spec=slicing_spec,\n attack_types=attack_types)\n return results\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.random.RandomState"
],
[
"sklearn.neural_network.MLPClassifier",
"sklearn.model_selection.GridSearchCV",
"numpy.expand_dims",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"numpy.logspace",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.concatenate",
"numpy.ones",
"numpy.column_stack",
"numpy.zeros"
],
[
"tensorflow.compat.v1.Graph"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LegionChang/CoTNet
|
[
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280"
] |
[
"models/tnt.py",
"datasets/mixup.py"
] |
[
"\"\"\" Transformer in Transformer (TNT) in PyTorch\n\nA PyTorch implement of TNT as described in\n'Transformer in Transformer' - https://arxiv.org/abs/2103.00112\n\nThe official mindspore code is released and available at\nhttps://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT\n\"\"\"\nimport math\nimport torch\nimport torch.nn as nn\nfrom functools import partial\n\nfrom config import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg\nfrom .layers import Mlp, DropPath, trunc_normal_\nfrom .layers.helpers import to_2tuple\nfrom .registry import register_model\nfrom .vision_transformer import resize_pos_embed\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'pixel_embed.proj', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n 'tnt_s_patch16_224': _cfg(\n url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n ),\n 'tnt_b_patch16_224': _cfg(\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n ),\n}\n\n\nclass Attention(nn.Module):\n \"\"\" Multi-Head Attention\n \"\"\"\n def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.num_heads = num_heads\n head_dim = hidden_dim // num_heads\n self.head_dim = head_dim\n self.scale = head_dim ** -0.5\n\n self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)\n self.v = nn.Linear(dim, dim, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop, inplace=True)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop, inplace=True)\n\n def forward(self, x):\n B, N, C = x.shape\n qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)\n q, k = qk[0], qk[1] # make torchscript happy (cannot use tensor as tuple)\n v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, -1)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Block(nn.Module):\n \"\"\" TNT Block\n \"\"\"\n def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4.,\n qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n # Inner transformer\n self.norm_in = norm_layer(in_dim)\n self.attn_in = Attention(\n in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias,\n attn_drop=attn_drop, proj_drop=drop)\n \n self.norm_mlp_in = norm_layer(in_dim)\n self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4),\n out_features=in_dim, act_layer=act_layer, drop=drop)\n \n self.norm1_proj = norm_layer(in_dim)\n self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True)\n # Outer transformer\n self.norm_out = norm_layer(dim)\n self.attn_out = Attention(\n dim, dim, num_heads=num_heads, qkv_bias=qkv_bias,\n attn_drop=attn_drop, proj_drop=drop)\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n \n self.norm_mlp = norm_layer(dim)\n self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),\n out_features=dim, act_layer=act_layer, drop=drop)\n\n def forward(self, pixel_embed, patch_embed):\n # inner\n pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))\n pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))\n # outer\n B, N, C = patch_embed.size()\n patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))\n patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))\n patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))\n return pixel_embed, patch_embed\n\n\nclass PixelEmbed(nn.Module):\n \"\"\" Image to Pixel Embedding\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n # grid_size property necessary for resizing positional embedding\n self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\n num_patches = (self.grid_size[0]) * (self.grid_size[1])\n self.img_size = img_size\n self.num_patches = num_patches\n self.in_dim = in_dim\n new_patch_size = [math.ceil(ps / stride) for ps in patch_size]\n self.new_patch_size = new_patch_size\n\n self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)\n self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size)\n\n def forward(self, x, pixel_pos):\n B, C, H, W = x.shape\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(x)\n x = self.unfold(x)\n x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1])\n x = x + pixel_pos\n x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2)\n return x\n\n\nclass TNT(nn.Module):\n \"\"\" Transformer in Transformer - https://arxiv.org/abs/2103.00112\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12,\n num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,\n drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4, global_pool='avg'):\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n\n self.pixel_embed = PixelEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride)\n num_patches = self.pixel_embed.num_patches\n self.num_patches = num_patches\n new_patch_size = self.pixel_embed.new_patch_size\n num_pixel = new_patch_size[0] * new_patch_size[1]\n \n self.norm1_proj = norm_layer(num_pixel * in_dim)\n self.proj = nn.Linear(num_pixel * in_dim, embed_dim)\n self.norm2_proj = norm_layer(embed_dim)\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))\n self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1]))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n blocks = []\n for i in range(depth):\n blocks.append(Block(\n dim=embed_dim, in_dim=in_dim, 
num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head,\n mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[i], norm_layer=norm_layer))\n self.blocks = nn.ModuleList(blocks)\n self.norm = norm_layer(embed_dim)\n\n self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.cls_token, std=.02)\n trunc_normal_(self.patch_pos, std=.02)\n trunc_normal_(self.pixel_pos, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'patch_pos', 'pixel_pos', 'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n B = x.shape[0]\n pixel_embed = self.pixel_embed(x, self.pixel_pos)\n \n patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1))))\n patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1)\n patch_embed = patch_embed + self.patch_pos\n patch_embed = self.pos_drop(patch_embed)\n\n for blk in self.blocks:\n pixel_embed, patch_embed = blk(pixel_embed, patch_embed)\n\n patch_embed = self.norm(patch_embed)\n return patch_embed[:, 0]\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef checkpoint_filter_fn(state_dict, model):\n \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\n if state_dict['patch_pos'].shape != model.patch_pos.shape:\n state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'],\n model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size)\n return state_dict\n\n\ndef _create_tnt(variant, pretrained=False, **kwargs):\n if kwargs.get('features_only', None):\n raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n model = build_model_with_cfg(\n TNT, variant, pretrained,\n default_cfg=default_cfgs[variant],\n pretrained_filter_fn=checkpoint_filter_fn,\n **kwargs)\n return model\n\n\n@register_model\ndef tnt_s_patch16_224(pretrained=False, **kwargs):\n model_cfg = dict(\n patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4,\n qkv_bias=False, **kwargs)\n model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg)\n return model\n\n\n@register_model\ndef tnt_b_patch16_224(pretrained=False, **kwargs):\n model_cfg = dict(\n patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4,\n qkv_bias=False, **kwargs)\n model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg)\n return model\n",
"\"\"\" Mixup and Cutmix\n\nPapers:\nmixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)\n\nCutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)\n\nCode Reference:\nCutMix: https://github.com/clovaai/CutMix-PyTorch\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport numpy as np\nimport torch\n\n\ndef one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):\n x = x.long().view(-1, 1)\n return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)\n\n\ndef mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):\n off_value = smoothing / num_classes\n on_value = 1. - smoothing + off_value\n y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)\n y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)\n return y1 * lam + y2 * (1. - lam)\n\n\ndef rand_bbox(img_shape, lam, margin=0., count=None):\n \"\"\" Standard CutMix bounding-box\n Generates a random square bbox based on lambda value. This impl includes\n support for enforcing a border margin as percent of bbox dimensions.\n\n Args:\n img_shape (tuple): Image shape as tuple\n lam (float): Cutmix lambda value\n margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)\n count (int): Number of bbox to generate\n \"\"\"\n ratio = np.sqrt(1 - lam)\n img_h, img_w = img_shape[-2:]\n cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)\n margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)\n cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)\n cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)\n yl = np.clip(cy - cut_h // 2, 0, img_h)\n yh = np.clip(cy + cut_h // 2, 0, img_h)\n xl = np.clip(cx - cut_w // 2, 0, img_w)\n xh = np.clip(cx + cut_w // 2, 0, img_w)\n return yl, yh, xl, xh\n\n\ndef rand_bbox_minmax(img_shape, minmax, count=None):\n \"\"\" Min-Max CutMix bounding-box\n Inspired by Darknet cutmix impl, generates a random rectangular bbox\n based on min/max percent values applied to each dimension of the input image.\n\n Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.\n\n Args:\n img_shape (tuple): Image shape as tuple\n minmax (tuple or list): Min and max bbox ratios (as percent of image size)\n count (int): Number of bbox to generate\n \"\"\"\n assert len(minmax) == 2\n img_h, img_w = img_shape[-2:]\n cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)\n cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)\n yl = np.random.randint(0, img_h - cut_h, size=count)\n xl = np.random.randint(0, img_w - cut_w, size=count)\n yu = yl + cut_h\n xu = xl + cut_w\n return yl, yu, xl, xu\n\n\ndef cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):\n \"\"\" Generate bbox and apply lambda correction.\n \"\"\"\n if ratio_minmax is not None:\n yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)\n else:\n yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)\n if correct_lam or ratio_minmax is not None:\n bbox_area = (yu - yl) * (xu - xl)\n lam = 1. 
- bbox_area / float(img_shape[-2] * img_shape[-1])\n return (yl, yu, xl, xu), lam\n\n\nclass Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if len(self.cutmix_minmax) == 0:\n self.cutmix_minmax = None\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=np.bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),\n np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=np.bool)\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np.random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand() < self.switch_prob\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0).mul_(1. 
- lam)\n x.mul_(lam).add_(x_flipped)\n return lam\n\n def __call__(self, x, target):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n lam = self._mix_elem(x)\n elif self.mode == 'pair':\n lam = self._mix_pair(x)\n else:\n lam = self._mix_batch(x)\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing)\n return x, target\n\n\nclass FastCollateMixup(Mixup):\n \"\"\" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch\n\n A Mixup impl that's performed while collating the batches.\n \"\"\"\n\n def _mix_elem_collate(self, output, batch, half=False):\n batch_size = len(batch)\n num_elem = batch_size // 2 if half else batch_size\n assert len(output) == num_elem\n lam_batch, use_cutmix = self._params_per_elem(num_elem)\n for i in range(num_elem):\n j = batch_size - i - 1\n lam = lam_batch[i]\n mixed = batch[i][0]\n if lam != 1.:\n if use_cutmix[i]:\n if not half:\n mixed = mixed.copy()\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)\n np.rint(mixed, out=mixed)\n output[i] += torch.from_numpy(mixed.astype(np.uint8))\n if half:\n lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))\n return torch.tensor(lam_batch).unsqueeze(1)\n\n def _mix_pair_collate(self, output, batch):\n batch_size = len(batch)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n mixed_i = batch[i][0]\n mixed_j = batch[j][0]\n assert 0 <= lam <= 1.0\n if lam < 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n patch_i = mixed_i[:, yl:yh, xl:xh].copy()\n mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]\n mixed_j[:, yl:yh, xl:xh] = patch_i\n lam_batch[i] = lam\n else:\n mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)\n mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)\n mixed_i = mixed_temp\n np.rint(mixed_j, out=mixed_j)\n np.rint(mixed_i, out=mixed_i)\n output[i] += torch.from_numpy(mixed_i.astype(np.uint8))\n output[j] += torch.from_numpy(mixed_j.astype(np.uint8))\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch).unsqueeze(1)\n\n def _mix_batch_collate(self, output, batch):\n batch_size = len(batch)\n lam, use_cutmix = self._params_per_batch()\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n for i in range(batch_size):\n j = batch_size - i - 1\n mixed = batch[i][0]\n if lam != 1.:\n if use_cutmix:\n mixed = mixed.copy() # don't want to modify the original while iterating\n mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]\n else:\n mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)\n np.rint(mixed, out=mixed)\n output[i] += torch.from_numpy(mixed.astype(np.uint8))\n return lam\n\n def __call__(self, batch, _=None):\n batch_size = len(batch)\n assert batch_size % 2 == 0, 'Batch size should be even when using this'\n half = 'half' in self.mode\n if half:\n batch_size //= 2\n output = torch.zeros((batch_size, 
*batch[0][0].shape), dtype=torch.uint8)\n if self.mode == 'elem' or self.mode == 'half':\n lam = self._mix_elem_collate(output, batch, half=half)\n elif self.mode == 'pair':\n lam = self._mix_pair_collate(output, batch)\n else:\n lam = self._mix_batch_collate(output, batch)\n target = torch.tensor([b[1] for b in batch], dtype=torch.int64)\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')\n target = target[:batch_size]\n return output, target\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.linspace",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.Unfold",
"torch.nn.Identity"
],
[
"numpy.random.beta",
"numpy.sqrt",
"numpy.clip",
"torch.zeros",
"numpy.rint",
"numpy.ones",
"numpy.concatenate",
"torch.tensor",
"numpy.random.rand",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RC-Dynamics/de_pid_angle
|
[
"04302a2b19fe2a5caf2ba3b4c2c63be99f949b14"
] |
[
"main.py"
] |
[
"import socket\nimport json\nimport time\nfrom matplotlib import pyplot as plt\nfrom robot import Robot\nfrom path import Path\nfrom pid import PIDAngle\nfrom datetime import datetime\n\n\ndef restartRobot():\n\tdata = conn.recv(1024)\n\trecData = json.loads(data.decode('utf-8'))\n\trobot.setCoord(recData['robot0']['x'],recData['robot0']['y'],recData['robot0']['z'])\n\tx, y, theta = robot.getCoord()\n\trobot.setVel(0,0)\n\td = robot.getVel()\n\tconn.send(d.encode())\n\treturn x,y,theta\n\ndef updateRobot(i):\n\tdata = conn.recv(1024)\n\trecData = json.loads(data.decode('utf-8'))\n\trobot.setCoord(recData['robot0']['x'],recData['robot0']['y'],recData['robot0']['z'])\t\t\n\tx, y, theta = robot.getCoord()\n\t#robotPositions[countInt].append((x, y))\n\terro = pid.calcSpeeds(x, 130-y, 360-theta, i[0], 130-i[1])\n\t# print (\"t: {0:.2f} e: {1:.2f} x:{2:.2f} y:{3:.2f} rx:{4:.2f} ry:{5:.2f}\".format(theta, erro, i[0], i[1], x, y))\n\trobot.setVel(pid.getLeftSpeed(),pid.getRightSpeed())\n\td = robot.getVel()\n\tconn.send(d.encode())\n\treturn x,y,theta\n\n\n\n\n\n######################################################################################\nimport numpy as np \nimport random\nfrom tqdm import tqdm\n\nHOST = '127.0.0.1' # Local host\nPORT = 50007 # Arbitrary port\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(1)\n\nprint ('Waiting for connection...')\nconn, addr = s.accept()\nrobot = Robot()\npid = PIDAngle(0.1, 0, 3, 20)\npath = Path(40)\n\n\nprint ('Connected by client', addr)\n\nrobotPositions = []\nstartPoints = []\nendPoints = []\nstartPoints.append((125, 65))\nrestartFlag = False\n\npath.circleDiscretization(50)\n\n#while 1: #DEA a todo gene \ndef fitnessFunction(kp, ki, kd):\n\tpath.circleDiscretization(50)\n\n\t#\tprint(\"Trying: Kp: {:0.2f} Ki: {:0.2f} Kd: {:0.2f}\".format(kp, ki, kd))\n\tfitness_error = 0.0\n\tpid.setK(kp, ki, kd)\n\trobotPositions = []\n\tstartPoints = (125, 65)\n\n\tx, y, theta = restartRobot()\n\twhile (path.pointDistance(125, 65, x, y) != 0.0):\n\t\tx,y,theta = restartRobot()\n\t\n\tx, y, theta = restartRobot()\n\n\trestartFlag = False\n\tcountInt = 0 \n\tpid.setK(kp, ki, kd)\n\t#print (\"chegggou\")\n\twarningRestart = True\n\n\tfor i in path.getPath():\n\t\trobotPositions = []\n\t\tstart_time = time.time()\n\t\tendPoints = (i[0], i[1])\n\t\t#print(countInt)\n\t\twhile path.pointDistance(i[0], i[1], x, y) > 1 and time.time() - start_time < 0.06: \n\t\t\tx,y,theta = updateRobot(i)\n\t\t\tif(path.pointDistance(125, 65, x, y) <= 0.05 and countInt > 20 and warningRestart):\n\t\t\t\t#print(\"Warring: Possible Restart\")\n\t\t\t\twarningRestart = False\n\t\t\t\t\n\n\t\t\trobotPositions.append((x, y))\n\t\t\n\t\tif restartFlag == True:\n\t\t\tbreak\n\t\t\n\t\tfitness_error += path.getPerimeterError(startPoints, endPoints, robotPositions)\n\t\tstartPoints = (i[0], i[1])\n\t\tcountInt += 1\n\t#print(\"Path complete!, with Fitness = \", fitness_error)\n\tif countInt != len(path.getPath()):\n\t\tprint(\"ERROR: Count Interation Error {}\".format(countInt))\n\tif(fitness_error < 250):\n\t\tprint(\"Warring: Fitness error too low! 
{}\".format(fitness_error))\n\t\tprint(\"Warring: Set Fitness to Inf\")\n\t\tfitness_error = 99999999\n\t\n\trestartRobot()\n\ttime.sleep(0.2)\n\n\treturn fitness_error\n\n\nclass DEA:\n\tdef __init__(self, NP=15, D=3, MaxGen=500, CR=0.90, F=0.7):\n\t\t\"\"\"\n\t\tAttributes:\n\t\t\tNP = Number of Population\n\t\t\tD = Dimentions\n\t\t\tMaxGen = Max generations\n\t\t\tCR = Crossover and Mutation rate\n\t\t\tF = Scaling Factor \n\t\t\"\"\"\n\t\tself.NP = NP\n\t\tself.D = D\n\t\tself.MaxGen = MaxGen\n\t\tself.CR = CR\n\t\tself.F = F\n\t\tself.kpMax, self.kiMax, self.kdMax = 1, 1, 1\n\t\tself.timeStamp = (datetime.now()) \n\t\tself.logName = '_'.join(str(x) for x in (self.timeStamp.year,self.timeStamp.month, self.timeStamp.day, self.timeStamp.hour, self.timeStamp.minute))\n\t\t\n\t\tself.__init_csv()\n\t\tself.__init__population()\n\t\tself.__init__fitness()\n\n\n\tdef __init__population(self):\n\t\tkp = np.random.rand(self.NP, 1) * self.kpMax\n\t\tki = np.random.rand(self.NP, 1) * self.kiMax\n\t\tkd = np.random.rand(self.NP, 1) * self.kdMax\n\t\tpopulation = (np.hstack((kp, ki, kd)))\n\t\tself.population = np.zeros((self.NP, self.D), dtype=np.float)\n\t\t\n\t\tfor i in range(0, self.NP ):\n\t\t\tfor j in range(self.D):\n\t\t\t\tminD = np.min(population[:, j])\n\t\t\t\tmaxD = np.max(population[:, j])\n\t\t\t\tself.population[i][j] = minD + np.random.rand(1) * (maxD - minD)\n\t\t\n\t\t\n\tdef __init__fitness(self):\n\t\tself.fitness = np.zeros((self.NP, 1), dtype=np.float)\n\t\tfor i in range(self.NP ):\n\t\t\tself.fitness[i] = fitnessFunction(self.population[i][0], self.population[i][1], self.population[i][2])\n\t\t\tprint(\"PID: {}, Fitness: {}\".format(self.population[i], self.fitness[i]) )\n\n\tdef __get_fitness(self, genotype):\n\t\treturn fitnessFunction(genotype[0],genotype[1],genotype[2]);\n\n\tdef __init_csv(self):\n\t\twith open ('log/log_'+self.logName+'_epoch.csv', 'a') as log:\n\t\t\tlog.write(\"Epoch\\t Min\\t Mean\\t Best\\t\\n\")\n\n\t\twith open ('log/log_'+self.logName+'_population.csv', 'a') as log:\n\t\t\tlog.write(\"Kp\\tKi\\tKd\\tFitness\\t\\n\")\n\n\tdef forward(self):\n\t\t# Mutation and Cross Over\n\t\tt = tqdm(range(self.MaxGen))\n\t\tepoch = 0\n\t\tfor G in t:\n\n\t\t\twith open ('log/log_'+self.logName+'_population.csv', 'a') as log:\n\t\t\t\tlog.write(\"Epoch: {}\\n\".format(epoch))\n\t\t\t\t#logWrite = np.array2string(np.hstack((self.population, self.fitness)), formatter={'float_kind':lambda x: \"%.4f\" % x})\n\t\t\t\tPIDList = np.hstack((self.population, self.fitness))\n\t\t\t\tfor PID in PIDList:\n\t\t\t\t\tlog.write(\"{:0.4f}\\t{:0.4f}\\t{:0.4f}\\t{:0.4f}\\t\\n\".format(PID[0], PID[1], PID[2], PID[3]))\n\t\t\t\t\n\n\t\t\twith open ('log/log_'+self.logName+'_epoch.csv', 'a') as log:\n\t\t\t\tindexMin = np.argmin(self.fitness)\n\t\t\t\tminFit = np.min(self.fitness)\n\t\t\t\tmeanFit = np.mean(self.fitness)\n\t\t\t\tlogWrite = np.array2string( self.population[indexMin] , formatter={'float_kind':lambda x: \"%.4f\" % x})\n\t\t\t\tlog.write(\"{}\\t{:.4f}\\t{:.4f}\\t{}\\t\\n\".format(epoch, minFit, meanFit, logWrite))\n\t\t\t\n\t\t\tepoch += 1\n\t\t\n\t\t\tself.popG = np.zeros((self.NP, self.D), dtype=np.float)\n\t\t\tfor i in range(self.NP):\n\t\t\t\tr1, r2, r3 = random.sample([x for x in range(self.NP) if x != i], 3)\n\t\t\t\tjrand = np.random.randint(self.D)\n\n\t\t\t\tfor j in range(self.D):\n\t\t\t\t\tif(np.random.random(1) < self.CR) or (j == jrand):\n\t\t\t\t\t\tgeneR1 = self.population[r1, j]\n\t\t\t\t\t\tgeneR2 = self.population[r2, j]\n\t\t\t\t\t\tgeneR3 = 
self.population[r3, j]\n\t\t\t\t\t\tgene = geneR1 + self.F * (geneR2 - geneR3)\n\t\t\t\t\t\tif gene < 0.0:\n\t\t\t\t\t\t\tgene = np.random.random(1) * 0.01\n\t\t\t\t\t\tself.popG[i,j] = abs(gene)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.popG[i,j] = self.population[i, j]\n\t\t\t\t# Selection\n\t\t\t\tpopGFit = self.__get_fitness( self.popG[i])\n\t\t\t\tt.set_description(\"PID: {}, Fitness: {}\".format(self.popG[i],popGFit))\n\n\t\t\t\tif popGFit <= self.fitness[i]:\n\t\t\t\t\tself.population[i] = self.popG[i]\t\n\t\t\t\t\tself.fitness[i] = popGFit\n\n\n#######################################################################################\n\nclass GridSearch:\n\tdef __init__(self, KpS=0.0, KpE=1.0, KdS=0.0, KdE=3.0, step=0.1):\n\t\tself.KpS = KpS\n\t\tself.KpE = KpE\n\t\tself.KdS = KdS\n\t\tself.KdE = KdE\n\t\tself.step = step\n\t\tself.timeStamp = (datetime.now()) \n\t\tself.logName = '_'.join(str(x) for x in (self.timeStamp.year,self.timeStamp.month, self.timeStamp.day, self.timeStamp.hour, self.timeStamp.minute))\n\t\tself.__init_csv()\n\t\t\n\tdef __get_fitness(self, genotype):\n\t\treturn fitnessFunction(genotype[0], 0.0,genotype[1]);\n\n\tdef __init_csv(self):\n\t\twith open ('log/log_grid_'+self.logName+'_population.csv', 'a') as log:\n\t\t\tlog.write(\"Kp\\tKd\\tFitness\\t\\n\")\n\n\tdef forward(self):\n\t\t# Mutation and Cross Over\n\t\tKpAxis = []\n\t\tKdAxis = []\n\t\tfitAxis = []\n\t\tt = tqdm(np.arange(self.KpS, self.KpE, self.step))\n\n\t\tfor kp in t:\n\t\t\tfor kd in np.arange(self.KdS, self.KdE, self.step):\n\t\t\t\tfitness = self.__get_fitness(np.array([kp, kd]))\n\t\t\t\tKpAxis.append(kp)\n\t\t\t\tKdAxis.append(kd)\n\t\t\t\tfitAxis.append(fitness)\n\t\t\t\twith open ('log/log_grid_'+self.logName+'_population.csv', 'a') as log:\n\t\t\t\t\tlog.write(\"{:0.4f}\\t{:0.4f}\\t{:0.4f}\\t\\n\".format(kp, kd, fitness))\n\t\t\t\tt.set_description(\"PID: {:0.4f} {:0.4f}, Fitness: {:0.4f}\".format(kp, kd, fitness))\n\n\t\tfig = plt.figure()\n\t\tax = fig.gca(projection='3d')\n\t\tax.plot_trisurf(KpAxis, KdAxis, fitAxis, cmap=\"jet\")\n\t\tplt.show()\n\n\nif __name__ == '__main__':\n\ttry:\n\t\t#dea = DEA(NP=15, MaxGen=200)\n\t\t#dea.forward()\n\t\t#print (np.hstack((dea.population, dea.fitness)))\n\t\t# gs = GridSearch()\n\t\t# gs.forward()\n\t\tfitnessFunction(0.1, 0, 2)\n\t\tfitnessFunction(0.1, 0, 2)\n\t\tfitnessFunction(0.0509, 0.0009, 0.0084)\n\texcept:\n\t\tconn.close()\n\t\tprint ('Server closed.')\n\tconn.close()\n\tprint ('Server closed.')\n\n"
] |
[
[
"numpy.array",
"numpy.hstack",
"numpy.random.random",
"numpy.min",
"numpy.arange",
"numpy.max",
"numpy.argmin",
"numpy.random.rand",
"numpy.mean",
"numpy.random.randint",
"numpy.array2string",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Digusil/eventsearch
|
[
"3a8767d236ad8f84ac54a6db9c860623eb5036e1",
"3a8767d236ad8f84ac54a6db9c860623eb5036e1"
] |
[
"tests/test_signals.py",
"eventsearch/signals.py"
] |
[
"import unittest\n\nimport numpy as np\n\nfrom eventsearch.signals import SingleSignal, SmoothedSignal\nfrom eventsearch.utils import Smoother\nfrom .utils import TemporaryFolder\n\n\nclass TestSingleSingal(unittest.TestCase):\n def test_create_object(self):\n t = np.linspace(0, 5, 10)\n y = t ** 2\n\n test = SingleSignal(t=t, y=y)\n\n self.assertListEqual([len(t), 2], list(test.data.shape))\n\n def test_get_config(self):\n t = np.linspace(0, 5, 10)\n y = t ** 2\n\n test = SingleSignal(t=t, y=y)\n\n self.assertIn('creation_date', test.get_config())\n self.assertIn('identifier', test.get_config())\n self.assertIn('cached_properties', test.get_config())\n\n def test_save_and_load(self):\n t = np.linspace(0, 5, 10)\n y = t ** 2\n\n test = SingleSignal(t=t, y=y)\n\n with TemporaryFolder() as tmp:\n test.save(tmp.folder('test_signal.h5'))\n\n test2 = SingleSignal.load(tmp.folder('test_signal.h5'))\n\n np.testing.assert_array_equal(test.t, test2.t)\n np.testing.assert_array_equal(test.y, test2.y)\n self.assertEqual(test.get_config(), test2.get_config())\n\n\nclass TestSmoothedSignal(unittest.TestCase):\n def test_create_object(self):\n t = np.linspace(0, 5, 10)\n y = t ** 2\n\n test = SingleSignal(t=t, y=y)\n smoothed = test.to_smoothed_signal(smoother=Smoother(window_len=5))\n\n self.assertListEqual([len(t), 2], list(smoothed.data.shape))\n\n def test_get_config(self):\n t = np.linspace(0, 5, 10)\n y = t ** 2\n\n test = SingleSignal(t=t, y=y)\n smoothed = test.to_smoothed_signal()\n\n self.assertIn('creation_date', smoothed.get_config())\n self.assertIn('identifier', smoothed.get_config())\n self.assertIn('cached_properties', smoothed.get_config())\n self.assertIn('smoother', smoothed.get_config())\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n",
"from copy import copy\n\nimport numpy as np\nimport pandas as pd\nfrom cached_property import cached_property\n\nfrom .saving import save_signal_to_hdf5, load_signal_from_hdf5\nfrom .core import CoreSingleSignal\nfrom .utils import Smoother\n\n\nclass SingleSignal(CoreSingleSignal):\n def __init__(self, *args, **kwargs):\n \"\"\"\n Signal class that stores the data and support various calculations.\n\n Parameters\n ----------\n t: ndarray\n time points\n y: ndarray\n values corresponding to the time points t\n name: str or None, optional\n Name for the signal that will registrated in the global singal name dictionary if the parameter \"listed\" is\n True. If None, a unique generic singal name will be generated.\n listed: bool, optional\n If True the singal will be registrated in the global signal name dictionary. Default is True.\n \"\"\"\n super(SingleSignal, self).__init__(*args, **kwargs)\n\n def to_smoothed_signal(self, listed=False, **kwargs):\n \"\"\"\n Convert signal instance to smoothed signal instance.\n\n Parameters\n ----------\n listed: bool, optional\n If True the singal will be registrated in the global singal name dictionary. Default is False.\n\n Returns\n -------\n smoothed signal: SmoothedSignal\n \"\"\"\n if 'name' in kwargs:\n smoothed = SmoothedSignal(t=copy(self.t), y=copy(self.y), listed=listed, **kwargs)\n else:\n smoothed = SmoothedSignal(t=copy(self.t), y=copy(self.y), name='smoothed_' + self.name, listed=listed,\n **kwargs)\n\n if 'y' in smoothed.__dict__:\n del smoothed.__dict__['y']\n\n return smoothed\n\n def to_series(self):\n \"\"\"\n Returns\n -------\n signal: pandas Series\n \"\"\"\n return pd.Series(self.y, index=self.t, )\n\n def save(self, filepath, **kwargs):\n \"\"\"\n Save the object.\n\n Parameters\n ----------\n filepath: str\n Path / file name\n \"\"\"\n save_signal_to_hdf5(self, filepath, **kwargs)\n\n @classmethod\n def load(cls, filepath, **kwargs):\n \"\"\"\n Load signal.\n\n Parameters\n ----------\n filepath: str\n Path / file name\n\n Returns\n -------\n signal: SingleSignal\n \"\"\"\n return load_signal_from_hdf5(filepath, use_class=cls)\n\n\nclass SmoothedSignal(SingleSignal):\n def __init__(self, *args, smoother: Smoother = Smoother(), **kwargs):\n \"\"\"\n smoothed signal class\n\n Parameters\n ----------\n t: ndarray\n time points\n y: ndarray\n values corresponding to the time points t\n name: str or None, optional\n Name for the signal that will registrated in the global signal name dictionary if the parameter \"listed\" is\n True. If None, a unique generic singal name will be generated. Defalt is None.\n smoother: Smoother\n smoother object\n listed: bool, optional\n If True the singal will be registrated in the global signal name dictionary. 
Default is True.\n \"\"\"\n kwargs['name'] = self._gen_name(kwargs['name'])\n super(SmoothedSignal, self).__init__(*args, **kwargs)\n\n self._smoother = None\n\n self.register_cached_property('y')\n\n if isinstance(smoother, dict):\n self.smoother = Smoother(**smoother)\n else:\n self.smoother = smoother\n\n def _gen_name(self, name: str = None):\n \"\"\"\n Generate objact name.\n\n Parameters\n ----------\n name: str or None\n Custom object name\n\n Returns\n -------\n generated object name: str\n \"\"\"\n if name is None:\n return 'smoothed_{}_{}'.format(self.__class__.__name__, self.__identifier__)\n else:\n return name\n\n @property\n def smoother(self):\n \"\"\"\n Returns\n -------\n smoother object: Smoother\n \"\"\"\n return self._smoother\n\n @smoother.setter\n def smoother(self, value):\n \"\"\"\n Set smoother object.\n\n Parameters\n ----------\n value: Smoother\n smoother object\n \"\"\"\n self.del_cache()\n\n self._smoother = value\n\n @cached_property\n def y(self):\n \"\"\"\n Smoothed signal. The values will be cached.\n\n Returns\n -------\n smoothed signal values: ndarray\n \"\"\"\n return self.smoother.smooth(self._y)\n\n def get_config(self):\n \"\"\"\n Get config of the object for serialization.\n\n Returns\n -------\n object config: dict\n \"\"\"\n base_config = super(SmoothedSignal, self).get_config()\n config = {'smoother': self.smoother.get_config()}\n\n return dict(list(base_config.items()) + list(config.items()))\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.linspace"
],
[
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ethanwhite/EvergladesTools
|
[
"8ee39bad52752db8086206a39b982eb1bbb359cd"
] |
[
"Zooniverse/aggregate.py"
] |
[
"#aggregation script\nfrom distributed import wait\nimport pandas as pd\nimport geopandas as gpd\nfrom panoptes_client import Panoptes\nfrom shapely.geometry import box, Point\nimport json\nimport numpy as np\nimport os\nfrom datetime import datetime\nimport utils\nimport extract\nimport start_cluster\n\ndef download_data(everglades_watch, min_version, generate=False):\n #see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification\n classification_export = everglades_watch.get_export('classifications', generate=generate)\n rows = []\n for row in classification_export.csv_dictreader():\n rows.append(row) \n \n df = pd.DataFrame(rows)\n df[\"workflow_version\"] = df.workflow_version.astype(float)\n df = df[df.workflow_version > min_version] \n df = df[df.workflow_name ==\"Counts and Behavior\"] \n \n return df\n\ndef download_subject_data(everglades_watch, savedir, generate=False):\n #see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification\n classification_export = everglades_watch.get_export('subjects', generate=generate)\n rows = []\n for row in classification_export.csv_dictreader():\n rows.append(row) \n \n df = pd.DataFrame(rows) \n fname = \"{}/{}.csv\".format(savedir,\"everglades-watch-subjects\")\n \n #Overwrite subject set\n df.to_csv(fname)\n \n return df\n\ndef load_classifications(classifications_file, min_version):\n \"\"\"Load classifications from Zooniverse\n classifications_file: path to .csv\n \"\"\"\n df = pd.read_csv(classifications_file)\n df = df[df.workflow_version > min_version] \n df = df[df.workflow_name ==\"Counts and Behavior\"] \n return df\n \ndef parse_additional_response(x):\n annotation_dict = json.loads(x)[0]\n response = annotation_dict[\"value\"]\n return response\n\ndef parse_front_screen(x):\n #Extract and parse json\n annotation_dict = json.loads(x)[0]\n boxes = annotation_dict[\"value\"]\n \n if len(boxes) == 0:\n return pd.DataFrame({\"species\":[None],\"x\":[None],\"y\":[None],\"additional_observations\":[None]})\n \n boxes = pd.DataFrame(boxes)\n boxes = boxes.rename(columns = {\"tool_label\": \"label\"})\n \n #Loop through each box and create a dataframe \n box_df = pd.DataFrame()\n for index, box in boxes.iterrows():\n box_df = box_df.append(box,ignore_index=True)\n \n #Split label into Species and Behavior\n new_columns = box_df.label.str.split(\"-\",n=1,expand=True)\n box_df[\"species\"] = new_columns[0]\n box_df[\"behavior\"] = new_columns[1]\n \n return box_df[[\"label\",\"species\",\"behavior\",\"x\",\"y\"]]\n\ndef parse_uncommon_labels(x):\n boxes = pd.DataFrame(x)\n \n #This needs to be done carefully, as Zooniverse only returns the ordinal sublabel position\n sublabels= {0:\"Flying\",1:\"Courting\",2:\"Roosting/Nesting\",3:\"Unknown\"}\n \n #Loop through each box and create a dataframe \n box_df = pd.DataFrame()\n for index, box in boxes.iterrows():\n #we used to allow multiples\n value = box.details[0][\"value\"]\n if type(value) is list:\n value = value[0]\n \n #If unknown class assign it to species, else its a behavior\n if box.tool_label == \"Other\":\n box[\"WriteInSpecies\"] = value\n box[\"behavior\"] = None\n else:\n box[\"behavior\"] = sublabels[value]\n box_df = box_df.append(box,ignore_index=True)\n \n box_df = box_df.rename(columns = {\"tool_label\": \"species\"})\n box_df = box_df[[\"species\",\"behavior\",\"x\",\"y\"]]\n \n return box_df\n\ndef parse_additional_observations(x):\n \"\"\"Parse the optional second 
screen of less common labels\"\"\"\n uncommon_annotation_dict = json.loads(x)[2]\n \n results = [ ]\n \n if len(uncommon_annotation_dict[\"value\"]) > 0:\n results.append(parse_uncommon_labels(uncommon_annotation_dict[\"value\"]))\n #combine results into a single dataframe\n results = pd.concat(results)\n return results\n else: \n return None\n \ndef parse_annotations(x):\n #Parse each piece of the workflow\n front_screen = parse_front_screen(x)\n response = parse_additional_response(x)\n #TODO parse response and add to species class\n if response:\n front_screen[\"additional_observations\"] = None\n else:\n front_screen[\"additional_observations\"] = None\n \n if response == 'Yes':\n additional_screen = parse_additional_observations(x)\n if additional_screen is None:\n #Sometime a user selects yes, but there is no data - they were just curious\n return pd.concat([front_screen, additional_screen])\n else:\n return front_screen \n else:\n return front_screen\n\ndef parse_subject_data(x):\n \"\"\"Parse image metadata\"\"\"\n annotation_dict = json.loads(x)\n assert len(annotation_dict.keys()) == 1\n \n for key in annotation_dict:\n data = annotation_dict[key]\n try:\n utm_left, utm_bottom, utm_right, utm_top = data[\"bounds\"]\n except:\n return None\n subject_reference = data[\"subject_reference\"]\n resolution = data[\"resolution\"][0]\n \n try:\n site_data = os.path.splitext(os.path.basename(data[\"site\"]))[0]\n site = site_data.split(\"_\", maxsplit=1)[0]\n event = site_data.split(\"_\", maxsplit=1)[1]\n \n except:\n site = np.nan\n event = np.nan\n \n bounds = pd.DataFrame({\"subject_ids\":[key],\"image_utm_left\": [utm_left], \"image_utm_bottom\":[utm_bottom],\"image_utm_right\":[utm_right],\"image_utm_top\":[utm_top],\"site\":site,\"event\":event,\"resolution\":[resolution],\"subject_reference\":[subject_reference]})\n \n return bounds\n\ndef parse_birds(df):\n #remove empty annotations\n results = [ ]\n for index, row in df.iterrows(): \n #Extract annotations for each image\n annotations = parse_annotations(row.annotations) \n \n #Extract subject data\n bounds = parse_subject_data(row.subject_data)\n \n if bounds is None:\n print(\"Row {} had no spatial bounds\".format(row[\"subject_data\"]))\n continue\n \n #Assign columns\n annotations[\"classification_id\"] = row[\"classification_id\"]\n annotations[\"user_name\"] = row[\"user_name\"]\n annotations[\"created_at\"] = row[\"created_at\"]\n \n for col_name in bounds:\n annotations[col_name] = bounds[col_name].values[0]\n \n results.append(annotations)\n \n results = pd.concat(results)\n results = results.reset_index(drop=True)\n \n return results\n\ndef project_box(df):\n \"\"\"Convert points into utm coordinates\"\"\"\n df[\"box_utm_left\"] = df.image_utm_left + (df.resolution * df.x)\n df[\"box_utm_bottom\"] = df.image_utm_top - (df.resolution * df.y)\n df[\"box_utm_right\"] = df.image_utm_left + (df.resolution * (df.x + df.width))\n df[\"box_utm_top\"] = df.image_utm_top - (df.resolution * (df.y + df.height))\n \n #Create geopandas\n geoms = [box(left, bottom, right, top) for left, bottom, right, top in zip(df.box_utm_left, df.box_utm_bottom, df.box_utm_right, df.box_utm_top)]\n gdf = gpd.GeoDataFrame(df, geometry=geoms)\n \n #set CRS\n gdf.crs = 'epsg:32617'\n \n return gdf\n \ndef project_point(df):\n \"\"\"Convert points into utm coordinates\"\"\"\n df[\"utm_x\"] = df.image_utm_left + (df.resolution * df.x)\n df[\"utm_y\"] = df.image_utm_top - (df.resolution * df.y)\n\n #Create geopandas\n geoms = [Point(x,y) for x,y in 
zip(df.utm_x, df.utm_y)]\n gdf = gpd.GeoDataFrame(df, geometry=geoms)\n \n #set CRS, this is a bit complicated as we originally started uploading in epsg 32617 (UTM) and changed for mapbox to 3857 web mercator. We can infer from first digit, but its not ideal.\n utm17 = gdf[gdf.utm_x.astype('str').str.startswith(\"5\")]\n web_mercator = gdf[gdf.utm_x.astype('str').str.startswith(\"-8\")]\n web_mercator.crs = 'epsg:3857'\n reprojected_utm_points = web_mercator.to_crs(epsg=32617)\n reprojected_utm_points[\"utm_x\"] = reprojected_utm_points.geometry.apply(lambda x: x.coords[0][0])\n reprojected_utm_points[\"utm_y\"] = reprojected_utm_points.geometry.apply(lambda x: x.coords[0][1])\n gdf = pd.concat([utm17,reprojected_utm_points], ignore_index=True)\n \n \n gdf.crs = 'epsg:32617'\n \n return gdf\n\ndef spatial_join_image(group, IoU_threshold, buffer_size):\n #Unique index for each image\n unique_index_value = 0\n \n #Create spatial index\n spatial_index = group.sindex\n \n if len(group.classification_id.unique()) == 1:\n group[\"selected_index\"] = group.index.values\n else:\n for index, row in group.iterrows():\n geom = row[\"bbox\"]\n #Spatial clip to window using spatial index for faster querying\n possible_matches_index = list(spatial_index.intersection(geom.bounds))\n possible_matches = group.iloc[possible_matches_index]\n \n #If just matches itself, skip indexing\n if len(possible_matches) == 1:\n group.loc[index, \"selected_index\"] = unique_index_value\n else:\n boxes_to_merge = { }\n labels = []\n \n #Add target box to consider\n boxes_to_merge[index] = geom\n labels.append(row[\"species\"])\n \n #Find intersection over union\n for match_index, match_row in possible_matches.iterrows():\n match_geom = match_row[\"bbox\"]\n IoU = calculate_IoU(geom, match_geom)\n if IoU > IoU_threshold:\n boxes_to_merge[match_index] = match_geom\n labels.append(match_row[\"species\"])\n \n #Choose final box and labels\n average_geom = create_average_box(boxes_to_merge,buffer_size=buffer_size)\n for x in boxes_to_merge:\n group.loc[x,\"bbox\"] = average_geom\n group.loc[x,\"selected_index\"] = unique_index_value\n group.loc[x,\"species\"] = vote_on_label(labels)\n unique_index_value+=1\n \n return group\n \ndef spatial_join(gdf, IoU_threshold = 0.4, buffer_size=1, client=None):\n \"\"\"Find overlapping predictions in a geodataframe\n IoU_threshold: float threshold [0-1] for degree of overlap to merge annotations and vote on class\n buffer_size: in the units of the gdf, meters if projected, pixels if not.\n client: optional dask client to parallelize\n \"\"\" \n \n #Turn buffered points into boxes\n gdf[\"bbox\"] = [box(left, bottom, right, top) for left, bottom, right, top in gdf.geometry.buffer(buffer_size).bounds.values]\n \n #for each overlapping image\n results = []\n if client:\n futures = [] \n for name, group in gdf.groupby(\"subject_ids\"):\n future = client.submit(spatial_join_image,group=group, IoU_threshold=IoU_threshold, buffer_size=buffer_size)\n futures.append(future)\n wait(futures)\n for x in futures:\n try:\n result = x.result()\n results.append(result)\n except Exception as e:\n print(e.with_traceback())\n \n results = pd.concat(results) \n else: \n for name, group in gdf.groupby(\"subject_ids\"):\n group_result = spatial_join_image(group, IoU_threshold, buffer_size)\n results.append(group_result)\n results = pd.concat(results)\n \n print(\"spatial join complete\")\n final_gdf = gpd.GeoDataFrame(results)\n \n #remove duplicates\n final_gdf[\"geometry\"] = final_gdf[\"bbox\"]\n \n 
final_gdf.crs = gdf.crs\n \n return final_gdf\n\ndef vote_on_label(labels):\n choosen_label = pd.Series(labels).mode()[0]\n \n return choosen_label\n\ndef create_average_box(boxes_to_merge, buffer_size):\n \"\"\"Create a mean centered box based on input annotations\"\"\"\n centroid_x = np.mean([boxes_to_merge[x].centroid.x for x in boxes_to_merge])\n centroid_y = np.mean([boxes_to_merge[x].centroid.y for x in boxes_to_merge])\n point_geom = Point(centroid_x,centroid_y)\n left, bottom, right, top = point_geom.buffer(buffer_size).bounds\n geom = box(left, bottom, right, top)\n \n return geom\n \ndef calculate_IoU(geom, match):\n \"\"\"Calculate intersection-over-union scores for a pair of boxes\"\"\"\n intersection = geom.intersection(match).area\n union = geom.union(match).area\n iou = intersection/float(union)\n \n return iou\n\ndef run(classifications_file=None, savedir=\".\", download=False, generate=False,min_version=300, debug=False, client=None):\n \n #Authenticate\n if download:\n everglades_watch = utils.connect() \n df = download_data(everglades_watch, min_version, generate=generate)\n \n #add subject data to dir\n download_subject_data(everglades_watch, savedir=savedir)\n\n else:\n #Read file from zooniverse download\n df = load_classifications(classifications_file, min_version=min_version) \n \n #if debug for testing, just sample 20 rows \n if debug:\n df = df.sample(n=2000) \n \n #Parse JSON and filter\n #df = df[df.subject_ids==\"43845902\"]\n df = parse_birds(df)\n \n #Write parsed data\n df.to_csv(\"{}/{}.csv\".format(savedir, \"parsed_annotations\"),index=True)\n \n #Remove blank frames and spatial coordinates of bird points\n df = df[df.species.notna()]\n \n #save an unprojected copy\n geoms = [Point(x,y) for x,y in zip(df.x, df.y)]\n unprojected_data_gdf = gpd.GeoDataFrame(df, geometry=geoms)\n unprojected_data_gdf = spatial_join(unprojected_data_gdf, buffer_size=75, client=client) \n fname = \"{}/{}.shp\".format(savedir, \"everglades-watch-classifications_unprojected\")\n unprojected_data_gdf=unprojected_data_gdf.drop(columns=[\"bbox\"]) \n unprojected_data_gdf.to_file(fname) \n \n projected_data = df[~(df.image_utm_left == 0)]\n projected_data_gdf = project_point(projected_data)\n \n #Find overlapping annotations and select annotations. Vote on best class for final box\n projected_data_gdf = spatial_join(projected_data_gdf, buffer_size=1, client=client)\n \n #write shapefile\n projected_data_gdf=projected_data_gdf.drop(columns=[\"bbox\"])\n \n #Connect to index\n fname = \"{}/{}.shp\".format(savedir, \"everglades-watch-classifications\")\n projected_data_gdf.to_file(fname)\n\n return fname\n\nif __name__ == \"__main__\":\n #Download from Zooniverse and parse\n #Optional dask client\n #client = start_cluster.start(cpus=40, mem_size=\"8GB\")\n client = None\n \n fname = run(savedir=\"../App/Zooniverse/data/\", download=True, \n generate=False, min_version=300, client=client, debug=False)\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
sha256feng/LanczosNetwork
|
[
"c16910b32efecb24a6897343d88fd302229bb44d"
] |
[
"model/gcn.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['GCN']\n\n\nclass GCN(nn.Module):\n \"\"\" Graph Convolutional Networks,\n see reference below for more information\n\n Kipf, T.N. and Welling, M., 2016.\n Semi-supervised classification with graph convolutional networks.\n arXiv preprint arXiv:1609.02907.\n \"\"\"\n\n def __init__(self, config):\n super(GCN, self).__init__()\n self.config = config\n self.input_dim = config.model.input_dim\n self.hidden_dim = config.model.hidden_dim\n self.output_dim = config.model.output_dim\n self.num_layer = config.model.num_layer\n self.num_atom = config.dataset.num_atom\n self.num_edgetype = config.dataset.num_bond_type\n self.dropout = config.model.dropout if hasattr(config.model,\n 'dropout') else 0.0\n\n dim_list = [self.input_dim] + self.hidden_dim + [self.output_dim]\n self.filter = nn.ModuleList([\n nn.Linear(dim_list[tt] * (self.num_edgetype + 1), dim_list[tt + 1])\n for tt in range(self.num_layer)\n ] + [nn.Linear(dim_list[-2], dim_list[-1])])\n\n self.embedding = nn.Embedding(self.num_atom, self.input_dim)\n\n # attention\n self.att_func = nn.Sequential(*[nn.Linear(dim_list[-2], 1), nn.Sigmoid()])\n\n if config.model.loss == 'CrossEntropy':\n self.loss_func = torch.nn.CrossEntropyLoss()\n elif config.model.loss == 'MSE':\n self.loss_func = torch.nn.MSELoss()\n elif config.model.loss == 'L1':\n self.loss_func = torch.nn.L1Loss()\n else:\n raise ValueError(\"Non-supported loss function!\")\n\n self._init_param()\n\n def _init_param(self):\n for ff in self.filter:\n if isinstance(ff, nn.Linear):\n nn.init.xavier_uniform_(ff.weight.data)\n if ff.bias is not None:\n ff.bias.data.zero_()\n\n for ff in self.att_func:\n if isinstance(ff, nn.Linear):\n nn.init.xavier_uniform_(ff.weight.data)\n if ff.bias is not None:\n ff.bias.data.zero_()\n\n def forward(self, node_feat, L, label=None, mask=None):\n \"\"\"\n shape parameters:\n batch size = B\n embedding dim = D\n max number of nodes within one mini batch = N\n number of edge types = E\n number of predicted properties = P\n \n Args:\n node_feat: long tensor, shape B X N\n L: float tensor, shape B X N X N X (E + 1)\n label: float tensor, shape B X P\n mask: float tensor, shape B X N\n \"\"\"\n batch_size = node_feat.shape[0]\n num_node = node_feat.shape[1]\n state = self.embedding(node_feat) # shape: B X N X D\n\n # propagation\n for tt in range(self.num_layer):\n msg = []\n\n for ii in range(self.num_edgetype + 1):\n msg += [torch.bmm(L[:, :, :, ii], state)] # shape: B X N X D\n\n msg = torch.cat(msg, dim=2).view(num_node * batch_size, -1)\n state = F.relu(self.filter[tt](msg)).view(batch_size, num_node, -1)\n state = F.dropout(state, self.dropout, training=self.training)\n\n # output\n state = state.view(batch_size * num_node, -1)\n y = self.filter[-1](state) # shape: BN X 1\n att_weight = self.att_func(state) # shape: BN X 1\n y = (att_weight * y).view(batch_size, num_node, -1)\n\n score = []\n if mask is not None:\n for bb in range(batch_size):\n score += [torch.mean(y[bb, mask[bb], :], dim=0)]\n else:\n for bb in range(batch_size):\n score += [torch.mean(y[bb, :, :], dim=0)]\n\n score = torch.stack(score)\n\n if label is not None:\n return score, self.loss_func(score, label)\n else:\n return score\n"
] |
[
[
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.bmm",
"torch.nn.L1Loss",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yamamoto-kazuki-fixer/decode2019-Azure-MLOps
|
[
"bae4db710b889b529332c27f68bbbfda13ae1689"
] |
[
"train.py"
] |
[
"import os\nimport torch\nimport argparse\nimport torch.nn as nn\nfrom pathlib import Path\nimport torch.onnx as onnx\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\nfrom azureml.core.run import Run\n\n###################################################################\n# Helpers #\n###################################################################\ndef info(msg, char = \"#\", width = 75):\n print(\"\")\n print(char * width)\n print(char + \" %0*s\" % ((-1*width)+5, msg) + char)\n print(char * width)\n\ndef check_dir(path, check=False):\n if check:\n assert os.path.exists(path), '{} does not exist!'.format(path)\n else:\n if not os.path.exists(path):\n os.makedirs(path)\n return Path(path).resolve()\n\n###################################################################\n# Data Loader #\n###################################################################\ndef get_dataloader(train=True, batch_size=64, data_dir='data'):\n digits = datasets.MNIST(data_dir, train=train, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.reshape(28*28))\n ]),\n target_transform=transforms.Compose([\n transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, y, 1))\n ])\n )\n\n return DataLoader(digits, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True)\n\n###################################################################\n# Saving #\n###################################################################\ndef save_model(model, device, path, name):\n base = Path(path)\n onnx_file = base.joinpath('{}.onnx'.format(name)).resolve()\n pth_file = base.joinpath('{}.pth'.format(name)).resolve()\n \n # create dummy variable to traverse graph\n x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255\n onnx.export(model, x, onnx_file)\n print('Saved onnx model to {}'.format(onnx_file))\n\n # saving PyTorch Model Dictionary\n torch.save(model.state_dict(), pth_file)\n print('Saved PyTorch Model to {}'.format(pth_file))\n\n###################################################################\n# Models #\n###################################################################\nclass SimpleLinear(nn.Module):\n def __init__(self):\n super(SimpleLinear, self).__init__()\n self.layer1 = nn.Linear(28*28, 10)\n\n def forward(self, x):\n x = self.layer1(x)\n return F.softmax(x, dim=1)\n\nclass NeuralNework(nn.Module):\n def __init__(self):\n super(NeuralNework, self).__init__()\n self.layer1 = nn.Linear(28*28, 512)\n self.layer2 = nn.Linear(512, 512)\n self.output = nn.Linear(512, 10)\n\n def forward(self, x):\n x = F.relu(self.layer1(x))\n x = F.relu(self.layer2(x))\n x = self.output(x)\n return F.softmax(x, dim=1)\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = x.view(-1, 1, 28, 28)\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n\n###################################################################\n# Train/Test #\n###################################################################\ndef 
train(model, device, dataloader, cost, optimizer, epoch, run):\n model.train()\n for batch, (X, Y) in enumerate(dataloader):\n X, Y = X.to(device), Y.to(device)\n optimizer.zero_grad()\n pred = model(X)\n loss = cost(pred, Y)\n loss.backward()\n optimizer.step()\n\n if run != None:\n run.log('loss', loss.item())\n\n if batch % 100 == 0:\n print('loss: {:>10f} [{:>5d}/{:>5d}]'.format(loss.item(), batch * len(X), len(dataloader.dataset)))\n\ndef test(model, device, dataloader, cost, run):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for batch, (X, Y) in enumerate(dataloader):\n X, Y = X.to(device), Y.to(device)\n pred = model(X)\n\n test_loss += cost(pred, Y).item()\n correct += (pred.argmax(1) == Y.argmax(1)).type(torch.float).sum().item()\n\n test_loss /= len(dataloader.dataset)\n correct /= len(dataloader.dataset)\n\n if run != None:\n run.log('accuracy', 100*correct)\n\n print('\\nTest Error:')\n print('acc: {:>0.1f}%, avg loss: {:>8f}'.format(100*correct, test_loss))\n\n###################################################################\n# Main Loop #\n###################################################################\ndef main(data_dir, output_dir, log_dir, epochs, batch, lr, model_kind):\n # use GPU?\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # AML Logging (if available)\n try:\n run = Run.get_context()\n print('Using AML Logging...')\n run.log('data', data_dir)\n run.log('output', output_dir)\n run.log('logs', log_dir)\n run.log('epochs', epochs)\n run.log('batch', batch)\n run.log('learning_rate', lr)\n run.log('model_kind', model_kind)\n run.log('device', device)\n except:\n run = None\n\n # get data loaders\n training = get_dataloader(train=True, batch_size=batch, data_dir=data_dir)\n testing = get_dataloader(train=False, batch_size=batch, data_dir=data_dir)\n\n # model\n if model_kind == 'linear':\n model = SimpleLinear().to(device)\n elif model_kind == 'nn':\n model = NeuralNework().to(device)\n else:\n model = CNN().to(device)\n\n info('Model')\n print(model)\n\n # cost function\n cost = torch.nn.BCELoss()\n\n # optimizers\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n for epoch in range(1, epochs + 1):\n info('Epoch {}'.format(epoch))\n train(model, device, training, cost, optimizer, epoch, run)\n test(model, device, testing, cost, run)\n\n # save model\n info('Saving Model')\n save_model(model, device, output_dir, 'model')\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='CNN Training for Image Recognition.')\n parser.add_argument('-d', '--data', help='directory to training and test data', default='data')\n parser.add_argument('-o', '--output', help='output directory', default='outputs')\n parser.add_argument('-g', '--logs', help='log directory', default='logs')\n \n parser.add_argument('-e', '--epochs', help='number of epochs', default=5, type=int)\n parser.add_argument('-b', '--batch', help='batch size', default=100, type=int)\n parser.add_argument('-l', '--lr', help='learning rate', default=0.001, type=float)\n\n parser.add_argument('-m', '--model', help='model type', default='cnn', choices=['linear', 'nn', 'cnn'])\n\n args = parser.parse_args()\n\n # enforce folder locatations\n args.data = check_dir(args.data).resolve()\n args.outputs = check_dir(args.output).resolve()\n args.logs = check_dir(args.logs).resolve()\n\n main(data_dir=args.data, output_dir=args.output, log_dir=args.logs, \n epochs=args.epochs, batch=args.batch, lr=args.lr, model_kind=args.model)"
] |
[
[
"torch.onnx.export",
"torch.nn.functional.softmax",
"torch.nn.Dropout2d",
"torch.randint",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fkubota/kaggle-Rainforest-Connection-Species-Audio-Detection
|
[
"7134edff0ba1c60f597b64a0efd953b7707b98e1",
"7134edff0ba1c60f597b64a0efd953b7707b98e1"
] |
[
"exp/exp002/utils.py",
"exp/exp002/trainner.py"
] |
[
"from ipdb import set_trace as st\nimport os\nimport time\nimport random\nimport numpy as np\nfrom loguru import logger\nimport torch\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed) # type: ignore\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = True # type: ignore\n\n\ndef get_save_dir_exp(config):\n _dir = os.path.dirname(os.path.abspath(__file__))\n exp_name = _dir.split('/')[-1]\n dir_save_exp = f'{config[\"path\"][\"dir_save\"]}{exp_name}'\n dir_save_ignore_exp = f'{config[\"path\"][\"dir_save_ignore\"]}{exp_name}'\n return dir_save_exp, dir_save_ignore_exp, exp_name\n\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef get_debug_idx(trn_tp, trn_idxs, val_idxs, config):\n n_classes = config['model']['params']['n_classes']\n\n trn_tp_trn = trn_tp.iloc[trn_idxs].copy()\n trn_tp_val = trn_tp.iloc[val_idxs].copy()\n trn_tp_trn['idx_'] = trn_idxs\n trn_tp_val['idx_'] = val_idxs\n\n trn_idxs_debug = []\n val_idxs_debug = []\n for idx in range(n_classes):\n bools = trn_tp_trn.species_id == idx\n trn_idxs_debug.append(trn_tp_trn[bools]['idx_'].values[0])\n\n bools = trn_tp_val.species_id == idx\n val_idxs_debug.append(trn_tp_val[bools]['idx_'].values[0])\n\n return trn_idxs_debug, val_idxs_debug\n\n\ndef set_debug_config(config):\n if config['globals']['debug']:\n logger.info(':: debug mode ::')\n config['globals']['num_epochs'] = 2\n config['split']['n_fold'] = 2\n config['loader']['train']['batch_size'] = 1\n config['loader']['valid']['batch_size'] = 1\n return config\n else:\n return config\n\n\ndef sec2time(sec):\n hour = int(sec//3600)\n minute = int((sec - 3600*hour)//60)\n second = int(sec - 3600*hour - 60*minute)\n\n hour = str(hour).zfill(2)\n minute = str(minute).zfill(2)\n second = str(second).zfill(2)\n str_time = f'{hour}:{minute}:{second}'\n\n return str_time\n\n\ndef LWLRAP(preds, labels):\n '''\n https://github.com/yuki-a4/rfcx-species-audio-detection/blob/main/yuki/notebook/ex_059_resnest_changeLoss_lr_0.15_aug0.3_seed239.ipynb\n '''\n # st()\n # device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n preds, labels = preds.to(device), labels.to(device)\n\n # Ranks of the predictions\n ranked_classes = torch.argsort(preds, dim=-1, descending=True)\n # i, j corresponds to rank of prediction in row i\n class_ranks = torch.zeros_like(ranked_classes)\n for i in range(ranked_classes.size(0)):\n for j in range(ranked_classes.size(1)):\n class_ranks[i, ranked_classes[i][j]] = j + 1\n # Mask out to only use the ranks of relevant GT labels\n ground_truth_ranks = class_ranks * labels + (1e6) * (1 - labels)\n # All the GT ranks are in front now\n sorted_ground_truth_ranks, _ = torch.sort(ground_truth_ranks,\n dim=-1, descending=False)\n # Number of GT labels per instance\n # num_labels = labels.sum(-1)\n pos_matrix = torch.tensor(\n np.array([i+1 for i in range(labels.size(-1))])).unsqueeze(0)\n pos_matrix = pos_matrix.to(device)\n sorted_ground_truth_ranks = sorted_ground_truth_ranks.to(device)\n score_matrix = pos_matrix / 
sorted_ground_truth_ranks\n score_mask_matrix, _ = torch.sort(labels, dim=-1, descending=True)\n scores = score_matrix * score_mask_matrix\n score = scores.sum() / labels.sum()\n return score.item()\n",
"from ipdb import set_trace as st\nfrom icecream import ic\nimport gc\nimport os\nimport wandb\nimport pandas as pd\nfrom fastprogress import progress_bar\nfrom loguru import logger\nimport numpy as np\nimport torch\nfrom sklearn.metrics import accuracy_score\n\nimport utils as U\nimport configuration as C\nimport result_handler as rh\nfrom criterion import mixup_criterion\nfrom early_stopping import EarlyStopping\n\n\ndef train_cv(config):\n # config\n debug = config['globals']['debug']\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_fold = config['split']['n_fold']\n n_epoch = config['globals']['num_epochs']\n path_trn_tp = config['path']['path_train_tp']\n n_classes = config['model']['params']['n_classes']\n dir_save_exp, dir_save_ignore_exp, _ = U.get_save_dir_exp(config)\n\n # load data\n pwd = os.path.dirname(os.path.abspath(__file__))\n trn_tp = pd.read_csv(f'{pwd}/{path_trn_tp}')\n\n # init\n acc_val_folds = []\n lwlrap_val_folds = []\n if debug:\n oof_sig = np.zeros([n_classes*n_fold, n_classes])\n else:\n oof_sig = np.zeros([len(trn_tp), n_classes])\n for i_fold in progress_bar(range(n_fold)):\n # logger\n logger.info(\"-\" * 18)\n logger.info(f'\\tFold {i_fold + 1}/{n_fold}')\n logger.info(\"-\" * 18)\n\n # preparation\n model = C.get_model(config).to(device)\n criterion = C.get_criterion(config)\n optimizer = C.get_optimizer(model, config)\n scheduler = C.get_scheduler(optimizer, config)\n _, _, exp_name = U.get_save_dir_exp(config)\n\n # wandb\n wb_fold = wandb.init(project='kaggle-rfcx',\n group=exp_name,\n name=f'fold{i_fold}')\n wb_fold.config.config = config\n\n epochs = []\n losses_trn = []\n losses_val = []\n accs_val = []\n lwlraps_val = []\n best_acc_val = 0\n best_lwlrap_val = 0\n best_loss_val = 0\n best_output_sig = 0\n save_path = f'{dir_save_ignore_exp}/'\\\n f'{model.__class__.__name__}_fold{i_fold}.pth'\n early_stopping = EarlyStopping(patience=12,\n verbose=True,\n path=save_path,\n trace_func=logger.info)\n for epoch in range(1, n_epoch+1):\n # 学習を行う\n result_dict = train_fold(i_fold, trn_tp, model,\n criterion, optimizer,\n scheduler, config)\n val_idxs = result_dict['val_idxs']\n output_sig = result_dict['output_sig']\n loss_trn = result_dict['loss_trn']\n loss_val = result_dict['loss_val']\n acc_val = result_dict['acc_val']\n lwlrap_val = result_dict['lwlrap_val']\n logger.info(f'[fold({i_fold+1})epoch({epoch})]'\n f'loss_trn={loss_trn:.6f} '\n f'loss_val={loss_val:.6f} '\n f'acc_val={acc_val:.6f} '\n f'lwlrap_val={lwlrap_val:.6f}')\n wb_fold.log({'epoch': epoch,\n 'loss_trn': loss_trn,\n 'loss_val': loss_val,\n 'acc_val': acc_val,\n 'lwlrap_val': lwlrap_val})\n\n # 格納\n epochs.append(int(epoch))\n losses_trn.append(loss_trn)\n losses_val.append(loss_val)\n accs_val.append(acc_val)\n lwlraps_val.append(lwlrap_val)\n\n # best model ?\n is_update = early_stopping(loss_val, result_dict['model'], debug)\n if is_update:\n best_loss_val = loss_val\n best_acc_val = acc_val\n best_lwlrap_val = lwlrap_val\n best_output_sig = output_sig\n wb_fold.summary['loss_val'] = best_loss_val\n wb_fold.summary['acc_val'] = best_acc_val\n wb_fold.summary['lwlrap_val'] = best_lwlrap_val\n\n if early_stopping.early_stop:\n logger.info(\"Early stopping\")\n break\n wb_fold.finish()\n # result\n rh.save_plot_figure(i_fold, epochs, losses_trn, accs_val, lwlraps_val,\n losses_val, dir_save_exp)\n rh.save_result_csv(i_fold, best_loss_val, best_acc_val, best_lwlrap_val,\n dir_save_exp, config)\n\n # --- fold end ---\n # oof_sig\n 
acc_val_folds.append(best_acc_val)\n lwlrap_val_folds.append(best_lwlrap_val)\n if debug:\n oof_sig[i_fold*n_classes:(i_fold+1)*n_classes] = best_output_sig\n else:\n oof_sig[val_idxs, :] = best_output_sig\n logger.info(f'best_loss_val: {best_loss_val:.6f}, '\n f'best_acc_val: {best_acc_val:.6f}, '\n f'best_lwlrap_val: {best_lwlrap_val:.6f}')\n\n oof = np.argmax(oof_sig, axis=1)\n oof_sig = torch.tensor(oof_sig)\n labels = np.zeros([len(oof), 24], dtype=int)\n if debug:\n # 適当な値を答えとする\n labels[:, 0] = 1\n labels = torch.tensor(labels)\n acc_oof = accuracy_score(np.zeros(len(oof)), oof)\n lwlrap_oof = U.LWLRAP(oof_sig, labels)\n else:\n for i_id, id_ in enumerate(trn_tp['species_id'].values):\n labels[i_id][id_] = 1\n labels = torch.tensor(labels)\n acc_oof = accuracy_score(trn_tp['species_id'].values, oof)\n lwlrap_oof = U.LWLRAP(oof_sig, labels)\n\n # acc_val_folds\n acc_val_folds_mean = np.mean(acc_val_folds)\n acc_val_folds_std = np.std(acc_val_folds)\n logger.info(f'acc_folds(mean, std): '\n f'{acc_val_folds_mean:.6f} +- {acc_val_folds_std:6f}')\n logger.info(f'acc_oof: {acc_oof:6f}')\n\n # lwlrap_val_folds\n lwlrap_val_folds_mean = np.mean(lwlrap_val_folds)\n lwlrap_val_folds_std = np.std(lwlrap_val_folds)\n logger.info(f'lwlrap_folds(mean, std): '\n f'{lwlrap_val_folds_mean:.6f} +- {lwlrap_val_folds_std:6f}')\n logger.info(f'lwlrap_oof: {lwlrap_oof:6f}')\n\n # wandb\n wb_summary = wandb.init(project='kaggle-rfcx',\n group=exp_name,\n name='summary')\n wb_summary.config.config = config\n wb_summary.log({'acc_val_folds_mean': acc_val_folds_mean,\n 'acc_val_folds_std': acc_val_folds_std,\n 'acc_oof': acc_oof,\n 'lwlrap_val_folds_mean': lwlrap_val_folds_mean,\n 'lwlrap_val_folds_std': lwlrap_val_folds_std,\n 'lwlrap_oof': lwlrap_oof})\n wb_summary.finish()\n\n # 開放\n del result_dict\n del model\n del optimizer\n del scheduler\n gc.collect()\n torch.cuda.empty_cache()\n\n\ndef train_fold(i_fold, trn_tp, model,\n criterion, optimizer,\n scheduler, config):\n mixup = config['globals']['mixup']\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n trn_idxs, val_idxs = C.get_index_fold(trn_tp, i_fold, config)\n trn_tp_trn = trn_tp.iloc[trn_idxs].reset_index(drop=True)\n trn_tp_val = trn_tp.iloc[val_idxs].reset_index(drop=True)\n trn_loader = C.get_trn_val_loader(trn_tp_trn, 'train', config)\n val_loader = C.get_trn_val_loader(trn_tp_val, 'valid', config)\n\n # train\n model.train()\n epoch_train_loss = 0\n for batch_idx, (data, target) in enumerate(trn_loader):\n data, target = data.to(device), target.to(device)\n if mixup:\n data, targets_a, targets_b, lam = U.mixup_data(data,\n target,\n alpha=1.0)\n optimizer.zero_grad()\n output = model(data)\n if mixup:\n loss = mixup_criterion(criterion, output,\n targets_a, targets_b, lam)\n else:\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n epoch_train_loss += loss.item()*data.size(0)\n scheduler.step()\n loss_trn = epoch_train_loss / len(trn_loader.dataset)\n del data\n\n # eval valid\n loss_val, acc_val, lwlrap_val, output_sig = get_loss_score(model,\n val_loader,\n criterion,\n device)\n\n result_dict = {\n 'model': model,\n 'val_idxs': val_idxs,\n 'output_sig': output_sig,\n 'loss_trn': loss_trn,\n 'loss_val': loss_val,\n 'acc_val': acc_val,\n 'lwlrap_val': lwlrap_val\n }\n return result_dict\n\n\ndef get_loss_score(model, val_loader, criterion, device):\n model.eval()\n epoch_valid_loss = 0\n y_pred_list = []\n y_true_list = []\n output_sig_list = []\n lwlrap_val = 0\n for batch_idx, (data, 
target) in enumerate(val_loader):\n data, target = data.to(device), target.to(device)\n output = model(data)\n loss = criterion(output, target)\n epoch_valid_loss += loss.item()*data.size(0)\n\n output_ = output['output']\n output_sig = output['output_sigmoid']\n output_sig = output_sig.detach().cpu().numpy()\n _y_pred = output_.detach().cpu().numpy().argmax(axis=1)\n _y_true = target.detach().cpu().numpy().argmax(axis=1)\n y_pred_list.append(_y_pred)\n y_true_list.append(_y_true)\n output_sig_list.append(output_sig)\n lwlrap_val += U.LWLRAP(output_, target) / len(val_loader)\n\n loss_val = epoch_valid_loss / len(val_loader.dataset)\n y_pred = np.concatenate(y_pred_list, axis=0)\n y_true = np.concatenate(y_true_list, axis=0)\n output_sig = np.concatenate(output_sig_list, axis=0)\n acc_val = accuracy_score(y_true, y_pred)\n del data\n return loss_val, acc_val, lwlrap_val, output_sig\n"
] |
[
[
"numpy.random.beta",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.randperm",
"torch.manual_seed",
"torch.zeros_like",
"torch.sort",
"torch.cuda.is_available",
"torch.argsort"
],
[
"pandas.read_csv",
"torch.cuda.empty_cache",
"torch.tensor",
"numpy.concatenate",
"numpy.std",
"numpy.argmax",
"numpy.mean",
"torch.cuda.is_available",
"numpy.zeros",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Raashil/EasyProgrammingPython
|
[
"8d64ae8f10e1bb207cd1b314096375573c7c9cb6"
] |
[
"Covid_project/app.py"
] |
[
"import pandas as pd\r\nimport folium\r\nfrom flask import Flask,render_template\r\n\r\ndef find_top_confirmed(n = 15):\r\n \r\n corona_df=pd.read_csv(\"covid-19-dataset-2.csv\")\r\n by_country = corona_df.groupby('Country').sum()[['Confirmed', 'Deaths', 'Recovered', 'Active']]\r\n cdf = by_country.nlargest(n, 'Confirmed')[['Confirmed']]\r\n return cdf\r\ncdf=find_top_confirmed()\r\npairs=[(country,confirmed) for country,confirmed in zip(cdf.index,cdf['Confirmed'])]\r\n\r\n\r\ncorona_df = pd.read_csv(\"covid-19-dataset-2.csv\")\r\ncorona_df=corona_df[['Lat','Long_','Confirmed']]\r\ncorona_df=corona_df.dropna()\r\nm=folium.Map(location=[34.223334,-82.461707],\r\n tiles='Stamen toner',\r\n zoom_start=8)\r\ndef circle_maker(x):\r\n folium.Circle(location=[x[0],x[1]],\r\n radius=float(x[2]),\r\n color=\"red\",\r\n popup='confirmed cases:{}'.format(x[2])).add_to(m)\r\ncorona_df.apply(lambda x:circle_maker(x),axis=1)\r\nhtml_map=m._repr_html_()\r\n\r\napp=Flask(__name__)\r\[email protected]('/')\r\ndef home():\r\n return render_template(\"home.html\",table=cdf, cmap=html_map,pairs=pairs)\r\nif __name__==\"__main__\":\r\n app.run(debug=True)"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
immortal-autumn/Siamese-pytorch
|
[
"f8f10cb74860a808f3230b3caa6ca53a216c73be"
] |
[
"analyse.py"
] |
[
"import json\n\nimport matplotlib.pyplot as plt\n\nwith open('output.json', 'r') as f:\n result = json.load(f)\n\n# plotting = {}\nindexs = []\nlosses = []\nval_losses = []\naccuracies = []\n\nfor k, v in result.items():\n name = k.replace(\".pth\", \"\").split('-')\n index = int(name[0][2:])\n loss = float(name[1].replace(\"loss\", \"\"))\n val_loss = float(name[2].replace(\"val_loss\", \"\"))\n accuracy = v[\"accuracy\"]\n # Add to list\n indexs.append(index)\n losses.append(loss)\n val_losses.append(val_loss)\n accuracies.append(accuracy)\n # plotting[index] = {\n # \"loss\": loss,\n # \"val_loss\": val_loss,\n # \"accuracy\": accuracy,\n # }\n\n print(index, loss, val_loss, accuracy)\n\nprint(f\"Max Loss: {max(losses)}\\nMin Loss: {min(losses)}\")\nprint(f\"Max Validation Loss: {max(val_losses)}\\nMin Validation Loss: {min(val_losses)}\")\nprint(f\"Max Accuracy: {max(accuracies)} - {accuracies.index(max(accuracies))}\"\n f\"\\nMin Accuracy: {min(accuracies)} - {accuracies.index(min(accuracies))}\")\n\nplt.xlabel('Index')\nplt.ylabel('Value')\n# for k, v in plotting.items():\n# plt.plot(k, v['loss'])\nplt.plot(indexs, losses, label=\"loss\")\nplt.plot(indexs, val_losses, label=\"validation_loss\")\nplt.plot(indexs, accuracies, label=\"accuracy\")\n# print((k, v['loss']) for k, v in plotting.items())\nplt.legend()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Echo-CBT/ActivitySuggestion
|
[
"e4ac68aa3f1d4a45d2f65145d5c424067fba1394"
] |
[
"corelate.py"
] |
[
"import pandas as pd\nimport json\nfrom sklearn import preprocessing\nimport numpy as np\n\ndummy_data = json.dumps({\n \"indexKeys\": [\"feeling\", \"jogging\", \"exercise\", \"meditate\", \"paint\", \"read\", \"code\", \"games\", \"socialize\", \"socialMedia\", \"goodSleep\", \"badSleep\", \"moderateSleep\", \"netflix\", \"smoke\", \"junkFood\"],\n \"record\": [\n [4, 0, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [4, 0, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0]\n ]\n})\n\n\ndef find_corelations(json_data):\n records = json_data[\"record\"]\n columns = json_data[\"indexKeys\"]\n\n min_max_scaler = preprocessing.MinMaxScaler()\n records = np.array(records).transpose()\n records = min_max_scaler.fit_transform(records)\n records = records.transpose()\n \n df = pd.DataFrame(records, columns=columns)\n # print(df)\n\n corrMatrix = df.corr()\n corr = np.array(corrMatrix)[0] \n # print(corr)\n return corr\n \nif __name__ == \"__main__\":\n find_corelations(dummy_data)\n"
] |
[
[
"numpy.array",
"sklearn.preprocessing.MinMaxScaler",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mhelabd/ai-ethicist
|
[
"ac3efad32c2ec34df949c8facf139e33d250db87",
"ac3efad32c2ec34df949c8facf139e33d250db87",
"ac3efad32c2ec34df949c8facf139e33d250db87"
] |
[
"ai_economist/foundation/base/world.py",
"ai_economist/foundation/scenarios/covid19/covid19_env.py",
"ai_economist/foundation/scenarios/simple_wood_and_stone/layout_from_file.py"
] |
[
"# Copyright (c) 2020, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root\n# or https://opensource.org/licenses/BSD-3-Clause\n\nimport numpy as np\n\nfrom ai_economist.foundation.agents import agent_registry\nfrom ai_economist.foundation.entities import landmark_registry, resource_registry\n\n\nclass Maps:\n \"\"\"Manages the spatial configuration of the world as a set of entity maps.\n\n A maps object is built during world construction, which is a part of environment\n construction. The maps object is accessible through the world object. The maps\n object maintains a map state for each of the spatial entities that are involved\n in the constructed environment (which are determined by the \"required_entities\"\n attributes of the Scenario and Component classes used to build the environment).\n\n The Maps class also implements some of the basic spatial logic of the game,\n such as which locations agents can occupy based on other agent locations and\n locations of various landmarks.\n\n Args:\n size (list): A length-2 list specifying the dimensions of the 2D world.\n Interpreted as [height, width].\n n_agents (int): The number of mobile agents (does not include planner).\n world_resources (list): The resources registered during environment\n construction.\n world_landmarks (list): The landmarks registered during environment\n construction.\n \"\"\"\n\n def __init__(self, size, n_agents, world_resources, world_landmarks):\n self.size = size\n self.sz_h, self.sz_w = size\n\n self.n_agents = n_agents\n\n self.resources = world_resources\n self.landmarks = world_landmarks\n self.entities = world_resources + world_landmarks\n\n self._maps = {} # All maps\n self._blocked = [] # Solid objects that no agent can move through\n self._private = [] # Solid objects that only permit movement for parent agents\n self._public = [] # Non-solid objects that agents can move on top of\n self._resources = [] # Non-solid objects that can be collected\n\n self._private_landmark_types = []\n self._resource_source_blocks = []\n\n self._map_keys = []\n\n self._accessibility_lookup = {}\n\n for resource in self.resources:\n resource_cls = resource_registry.get(resource)\n if resource_cls.collectible:\n self._maps[resource] = np.zeros(shape=self.size)\n self._resources.append(resource)\n self._map_keys.append(resource)\n\n self.landmarks.append(\"{}SourceBlock\".format(resource))\n\n for landmark in self.landmarks:\n dummy_landmark = landmark_registry.get(landmark)()\n\n if dummy_landmark.public:\n self._maps[landmark] = np.zeros(shape=self.size)\n self._public.append(landmark)\n self._map_keys.append(landmark)\n\n elif dummy_landmark.blocking:\n self._maps[landmark] = np.zeros(shape=self.size)\n self._blocked.append(landmark)\n self._map_keys.append(landmark)\n self._accessibility_lookup[landmark] = len(self._accessibility_lookup)\n\n elif dummy_landmark.private:\n self._private_landmark_types.append(landmark)\n self._maps[landmark] = dict(\n owner=-np.ones(shape=self.size, dtype=np.int16),\n health=np.zeros(shape=self.size),\n )\n self._private.append(landmark)\n self._map_keys.append(landmark)\n self._accessibility_lookup[landmark] = len(self._accessibility_lookup)\n\n else:\n raise NotImplementedError\n\n self._idx_map = np.stack(\n [i * np.ones(shape=self.size) for i in range(self.n_agents)]\n )\n self._idx_array = np.arange(self.n_agents)\n if self._accessibility_lookup:\n self._accessibility = np.ones(\n 
shape=[len(self._accessibility_lookup), self.n_agents] + self.size,\n dtype=bool,\n )\n self._net_accessibility = None\n else:\n self._accessibility = None\n self._net_accessibility = np.ones(\n shape=[self.n_agents] + self.size, dtype=bool\n )\n\n self._agent_locs = [None for _ in range(self.n_agents)]\n self._unoccupied = np.ones(self.size, dtype=bool)\n\n def clear(self, entity_name=None):\n \"\"\"Clear resource and landmark maps.\"\"\"\n if entity_name is not None:\n assert entity_name in self._maps\n if entity_name in self._private_landmark_types:\n self._maps[entity_name] = dict(\n owner=-np.ones(shape=self.size, dtype=np.int16),\n health=np.zeros(shape=self.size),\n )\n else:\n self._maps[entity_name] *= 0\n\n else:\n for name in self.keys():\n self.clear(entity_name=name)\n\n if self._accessibility is not None:\n self._accessibility = np.ones_like(self._accessibility)\n self._net_accessibility = None\n\n def clear_agent_loc(self, agent=None):\n \"\"\"Remove agents or agent from the world map.\"\"\"\n # Clear all agent locations\n if agent is None:\n self._agent_locs = [None for _ in range(self.n_agents)]\n self._unoccupied[:, :] = 1\n\n # Clear the location of the provided agent\n else:\n i = agent.idx\n if self._agent_locs[i] is None:\n return\n r, c = self._agent_locs[i]\n self._unoccupied[r, c] = 1\n self._agent_locs[i] = None\n\n def set_agent_loc(self, agent, r, c):\n \"\"\"Set the location of agent to [r, c].\n\n Note:\n Things might break if you set the agent's location to somewhere it\n cannot access. Don't do that.\n \"\"\"\n assert (0 <= r < self.size[0]) and (0 <= c < self.size[1])\n i = agent.idx\n # If the agent is currently on the board...\n if self._agent_locs[i] is not None:\n curr_r, curr_c = self._agent_locs[i]\n # If the agent isn't actually moving, just return\n if (curr_r, curr_c) == (r, c):\n return\n # Make the location the agent is currently at as unoccupied\n # (since the agent is going to move)\n self._unoccupied[curr_r, curr_c] = 1\n\n # Set the agent location to the specified coordinates\n # and update the occupation map\n agent.state[\"loc\"] = [r, c]\n self._agent_locs[i] = [r, c]\n self._unoccupied[r, c] = 0\n\n def keys(self):\n \"\"\"Return an iterable over map keys.\"\"\"\n return self._maps.keys()\n\n def values(self):\n \"\"\"Return an iterable over map values.\"\"\"\n return self._maps.values()\n\n def items(self):\n \"\"\"Return an iterable over map (key, value) pairs.\"\"\"\n return self._maps.items()\n\n def get(self, entity_name, owner=False):\n \"\"\"Return the map or ownership for entity_name.\"\"\"\n assert entity_name in self._maps\n if entity_name in self._private_landmark_types:\n sub_key = \"owner\" if owner else \"health\"\n return self._maps[entity_name][sub_key]\n return self._maps[entity_name]\n\n def set(self, entity_name, map_state):\n \"\"\"Set the map for entity_name.\"\"\"\n if entity_name in self._private_landmark_types:\n assert \"owner\" in map_state\n assert self.get(entity_name, owner=True).shape == map_state[\"owner\"].shape\n assert \"health\" in map_state\n assert self.get(entity_name, owner=False).shape == map_state[\"health\"].shape\n\n h = np.maximum(0.0, map_state[\"health\"])\n o = map_state[\"owner\"].astype(np.int16)\n\n o[h <= 0] = -1\n tmp = o[h > 0]\n if len(tmp) > 0:\n assert np.min(tmp) >= 0\n\n self._maps[entity_name] = dict(owner=o, health=h)\n\n owned_by_agent = o[None] == self._idx_map\n owned_by_none = o[None] == -1\n self._accessibility[\n self._accessibility_lookup[entity_name]\n ] = 
np.logical_or(owned_by_agent, owned_by_none)\n self._net_accessibility = None\n\n else:\n assert self.get(entity_name).shape == map_state.shape\n self._maps[entity_name] = np.maximum(0, map_state)\n\n if entity_name in self._blocked:\n self._accessibility[\n self._accessibility_lookup[entity_name]\n ] = np.repeat(map_state[None] == 0, self.n_agents, axis=0)\n self._net_accessibility = None\n\n def set_add(self, entity_name, map_state):\n \"\"\"Add map_state to the existing map for entity_name.\"\"\"\n assert entity_name not in self._private_landmark_types\n self.set(entity_name, self.get(entity_name) + map_state)\n\n def get_point(self, entity_name, r, c, **kwargs):\n \"\"\"Return the entity state at the specified coordinates.\"\"\"\n point_map = self.get(entity_name, **kwargs)\n return point_map[r, c]\n\n def set_point(self, entity_name, r, c, val, owner=None):\n \"\"\"Set the entity state at the specified coordinates.\"\"\"\n if entity_name in self._private_landmark_types:\n assert owner is not None\n h = self._maps[entity_name][\"health\"]\n o = self._maps[entity_name][\"owner\"]\n assert o[r, c] == -1 or o[r, c] == int(owner)\n h[r, c] = np.maximum(0, val)\n if h[r, c] == 0:\n o[r, c] = -1\n else:\n o[r, c] = int(owner)\n\n self._maps[entity_name][\"owner\"] = o\n self._maps[entity_name][\"health\"] = h\n\n self._accessibility[\n self._accessibility_lookup[entity_name], :, r, c\n ] = np.logical_or(o[r, c] == self._idx_array, o[r, c] == -1).astype(bool)\n self._net_accessibility = None\n\n else:\n self._maps[entity_name][r, c] = np.maximum(0, val)\n\n if entity_name in self._blocked:\n self._accessibility[\n self._accessibility_lookup[entity_name]\n ] = np.repeat(np.array([val]) == 0, self.n_agents, axis=0)\n self._net_accessibility = None\n\n def set_point_add(self, entity_name, r, c, value, **kwargs):\n \"\"\"Add value to the existing entity state at the specified coordinates.\"\"\"\n self.set_point(\n entity_name,\n r,\n c,\n value + self.get_point(entity_name, r, c, **kwargs),\n **kwargs\n )\n\n def is_accessible(self, r, c, agent_id):\n \"\"\"Return True if agent with id agent_id can occupy the location [r, c].\"\"\"\n return bool(self.accessibility[agent_id, r, c])\n\n def location_resources(self, r, c):\n \"\"\"Return {resource: health} dictionary for any resources at location [r, c].\"\"\"\n return {\n k: self._maps[k][r, c] for k in self._resources if self._maps[k][r, c] > 0\n }\n\n def location_landmarks(self, r, c):\n \"\"\"Return {landmark: health} dictionary for any landmarks at location [r, c].\"\"\"\n tmp = {k: self.get_point(k, r, c) for k in self.keys()}\n return {k: v for k, v in tmp.items() if k not in self._resources and v > 0}\n\n @property\n def unoccupied(self):\n \"\"\"Return a boolean map indicating which locations are unoccupied.\"\"\"\n return self._unoccupied\n\n @property\n def accessibility(self):\n \"\"\"Return a boolean map indicating which locations are accessible.\"\"\"\n if self._net_accessibility is None:\n self._net_accessibility = self._accessibility.prod(axis=0).astype(bool)\n return self._net_accessibility\n\n @property\n def empty(self):\n \"\"\"Return a boolean map indicating which locations are empty.\n\n Empty locations have no landmarks or resources.\"\"\"\n return self.state.sum(axis=0) == 0\n\n @property\n def state(self):\n \"\"\"Return the concatenated maps of landmark and resources.\"\"\"\n return np.stack([self.get(k) for k in self.keys()]).astype(np.float32)\n\n @property\n def owner_state(self):\n \"\"\"Return the concatenated 
ownership maps of private landmarks.\"\"\"\n return np.stack(\n [self.get(k, owner=True) for k in self._private_landmark_types]\n ).astype(np.int16)\n\n @property\n def state_dict(self):\n \"\"\"Return a dictionary of the map states.\"\"\"\n return self._maps\n\n\nclass World:\n \"\"\"Manages the environment's spatial- and agent-states.\n\n The world object represents the state of the environment, minus whatever state\n information is implicitly maintained by separate components. The world object\n maintains the spatial state through an instance of the Maps class. Agent states\n are maintained through instances of Agent classes (subclasses of BaseAgent),\n with one such instance for each of the agents in the environment.\n\n The world object is built during the environment construction, after the\n required entities have been registered. As part of the world object construction,\n it instantiates a map object and the agent objects.\n\n The World class adds some functionality for interfacing with the spatial state\n (the maps object) and setting/resetting agent locations. But its function is\n mostly to wrap the stateful, non-component environment objects.\n\n Args:\n world_size (list): A length-2 list specifying the dimensions of the 2D world.\n Interpreted as [height, width].\n n_agents (int): The number of mobile agents (does not include planner).\n world_resources (list): The resources registered during environment\n construction.\n world_landmarks (list): The landmarks registered during environment\n construction.\n multi_action_mode_agents (bool): Whether \"mobile\" agents use multi action mode\n (see BaseEnvironment in base_env.py).\n multi_action_mode_planner (bool): Whether the planner agent uses multi action\n mode (see BaseEnvironment in base_env.py).\n \"\"\"\n\n def __init__(\n self,\n world_size,\n n_agents,\n world_resources,\n world_landmarks,\n multi_action_mode_agents,\n multi_action_mode_planner,\n ):\n self.world_size = world_size\n self.n_agents = n_agents\n self.resources = world_resources\n self.landmarks = world_landmarks\n self.multi_action_mode_agents = bool(multi_action_mode_agents)\n self.multi_action_mode_planner = bool(multi_action_mode_planner)\n self.maps = Maps(world_size, n_agents, world_resources, world_landmarks)\n\n mobile_class = agent_registry.get(\"BasicMobileAgent\")\n planner_class = agent_registry.get(\"BasicPlanner\")\n self._agents = [\n mobile_class(i, multi_action_mode=self.multi_action_mode_agents)\n for i in range(self.n_agents)\n ]\n self._planner = planner_class(multi_action_mode=self.multi_action_mode_planner)\n\n self.timestep = 0\n\n # CUDA-related attributes (for GPU simulations).\n # These will be set via the env_wrapper, if required.\n self.use_cuda = False\n self.cuda_function_manager = None\n self.cuda_data_manager = None\n\n @property\n def agents(self):\n \"\"\"Return a list of the agent objects in the world (sorted by index).\"\"\"\n return self._agents\n\n @property\n def planner(self):\n \"\"\"Return the planner agent object.\"\"\"\n return self._planner\n\n @property\n def loc_map(self):\n \"\"\"Return a map indicating the agent index occupying each location.\n\n Locations with a value of -1 are not occupied by an agent.\n \"\"\"\n idx_map = -np.ones(shape=self.world_size, dtype=np.int16)\n for agent in self.agents:\n r, c = agent.loc\n idx_map[r, c] = int(agent.idx)\n return idx_map\n\n def get_random_order_agents(self):\n \"\"\"The agent list in a randomized order.\"\"\"\n agent_order = np.random.permutation(self.n_agents)\n 
agents = self.agents\n return [agents[i] for i in agent_order]\n\n def is_valid(self, r, c):\n \"\"\"Return True if the coordinates [r, c] are within the game boundaries.\"\"\"\n return (0 <= r < self.world_size[0]) and (0 <= c < self.world_size[1])\n\n def is_location_accessible(self, r, c, agent):\n \"\"\"Return True if location [r, c] is accessible to agent.\"\"\"\n if not self.is_valid(r, c):\n return False\n return self.maps.is_accessible(r, c, agent.idx)\n\n def can_agent_occupy(self, r, c, agent):\n \"\"\"Return True if location [r, c] is accessible to agent and unoccupied.\"\"\"\n if not self.is_location_accessible(r, c, agent):\n return False\n if self.maps.unoccupied[r, c]:\n return True\n return False\n \n def agent_in_location(self, r, c):\n \"\"\"Return Agent in location [r, c] if one exists otherwise returns -1.\"\"\"\n if not self.is_valid(r, c) or self.maps.unoccupied[r, c]:\n return -1\n for agent in self.agents:\n if agent.state[\"loc\"] == [r, c]:\n return agent\n\n def agent_by_location(self, r, c, agent_idx):\n \"\"\"Return Agent one block around location [r, c] if one exists otherwise returns -1.\"\"\"\n if not self.is_valid(r, c) or self.maps.unoccupied[r, c]:\n return -1\n agents = []\n for agent in self.agents:\n if agent.idx == agent_idx:\n continue\n agent_r, agent_c = agent.state[\"loc\"]\n if np.abs(agent_r - r) <= 1 and np.abs(agent_c - c) <= 1: \n agents.append(agent)\n return -1 if agents == [] else agents\n\n def clear_agent_locs(self):\n \"\"\"Take all agents off the board. Useful for resetting.\"\"\"\n for agent in self.agents:\n agent.state[\"loc\"] = [-1, -1]\n self.maps.clear_agent_loc()\n\n def agent_locs_are_valid(self):\n \"\"\"Returns True if all agent locations comply with world semantics.\"\"\"\n return all(\n self.is_location_accessible(*agent.loc, agent) for agent in self.agents\n )\n\n def set_agent_loc(self, agent, r, c):\n \"\"\"Set the agent's location to coordinates [r, c] if possible.\n\n If agent cannot occupy [r, c], do nothing.\"\"\"\n if self.can_agent_occupy(r, c, agent):\n self.maps.set_agent_loc(agent, r, c)\n return [int(coord) for coord in agent.loc]\n\n def location_resources(self, r, c):\n \"\"\"Return {resource: health} dictionary for any resources at location [r, c].\"\"\"\n if not self.is_valid(r, c):\n return {}\n return self.maps.location_resources(r, c)\n\n def location_landmarks(self, r, c):\n \"\"\"Return {landmark: health} dictionary for any landmarks at location [r, c].\"\"\"\n if not self.is_valid(r, c):\n return {}\n return self.maps.location_landmarks(r, c)\n\n def create_landmark(self, landmark_name, r, c, agent_idx=None):\n \"\"\"Place a landmark on the world map.\n\n Place landmark of type landmark_name at the given coordinates, indicating\n agent ownership if applicable.\"\"\"\n self.maps.set_point(landmark_name, r, c, 1, owner=agent_idx)\n\n def consume_resource(self, resource_name, r, c):\n \"\"\"Consume a unit of resource_name from location [r, c].\"\"\"\n self.maps.set_point_add(resource_name, r, c, -1)\n",
"# Copyright (c) 2021, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root\n# or https://opensource.org/licenses/BSD-3-Clause\n\nimport json\nimport os\nfrom datetime import datetime, timedelta\n\nimport GPUtil\nimport numpy as np\n\nfrom ai_economist.foundation.base.base_env import BaseEnvironment, scenario_registry\nfrom ai_economist.foundation.utils import verify_activation_code\n\ntry:\n num_gpus_available = len(GPUtil.getAvailable())\n if num_gpus_available == 0:\n # print(\"No GPUs found! Running the simulation on a CPU.\")\n num_gpus_available = 0\n else:\n from warp_drive.utils.constants import Constants\n from warp_drive.utils.data_feed import DataFeed\n\n _OBSERVATIONS = Constants.OBSERVATIONS\n _ACTIONS = Constants.ACTIONS\n _REWARDS = Constants.REWARDS\nexcept ModuleNotFoundError:\n print(\n \"Warning: The 'WarpDrive' package is not found and cannot be used! \"\n \"If you wish to use WarpDrive, please run \"\n \"'pip install rl-warp-drive' first.\"\n )\nexcept ValueError:\n # print(\"No GPUs found! Running the simulation on a CPU.\")\n num_gpus_available = 0\n\n\n@scenario_registry.add\nclass CovidAndEconomyEnvironment(BaseEnvironment):\n \"\"\"\n A simulation to model health and economy dynamics amidst the COVID-19 pandemic.\n The environment comprising 51 agents (each agent corresponding to a US state and\n Washington D.C.) and the Federal Government (planner). The state agents decide the\n stringency level of the policy response to the pandemic, while the federal\n government provides subsidies to eligible individuals.\n\n This simulation makes modeling assumptions. For details, see the technical paper:\n https://arxiv.org/abs/2108.02904\n\n Args:\n use_real_world_data (bool): Replay what happened in the real world.\n Real-world data comprises SIR (susceptible/infected/recovered),\n unemployment, government policy, and vaccination numbers.\n This setting also sets use_real_world_policies=True.\n use_real_world_policies (bool): Run the environment with real-world policies\n (stringency levels and subsidies). With this setting and\n use_real_world_data=False, SIR and economy dynamics are still\n driven by fitted models.\n path_to_data_and_fitted_params (dirpath): Full path to the directory containing\n the data, fitted parameters and model constants. This defaults to\n \"ai_economist/datasets/covid19_datasets/data_and_fitted_params\".\n For details on obtaining these parameters, please see the notebook\n \"ai-economist-foundation/ai_economist/datasets/covid19_datasets/\n gather_real_world_data_and_fit_parameters.ipynb\".\n start_date (string): Date (YYYY-MM-DD) to start the simulation.\n pop_between_age_18_65 (float): Fraction of the population between ages 18-65.\n This is the subset of the population whose employment/unemployment affects\n economic productivity.\n Range: 0 <= pop_between_age_18_65 <= 1.\n infection_too_sick_to_work_rate (float): Fraction of people infected with\n COVID-19. Infected people don't work.\n Range: 0 <= infection_too_sick_to_work_rate <= 1\n risk_free_interest_rate (float): Percentage of interest paid by the federal\n government to borrow money from the federal reserve for COVID-19 relief\n (direct payments). 
Higher interest rates mean that direct payments\n have a larger cost on the federal government's economic index.\n Range: 0 <= risk_free_interest_rate\n economic_reward_crra_eta (float): CRRA eta parameter for modeling the economic\n reward non-linearity.\n A useful reference: https://en.wikipedia.org/wiki/Isoelastic_utility\n Range: 0 <= economic_reward_crra_eta\n health_priority_scaling_agents (float): A factor indicating how much more the\n states prioritize health (roughly speaking, loss of lives due to\n opening up more) over the economy (roughly speaking, a loss in GDP\n due to shutting down resulting in more unemployment) compared to the\n real-world.\n For example, a value of 1 corresponds to the real-world, while\n a value of 2 means that states cared twice as much about public health\n (preventing deaths), while a value of 0.5 means that states cared twice\n as much about the economy (preventing GDP drops).\n Range: 0 <= health_priority_scaling_agents\n health_priority_scaling_planner (float): same as above,\n but for the federal government.\n Range: 0 <= health_priority_scaling_planner\n \"\"\"\n\n def __init__(\n self,\n *base_env_args,\n use_real_world_data=False,\n use_real_world_policies=False,\n path_to_data_and_fitted_params=\"\",\n start_date=\"2020-03-22\",\n pop_between_age_18_65=0.6,\n infection_too_sick_to_work_rate=0.1,\n risk_free_interest_rate=0.03,\n economic_reward_crra_eta=2,\n health_priority_scaling_agents=1,\n health_priority_scaling_planner=1,\n reward_normalization_factor=1,\n **base_env_kwargs,\n ):\n verify_activation_code()\n\n # Used for datatype checks\n self.np_float_dtype = np.float32\n self.np_int_dtype = np.int32\n\n # Flag to use real-world data or the fitted models instead\n self.use_real_world_data = use_real_world_data\n # Flag to use real-world policies (actions) or the supplied actions instead\n self.use_real_world_policies = use_real_world_policies\n\n # If we use real-world data, we also want to use the real-world policies\n if self.use_real_world_data:\n print(\n \"Using real-world data to initialize as well as to \"\n \"step through the env.\"\n )\n # Note: under this setting, the real_world policies are also used.\n assert self.use_real_world_policies, (\n \"Since the env. config. 
'use_real_world_data' is True, please also \"\n \"set 'use_real_world_policies' to True.\"\n )\n else:\n print(\n \"Using the real-world data to only initialize the env, \"\n \"and using the fitted models to step through the env.\"\n )\n\n # Load real-world date\n if path_to_data_and_fitted_params == \"\":\n current_dir = os.path.dirname(__file__)\n self.path_to_data_and_fitted_params = os.path.join(\n current_dir, \"../../../datasets/covid19_datasets/data_and_fitted_params\"\n )\n else:\n self.path_to_data_and_fitted_params = path_to_data_and_fitted_params\n\n print(\n \"Loading real-world data from {}\".format(\n self.path_to_data_and_fitted_params\n )\n )\n real_world_data_npz = np.load(\n os.path.join(self.path_to_data_and_fitted_params, \"real_world_data.npz\")\n )\n self._real_world_data = {}\n for key in list(real_world_data_npz):\n self._real_world_data[key] = real_world_data_npz[key]\n\n # Load fitted parameters\n print(\n \"Loading fit parameters from {}\".format(self.path_to_data_and_fitted_params)\n )\n self.load_model_constants(self.path_to_data_and_fitted_params)\n self.load_fitted_params(self.path_to_data_and_fitted_params)\n\n try:\n self.start_date = datetime.strptime(start_date, self.date_format)\n except ValueError:\n print(f\"Incorrect data format, should be {self.date_format}\")\n\n # Start date should be beyond the date for which data is available\n assert self.start_date >= self.policy_start_date\n\n # Compute a start date index based on policy start date\n self.start_date_index = (self.start_date - self.policy_start_date).days\n assert 0 <= self.start_date_index < len(self._real_world_data[\"policy\"])\n\n # For date logging (This will be overwritten in additional_reset_steps;\n # see below)\n self.current_date = None\n\n # When using real-world policy, limit the episode length\n # to the length of the available policy.\n if self.use_real_world_policies:\n real_world_policy_length = (\n len(self._real_world_data[\"policy\"]) - self.start_date_index\n )\n print(\"Using real-world policies, ignoring external action inputs.\")\n assert base_env_kwargs[\"episode_length\"] <= real_world_policy_length, (\n f\"The real-world policies are only available for \"\n f\"{real_world_policy_length} timesteps; so the 'episode_length' \"\n f\"in the environment configuration can only be at most \"\n f\"{real_world_policy_length}\"\n )\n else:\n print(\"Using external action inputs.\")\n\n # US states and populations\n self.num_us_states = len(self.us_state_population)\n\n assert (\n base_env_kwargs[\"n_agents\"] == self.num_us_states\n ), \"n_agents should be set to the number of US states, i.e., {}.\".format(\n self.num_us_states\n )\n # Note: For a faster environment step time, we collate all the individual agents\n # into a single agent index \"a\" and we flatten the component action masks too.\n assert base_env_kwargs[\n \"collate_agent_step_and_reset_data\"\n ], \"The env. 
config 'collate_agent_step_and_reset_data' should be set to True.\"\n super().__init__(*base_env_args, **base_env_kwargs)\n\n # Add attributes to self.world for use in components\n self.world.us_state_population = self.us_state_population\n self.world.us_population = self.us_population\n self.world.start_date = self.start_date\n self.world.n_stringency_levels = self.num_stringency_levels\n self.world.use_real_world_policies = self.use_real_world_policies\n if self.use_real_world_policies:\n # Agent open/close stringency levels\n self.world.real_world_stringency_policy = self._real_world_data[\"policy\"][\n self.start_date_index :\n ]\n # Planner subsidy levels\n self.world.real_world_subsidy = self._real_world_data[\"subsidy\"][\n self.start_date_index :\n ]\n\n # Policy --> Unemployment\n # For accurately modeling the state-wise unemployment, we convolve\n # the current stringency policy with a family of exponential filters\n # with separate means (lambdas).\n # This code sets up things we will use in `unemployment_step()`,\n # which includes a detailed breakdown of how the unemployment model is\n # implemented.\n self.stringency_level_history = None\n # Each filter captures a temporally extended response to a stringency change.\n self.num_filters = len(self.conv_lambdas)\n self.f_ts = np.tile(\n np.flip(np.arange(self.filter_len), (0,))[None, None],\n (1, self.num_filters, 1),\n ).astype(self.np_float_dtype)\n self.unemp_conv_filters = np.exp(-self.f_ts / self.conv_lambdas[None, :, None])\n # Each state weights these filters differently.\n self.repeated_conv_weights = np.repeat(\n self.grouped_convolutional_filter_weights.reshape(\n self.num_us_states, self.num_filters\n )[:, :, np.newaxis],\n self.filter_len,\n axis=-1,\n )\n\n # For manually modulating SIR/Unemployment parameters\n self._beta_intercepts_modulation = 1\n self._beta_slopes_modulation = 1\n self._unemployment_modulation = 1\n\n # Economy-related\n # Interest rate for borrowing money from the federal reserve\n self.risk_free_interest_rate = self.np_float_dtype(risk_free_interest_rate)\n\n # Compute each worker's daily productivity when at work (to match 2019 GDP)\n # We assume the open/close stringency policy level was always at it's lowest\n # value (i.e., 1) before the pandemic started.\n num_unemployed_at_stringency_level_1 = self.unemployment_step(\n np.ones(self.num_us_states)\n )\n workforce = (\n self.us_population * pop_between_age_18_65\n - np.sum(num_unemployed_at_stringency_level_1)\n ).astype(self.np_int_dtype)\n workers_per_capita = (workforce / self.us_population).astype(\n self.np_float_dtype\n )\n gdp_per_worker = (self.gdp_per_capita / workers_per_capita).astype(\n self.np_float_dtype\n )\n self.num_days_in_an_year = 365\n self.daily_production_per_worker = (\n gdp_per_worker / self.num_days_in_an_year\n ).astype(self.np_float_dtype)\n\n self.infection_too_sick_to_work_rate = self.np_float_dtype(\n infection_too_sick_to_work_rate\n )\n assert 0 <= self.infection_too_sick_to_work_rate <= 1\n\n self.pop_between_age_18_65 = self.np_float_dtype(pop_between_age_18_65)\n assert 0 <= self.pop_between_age_18_65 <= 1\n\n # Compute max possible productivity values (used for agent reward normalization)\n max_productivity_t = self.economy_step(\n self.us_state_population,\n np.zeros((self.num_us_states), dtype=self.np_int_dtype),\n np.zeros((self.num_us_states), dtype=self.np_int_dtype),\n num_unemployed_at_stringency_level_1,\n infection_too_sick_to_work_rate=self.infection_too_sick_to_work_rate,\n 
population_between_age_18_65=self.pop_between_age_18_65,\n )\n self.maximum_productivity_t = max_productivity_t\n\n # Economic reward non-linearity\n self.economic_reward_crra_eta = self.np_float_dtype(economic_reward_crra_eta)\n assert 0.0 <= self.economic_reward_crra_eta < 20.0\n\n # Health indices are normalized by maximum annual GDP\n self.agents_health_norm = self.maximum_productivity_t * self.num_days_in_an_year\n self.planner_health_norm = np.sum(self.agents_health_norm)\n\n # Economic indices are normalized by maximum annual GDP\n self.agents_economic_norm = (\n self.maximum_productivity_t * self.num_days_in_an_year\n )\n self.planner_economic_norm = np.sum(self.agents_economic_norm)\n\n def scale_health_over_economic_index(health_priority_scaling, alphas):\n \"\"\"\n Given starting alpha(s), compute new alphas so that the\n resulting alpha:1-alpha ratio is scaled by health_weightage\n \"\"\"\n z = alphas / (1 - alphas) # alphas = z / (1 + z)\n scaled_z = health_priority_scaling * z\n new_alphas = scaled_z / (1 + scaled_z)\n return new_alphas\n\n # Agents' health and economic index weightages\n # fmt: off\n self.weightage_on_marginal_agent_health_index = \\\n scale_health_over_economic_index(\n health_priority_scaling_agents,\n self.inferred_weightage_on_agent_health_index,\n )\n # fmt: on\n assert (\n (self.weightage_on_marginal_agent_health_index >= 0)\n & (self.weightage_on_marginal_agent_health_index <= 1)\n ).all()\n self.weightage_on_marginal_agent_economic_index = (\n 1 - self.weightage_on_marginal_agent_health_index\n )\n\n # Planner's health and economic index weightages\n # fmt: off\n self.weightage_on_marginal_planner_health_index = \\\n scale_health_over_economic_index(\n health_priority_scaling_planner,\n self.inferred_weightage_on_planner_health_index,\n )\n # fmt: on\n assert 0 <= self.weightage_on_marginal_planner_health_index <= 1\n self.weightage_on_marginal_planner_economic_index = (\n 1 - self.weightage_on_marginal_planner_health_index\n )\n\n # Normalization factor for the reward (often useful for RL training)\n self.reward_normalization_factor = reward_normalization_factor\n\n # CUDA-related attributes (for GPU simulations)\n # Note: these will be set / overwritten via the env_wrapper\n # use_cuda will be set to True (by the env_wrapper), if needed\n # to be simulated on the GPU\n self.use_cuda = False\n self.cuda_data_manager = None\n self.cuda_function_manager = None\n self.cuda_step = lambda *args, **kwargs: None\n self.cuda_compute_reward = lambda *args, **kwargs: None\n\n # Adding use_cuda to self.world for use in components\n self.world.use_cuda = self.use_cuda\n self.world.cuda_data_manager = self.cuda_data_manager\n self.world.cuda_function_manager = self.cuda_function_manager\n\n name = \"CovidAndEconomySimulation\"\n agent_subclasses = [\"BasicMobileAgent\", \"BasicPlanner\"]\n\n required_entities = []\n\n def reset_starting_layout(self):\n pass\n\n def reset_agent_states(self):\n self.world.clear_agent_locs()\n\n def get_data_dictionary(self):\n \"\"\"\n Create a dictionary of data to push to the GPU (device).\n \"\"\"\n data_dict = DataFeed()\n # Global States\n data_dict.add_data(\n name=\"susceptible\",\n data=self.world.global_state[\"Susceptible\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"infected\",\n data=self.world.global_state[\"Infected\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"recovered\",\n data=self.world.global_state[\"Recovered\"],\n save_copy_and_apply_at_reset=True,\n 
)\n data_dict.add_data(\n name=\"deaths\",\n data=self.world.global_state[\"Deaths\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"unemployed\",\n data=self.world.global_state[\"Unemployed\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"vaccinated\",\n data=self.world.global_state[\"Vaccinated\"],\n save_copy_and_apply_at_reset=True,\n )\n # Actions\n data_dict.add_data(\n name=\"stringency_level\",\n data=self.world.global_state[\"Stringency Level\"].astype(self.np_int_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"subsidy_level\",\n data=self.world.global_state[\"Subsidy Level\"].astype(self.np_int_dtype),\n save_copy_and_apply_at_reset=True,\n )\n # Economy-related\n data_dict.add_data(\n name=\"subsidy\",\n data=self.world.global_state[\"Subsidy\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"postsubsidy_productivity\",\n data=self.world.global_state[\"Postsubsidy Productivity\"],\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"productivity\",\n data=np.zeros_like(\n self.world.global_state[\"Susceptible\"], dtype=self.np_float_dtype\n ),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"incapacitated\",\n data=np.zeros((self.num_us_states), dtype=self.np_float_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"cant_work\",\n data=np.zeros((self.num_us_states), dtype=self.np_float_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"num_people_that_can_work\",\n data=np.zeros((self.num_us_states), dtype=self.np_float_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"us_state_population\",\n data=self.us_state_population,\n )\n data_dict.add_data(\n name=\"infection_too_sick_to_work_rate\",\n data=self.infection_too_sick_to_work_rate,\n )\n data_dict.add_data(\n name=\"population_between_age_18_65\",\n data=self.pop_between_age_18_65,\n )\n data_dict.add_data(\n name=\"daily_production_per_worker\",\n data=self.daily_production_per_worker,\n )\n data_dict.add_data(\n name=\"maximum_productivity\",\n data=self.maximum_productivity_t,\n )\n # SIR-related\n data_dict.add_data(\n name=\"real_world_stringency_policy_history\",\n data=(\n self._real_world_data[\"policy\"][\n self.start_date_index - self.beta_delay + 1 : self.start_date_index,\n :,\n ]\n ).astype(self.np_int_dtype),\n )\n data_dict.add_data(\n name=\"beta_delay\",\n data=self.beta_delay,\n )\n data_dict.add_data(\n name=\"beta_slopes\",\n data=self.beta_slopes,\n )\n data_dict.add_data(\n name=\"beta_intercepts\",\n data=self.beta_intercepts,\n )\n data_dict.add_data(\n name=\"beta\",\n data=np.zeros((self.num_us_states), dtype=self.np_float_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"gamma\",\n data=self.gamma,\n )\n data_dict.add_data(\n name=\"death_rate\",\n data=self.death_rate,\n )\n # Unemployment fit parameters\n data_dict.add_data(\n name=\"filter_len\",\n data=self.filter_len,\n )\n data_dict.add_data(\n name=\"num_filters\",\n data=self.num_filters,\n )\n data_dict.add_data(\n name=\"delta_stringency_level\",\n data=(\n self.stringency_level_history[1:] - self.stringency_level_history[:-1]\n ).astype(self.np_int_dtype),\n save_copy_and_apply_at_reset=True,\n )\n data_dict.add_data(\n name=\"grouped_convolutional_filter_weights\",\n data=self.grouped_convolutional_filter_weights,\n )\n data_dict.add_data(\n name=\"unemp_conv_filters\",\n 
data=self.unemp_conv_filters,\n )\n data_dict.add_data(\n name=\"unemployment_bias\",\n data=self.unemployment_bias,\n )\n data_dict.add_data(\n name=\"signal\",\n data=np.zeros(\n (self.n_agents, self.num_filters, self.filter_len),\n dtype=self.np_float_dtype,\n ),\n save_copy_and_apply_at_reset=True,\n )\n # Reward-related\n data_dict.add_data(\n name=\"min_marginal_agent_health_index\",\n data=self.min_marginal_agent_health_index,\n )\n data_dict.add_data(\n name=\"max_marginal_agent_health_index\",\n data=self.max_marginal_agent_health_index,\n )\n data_dict.add_data(\n name=\"min_marginal_agent_economic_index\",\n data=self.min_marginal_agent_economic_index,\n )\n data_dict.add_data(\n name=\"max_marginal_agent_economic_index\",\n data=self.max_marginal_agent_economic_index,\n )\n data_dict.add_data(\n name=\"min_marginal_planner_health_index\",\n data=self.min_marginal_planner_health_index,\n )\n data_dict.add_data(\n name=\"max_marginal_planner_health_index\",\n data=self.max_marginal_planner_health_index,\n )\n data_dict.add_data(\n name=\"min_marginal_planner_economic_index\",\n data=self.min_marginal_planner_economic_index,\n )\n data_dict.add_data(\n name=\"max_marginal_planner_economic_index\",\n data=self.max_marginal_planner_economic_index,\n )\n data_dict.add_data(\n name=\"weightage_on_marginal_agent_health_index\",\n data=self.weightage_on_marginal_agent_health_index,\n )\n data_dict.add_data(\n name=\"weightage_on_marginal_agent_economic_index\",\n data=self.weightage_on_marginal_agent_economic_index,\n )\n data_dict.add_data(\n name=\"weightage_on_marginal_planner_health_index\",\n data=self.weightage_on_marginal_planner_health_index,\n )\n data_dict.add_data(\n name=\"weightage_on_marginal_planner_economic_index\",\n data=self.weightage_on_marginal_planner_economic_index,\n )\n data_dict.add_data(\n name=\"value_of_life\",\n data=self.value_of_life,\n )\n data_dict.add_data(\n name=\"economic_reward_crra_eta\",\n data=self.economic_reward_crra_eta,\n )\n data_dict.add_data(\n name=\"num_days_in_an_year\",\n data=self.num_days_in_an_year,\n )\n data_dict.add_data(\n name=\"risk_free_interest_rate\",\n data=self.risk_free_interest_rate,\n )\n data_dict.add_data(\n name=\"agents_health_norm\",\n data=self.agents_health_norm,\n )\n data_dict.add_data(\n name=\"agents_economic_norm\",\n data=self.agents_economic_norm,\n )\n data_dict.add_data(\n name=\"planner_health_norm\",\n data=self.planner_health_norm,\n )\n data_dict.add_data(\n name=\"planner_economic_norm\",\n data=self.planner_economic_norm,\n )\n\n return data_dict\n\n def get_tensor_dictionary(self):\n \"\"\"\n Create a dictionary of (Pytorch-accessible) data to push to the GPU (device).\n \"\"\"\n tensor_dict = DataFeed()\n return tensor_dict\n\n def scenario_step(self):\n \"\"\"\n Update the state of the USA based on the Covid-19 and Economy dynamics.\n This internally implements three steps\n - sir_step() - updates the susceptible, infected, recovered, deaths\n and vaccination numbers based on the SIR equations\n - unemployment_step() - uses the unemployment model to updates the unemployment\n based on the stringency levels\n - economy_step - computes the current producitivity numbers for the agents\n \"\"\"\n if self.use_cuda:\n self.cuda_step(\n self.cuda_data_manager.device_data(\"susceptible\"),\n self.cuda_data_manager.device_data(\"infected\"),\n self.cuda_data_manager.device_data(\"recovered\"),\n self.cuda_data_manager.device_data(\"deaths\"),\n self.cuda_data_manager.device_data(\"vaccinated\"),\n 
self.cuda_data_manager.device_data(\"unemployed\"),\n self.cuda_data_manager.device_data(\"subsidy\"),\n self.cuda_data_manager.device_data(\"productivity\"),\n self.cuda_data_manager.device_data(\"stringency_level\"),\n self.cuda_data_manager.device_data(\"num_stringency_levels\"),\n self.cuda_data_manager.device_data(\"postsubsidy_productivity\"),\n self.cuda_data_manager.device_data(\"num_vaccines_available_t\"),\n self.cuda_data_manager.device_data(\n \"real_world_stringency_policy_history\"\n ),\n self.cuda_data_manager.device_data(\"beta_delay\"),\n self.cuda_data_manager.device_data(\"beta_slopes\"),\n self.cuda_data_manager.device_data(\"beta_intercepts\"),\n self.cuda_data_manager.device_data(\"beta\"),\n self.cuda_data_manager.device_data(\"gamma\"),\n self.cuda_data_manager.device_data(\"death_rate\"),\n self.cuda_data_manager.device_data(\"incapacitated\"),\n self.cuda_data_manager.device_data(\"cant_work\"),\n self.cuda_data_manager.device_data(\"num_people_that_can_work\"),\n self.cuda_data_manager.device_data(\"us_state_population\"),\n self.cuda_data_manager.device_data(\"infection_too_sick_to_work_rate\"),\n self.cuda_data_manager.device_data(\"population_between_age_18_65\"),\n self.cuda_data_manager.device_data(\"filter_len\"),\n self.cuda_data_manager.device_data(\"num_filters\"),\n self.cuda_data_manager.device_data(\"delta_stringency_level\"),\n self.cuda_data_manager.device_data(\n \"grouped_convolutional_filter_weights\"\n ),\n self.cuda_data_manager.device_data(\"unemp_conv_filters\"),\n self.cuda_data_manager.device_data(\"unemployment_bias\"),\n self.cuda_data_manager.device_data(\"signal\"),\n self.cuda_data_manager.device_data(\"daily_production_per_worker\"),\n self.cuda_data_manager.device_data(\"maximum_productivity\"),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_a_world-agent_state\"\n ),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_a_world-agent_postsubsidy_productivity\"\n ),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_a_world-lagged_stringency_level\"\n ),\n self.cuda_data_manager.device_data(f\"{_OBSERVATIONS}_a_time\"),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_p_world-agent_state\"\n ),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_p_world-agent_postsubsidy_productivity\"\n ),\n self.cuda_data_manager.device_data(\n f\"{_OBSERVATIONS}_p_world-lagged_stringency_level\"\n ),\n self.cuda_data_manager.device_data(f\"{_OBSERVATIONS}_p_time\"),\n self.cuda_data_manager.device_data(\"_timestep_\"),\n self.cuda_data_manager.meta_info(\"n_agents\"),\n self.cuda_data_manager.meta_info(\"episode_length\"),\n block=self.world.cuda_function_manager.block,\n grid=self.world.cuda_function_manager.grid,\n )\n else:\n prev_t = self.world.timestep - 1\n curr_t = self.world.timestep\n\n self.current_date += timedelta(days=1)\n\n # SIR\n # ---\n if self.use_real_world_data:\n _S_t = np.maximum(\n self._real_world_data[\"susceptible\"][\n curr_t + self.start_date_index\n ],\n 0,\n )\n _I_t = np.maximum(\n self._real_world_data[\"infected\"][curr_t + self.start_date_index],\n 0,\n )\n _R_t = np.maximum(\n self._real_world_data[\"recovered\"][curr_t + self.start_date_index],\n 0,\n )\n _V_t = np.maximum(\n self._real_world_data[\"vaccinated\"][curr_t + self.start_date_index],\n 0,\n )\n _D_t = np.maximum(\n self._real_world_data[\"deaths\"][curr_t + self.start_date_index],\n 0,\n )\n\n else: # Use simulation logic\n if curr_t - self.beta_delay < 0:\n if self.start_date_index + curr_t - self.beta_delay 
< 0:\n stringency_level_tmk = np.ones(self.num_us_states)\n else:\n stringency_level_tmk = self._real_world_data[\"policy\"][\n self.start_date_index + curr_t - self.beta_delay, :\n ]\n else:\n stringency_level_tmk = self.world.global_state[\"Stringency Level\"][\n curr_t - self.beta_delay\n ]\n stringency_level_tmk = stringency_level_tmk.astype(self.np_int_dtype)\n\n _S_tm1 = self.world.global_state[\"Susceptible\"][prev_t]\n _I_tm1 = self.world.global_state[\"Infected\"][prev_t]\n _R_tm1 = self.world.global_state[\"Recovered\"][prev_t]\n _V_tm1 = self.world.global_state[\"Vaccinated\"][prev_t]\n\n # Vaccination\n # -----------\n num_vaccines_available_t = np.zeros(\n self.n_agents, dtype=self.np_int_dtype\n )\n for aidx, agent in enumerate(self.world.agents):\n # \"Load\" the vaccines in the inventory into this vector.\n num_vaccines_available_t[aidx] = agent.state[\"Vaccines Available\"]\n # Agents always use whatever vaccines they can, so this becomes 0:\n agent.state[\"Total Vaccinated\"] += agent.state[\"Vaccines Available\"]\n agent.state[\"Vaccines Available\"] = 0\n\n # SIR step\n # --------\n _dS, _dI, _dR, _dV = self.sir_step(\n _S_tm1,\n _I_tm1,\n stringency_level_tmk,\n num_vaccines_available_t,\n )\n _S_t = np.maximum(_S_tm1 + _dS, 0)\n _I_t = np.maximum(_I_tm1 + _dI, 0)\n _R_t = np.maximum(_R_tm1 + _dR, 0)\n _V_t = np.maximum(_V_tm1 + _dV, 0)\n\n num_recovered_but_not_vaccinated_t = _R_t - _V_t\n _D_t = self.death_rate * num_recovered_but_not_vaccinated_t\n\n # Update global state\n # -------------------\n self.world.global_state[\"Susceptible\"][curr_t] = _S_t\n self.world.global_state[\"Infected\"][curr_t] = _I_t\n self.world.global_state[\"Recovered\"][curr_t] = _R_t\n self.world.global_state[\"Deaths\"][curr_t] = _D_t\n self.world.global_state[\"Vaccinated\"][curr_t] = _V_t\n\n # Unemployment\n # ------------\n if self.use_real_world_data:\n num_unemployed_t = self._real_world_data[\"unemployed\"][\n self.start_date_index + curr_t\n ]\n else:\n num_unemployed_t = self.unemployment_step(\n current_stringency_level=self.world.global_state[\n \"Stringency Level\"\n ][curr_t]\n )\n\n self.world.global_state[\"Unemployed\"][curr_t] = num_unemployed_t\n\n # Productivity\n # ------------\n productivity_t = self.economy_step(\n self.us_state_population,\n infected=_I_t,\n deaths=_D_t,\n unemployed=num_unemployed_t,\n infection_too_sick_to_work_rate=self.infection_too_sick_to_work_rate,\n population_between_age_18_65=self.pop_between_age_18_65,\n )\n\n # Subsidies\n # ---------\n # Add federal government subsidy to productivity\n daily_statewise_subsidy_t = self.world.global_state[\"Subsidy\"][curr_t]\n postsubsidy_productivity_t = productivity_t + daily_statewise_subsidy_t\n self.world.global_state[\"Postsubsidy Productivity\"][\n curr_t\n ] = postsubsidy_productivity_t\n\n # Update agent state\n # ------------------\n current_date_string = datetime.strftime(\n self.current_date, format=self.date_format\n )\n for agent in self.world.agents:\n agent.state[\"Total Susceptible\"] = _S_t[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"New Infections\"] = (\n _I_t[agent.idx] - agent.state[\"Total Infected\"]\n ).astype(self.np_int_dtype)\n agent.state[\"Total Infected\"] = _I_t[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"Total Recovered\"] = _R_t[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"New Deaths\"] = _D_t[agent.idx] - agent.state[\n \"Total Deaths\"\n ].astype(self.np_int_dtype)\n agent.state[\"Total Deaths\"] = 
_D_t[agent.idx].astype(self.np_int_dtype)\n agent.state[\"Total Vaccinated\"] = _V_t[agent.idx].astype(\n self.np_int_dtype\n )\n\n agent.state[\"Total Unemployed\"] = num_unemployed_t[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"New Subsidy Received\"] = daily_statewise_subsidy_t[\n agent.idx\n ]\n agent.state[\"Postsubsidy Productivity\"] = postsubsidy_productivity_t[\n agent.idx\n ]\n agent.state[\"Date\"] = current_date_string\n\n # Update planner state\n # --------------------\n self.world.planner.state[\"Total Susceptible\"] = np.sum(_S_t).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"New Infections\"] = (\n np.sum(_I_t) - self.world.planner.state[\"Total Infected\"]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Infected\"] = np.sum(_I_t).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"Total Recovered\"] = np.sum(_R_t).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"New Deaths\"] = (\n np.sum(_D_t) - self.world.planner.state[\"Total Deaths\"]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Deaths\"] = np.sum(_D_t).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"Total Vaccinated\"] = np.sum(_V_t).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"Total Unemployed\"] = np.sum(\n num_unemployed_t\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"New Subsidy Provided\"] = np.sum(\n daily_statewise_subsidy_t\n )\n self.world.planner.state[\"Postsubsidy Productivity\"] = np.sum(\n postsubsidy_productivity_t\n )\n self.world.planner.state[\"Date\"] = current_date_string\n\n def generate_observations(self):\n \"\"\"\n - Process agent-specific and planner-specific data into an observation.\n - Observations contain only the relevant features for that actor.\n :return: a dictionary of observations for each agent and planner\n \"\"\"\n redux_agent_global_state = None\n for feature in [\n \"Susceptible\",\n \"Infected\",\n \"Recovered\",\n \"Deaths\",\n \"Vaccinated\",\n \"Unemployed\",\n ]:\n if redux_agent_global_state is None:\n redux_agent_global_state = self.world.global_state[feature][\n self.world.timestep\n ]\n else:\n redux_agent_global_state = np.vstack(\n (\n redux_agent_global_state,\n self.world.global_state[feature][self.world.timestep],\n )\n )\n normalized_redux_agent_state = (\n redux_agent_global_state / self.us_state_population[None]\n )\n\n # Productivity\n postsubsidy_productivity_t = self.world.global_state[\n \"Postsubsidy Productivity\"\n ][self.world.timestep]\n normalized_postsubsidy_productivity_t = (\n postsubsidy_productivity_t / self.maximum_productivity_t\n )\n\n # Let agents know about the policy about to affect SIR infection-rate beta\n t_beta = self.world.timestep - self.beta_delay + 1\n if t_beta < 0:\n lagged_stringency_level = self._real_world_data[\"policy\"][\n self.start_date_index + t_beta\n ]\n else:\n lagged_stringency_level = self.world.global_state[\"Stringency Level\"][\n t_beta\n ]\n\n normalized_lagged_stringency_level = (\n lagged_stringency_level / self.num_stringency_levels\n )\n\n # To condition policy on agent id\n agent_index = np.eye(self.n_agents, dtype=self.np_int_dtype)\n\n # Observation dict - Agents\n # -------------------------\n obs_dict = dict()\n obs_dict[\"a\"] = {\n \"agent_index\": agent_index,\n \"agent_state\": normalized_redux_agent_state,\n \"agent_postsubsidy_productivity\": normalized_postsubsidy_productivity_t,\n \"lagged_stringency_level\": normalized_lagged_stringency_level,\n }\n\n # Observation dict 
- Planner\n # --------------------------\n obs_dict[self.world.planner.idx] = {\n \"agent_state\": normalized_redux_agent_state,\n \"agent_postsubsidy_productivity\": normalized_postsubsidy_productivity_t,\n \"lagged_stringency_level\": normalized_lagged_stringency_level,\n }\n\n return obs_dict\n\n def compute_reward(self):\n \"\"\"\n Compute the social welfare metrics for each agent and the planner.\n :return: a dictionary of rewards for each agent in the simulation\n \"\"\"\n if self.use_cuda:\n self.cuda_compute_reward(\n self.cuda_data_manager.device_data(f\"{_REWARDS}_a\"),\n self.cuda_data_manager.device_data(f\"{_REWARDS}_p\"),\n self.cuda_data_manager.device_data(\"num_days_in_an_year\"),\n self.cuda_data_manager.device_data(\"value_of_life\"),\n self.cuda_data_manager.device_data(\"risk_free_interest_rate\"),\n self.cuda_data_manager.device_data(\"economic_reward_crra_eta\"),\n self.cuda_data_manager.device_data(\"min_marginal_agent_health_index\"),\n self.cuda_data_manager.device_data(\"max_marginal_agent_health_index\"),\n self.cuda_data_manager.device_data(\"min_marginal_agent_economic_index\"),\n self.cuda_data_manager.device_data(\"max_marginal_agent_economic_index\"),\n self.cuda_data_manager.device_data(\"min_marginal_planner_health_index\"),\n self.cuda_data_manager.device_data(\"max_marginal_planner_health_index\"),\n self.cuda_data_manager.device_data(\n \"min_marginal_planner_economic_index\"\n ),\n self.cuda_data_manager.device_data(\n \"max_marginal_planner_economic_index\"\n ),\n self.cuda_data_manager.device_data(\n \"weightage_on_marginal_agent_health_index\"\n ),\n self.cuda_data_manager.device_data(\n \"weightage_on_marginal_agent_economic_index\"\n ),\n self.cuda_data_manager.device_data(\n \"weightage_on_marginal_planner_health_index\"\n ),\n self.cuda_data_manager.device_data(\n \"weightage_on_marginal_planner_economic_index\"\n ),\n self.cuda_data_manager.device_data(\"agents_health_norm\"),\n self.cuda_data_manager.device_data(\"agents_economic_norm\"),\n self.cuda_data_manager.device_data(\"planner_health_norm\"),\n self.cuda_data_manager.device_data(\"planner_economic_norm\"),\n self.cuda_data_manager.device_data(\"deaths\"),\n self.cuda_data_manager.device_data(\"subsidy\"),\n self.cuda_data_manager.device_data(\"postsubsidy_productivity\"),\n self.cuda_data_manager.device_data(\"_done_\"),\n self.cuda_data_manager.device_data(\"_timestep_\"),\n self.cuda_data_manager.meta_info(\"n_agents\"),\n self.cuda_data_manager.meta_info(\"episode_length\"),\n block=self.world.cuda_function_manager.block,\n grid=self.world.cuda_function_manager.grid,\n )\n return {} # Return empty dict. 
Reward arrays are updated in-place\n rew = {\"a\": 0, \"p\": 0}\n\n def crra_nonlinearity(x, eta):\n # Reference: https://en.wikipedia.org/wiki/Isoelastic_utility\n # To be applied to (marginal) economic indices\n annual_x = self.num_days_in_an_year * x\n annual_x_clipped = np.clip(annual_x, 0.1, 3)\n annual_crra = 1 + (annual_x_clipped ** (1 - eta) - 1) / (1 - eta)\n daily_crra = annual_crra / self.num_days_in_an_year\n return daily_crra\n\n def min_max_normalization(x, min_x, max_x):\n eps = 1e-10\n return (x - min_x) / (max_x - min_x + eps)\n\n def get_weighted_average(\n health_index_weightage,\n health_index,\n economic_index_weightage,\n economic_index,\n ):\n return (\n health_index_weightage * health_index\n + economic_index_weightage * economic_index\n ) / (health_index_weightage + economic_index_weightage)\n\n # Changes this last timestep:\n marginal_deaths = (\n self.world.global_state[\"Deaths\"][self.world.timestep]\n - self.world.global_state[\"Deaths\"][self.world.timestep - 1]\n )\n\n subsidy_t = self.world.global_state[\"Subsidy\"][self.world.timestep]\n postsubsidy_productivity_t = self.world.global_state[\n \"Postsubsidy Productivity\"\n ][self.world.timestep]\n\n # Health index -- the cost equivalent (annual GDP) of covid deaths\n # Note: casting deaths to float to prevent overflow issues\n marginal_agent_health_index = (\n -marginal_deaths.astype(self.np_float_dtype)\n * self.value_of_life\n / self.agents_health_norm\n ).astype(self.np_float_dtype)\n\n # Economic index -- fraction of annual GDP achieved\n # Use a \"crra\" nonlinearity on the agent economic reward\n marginal_agent_economic_index = crra_nonlinearity(\n postsubsidy_productivity_t / self.agents_economic_norm,\n self.economic_reward_crra_eta,\n ).astype(self.np_float_dtype)\n\n # Min-max Normalization\n marginal_agent_health_index = min_max_normalization(\n marginal_agent_health_index,\n self.min_marginal_agent_health_index,\n self.max_marginal_agent_health_index,\n ).astype(self.np_float_dtype)\n marginal_agent_economic_index = min_max_normalization(\n marginal_agent_economic_index,\n self.min_marginal_agent_economic_index,\n self.max_marginal_agent_economic_index,\n ).astype(self.np_float_dtype)\n\n # Agent Rewards\n # -------------\n agent_rewards = get_weighted_average(\n self.weightage_on_marginal_agent_health_index,\n marginal_agent_health_index,\n self.weightage_on_marginal_agent_economic_index,\n marginal_agent_economic_index,\n )\n rew[\"a\"] = agent_rewards / self.reward_normalization_factor\n\n # Update agent states\n # -------------------\n for agent in self.world.agents:\n agent.state[\"Health Index\"] += marginal_agent_health_index[agent.idx]\n agent.state[\"Economic Index\"] += marginal_agent_economic_index[agent.idx]\n\n # National level\n # --------------\n # Health index -- the cost equivalent (annual GDP) of covid deaths\n # Note: casting deaths to float to prevent overflow issues\n marginal_planner_health_index = (\n -np.sum(marginal_deaths).astype(self.np_float_dtype)\n * self.value_of_life\n / self.planner_health_norm\n )\n\n # Economic index -- fraction of annual GDP achieved (minus subsidy cost)\n cost_of_subsidy_t = (1 + self.risk_free_interest_rate) * np.sum(subsidy_t)\n # Use a \"crra\" nonlinearity on the planner economic reward\n marginal_planner_economic_index = crra_nonlinearity(\n (np.sum(postsubsidy_productivity_t) - cost_of_subsidy_t)\n / self.planner_economic_norm,\n self.economic_reward_crra_eta,\n )\n\n # Min-max Normalization\n marginal_planner_health_index = 
min_max_normalization(\n marginal_planner_health_index,\n self.min_marginal_planner_health_index,\n self.max_marginal_planner_health_index,\n )\n marginal_planner_economic_index = min_max_normalization(\n marginal_planner_economic_index,\n self.min_marginal_planner_economic_index,\n self.max_marginal_planner_economic_index,\n )\n\n # Update planner states\n # -------------------\n self.world.planner.state[\"Health Index\"] += marginal_planner_health_index\n self.world.planner.state[\"Economic Index\"] += marginal_planner_economic_index\n\n # Planner Reward\n # --------------\n planner_rewards = get_weighted_average(\n self.weightage_on_marginal_planner_health_index,\n marginal_planner_health_index,\n self.weightage_on_marginal_planner_economic_index,\n marginal_planner_economic_index,\n )\n rew[self.world.planner.idx] = planner_rewards / self.reward_normalization_factor\n\n return rew\n\n def additional_reset_steps(self):\n assert self.world.timestep == 0\n\n # Reset current date\n self.current_date = self.start_date\n\n # SIR numbers at timestep 0\n susceptible_0 = self._real_world_data[\"susceptible\"][self.start_date_index]\n infected_0 = self._real_world_data[\"infected\"][self.start_date_index]\n newly_infected_0 = (\n infected_0\n - self._real_world_data[\"infected\"][max(0, self.start_date_index - 1)]\n )\n recovered_0 = self._real_world_data[\"recovered\"][self.start_date_index]\n deaths_0 = recovered_0 * self.death_rate\n\n # Unemployment and vaccinated numbers at timestep 0\n unemployed_0 = self._real_world_data[\"unemployed\"][self.start_date_index]\n vaccinated_0 = self._real_world_data[\"vaccinated\"][self.start_date_index]\n\n # Create a global state dictionary to save episode data\n self.world.global_state = {}\n self.set_global_state(\"Susceptible\", susceptible_0, t=self.world.timestep)\n self.set_global_state(\"Infected\", infected_0, t=self.world.timestep)\n self.set_global_state(\"Recovered\", recovered_0, t=self.world.timestep)\n self.set_global_state(\"Deaths\", deaths_0, t=self.world.timestep)\n\n self.set_global_state(\"Unemployed\", unemployed_0, t=self.world.timestep)\n self.set_global_state(\"Vaccinated\", vaccinated_0, t=self.world.timestep)\n\n new_deaths_0 = (\n deaths_0\n - self._real_world_data[\"recovered\"][max(0, self.start_date_index - 1)]\n * self.death_rate\n )\n\n # Reset stringency level history.\n # Pad with stringency levels of 1 corresponding to states being fully open\n # (as was the case before the pandemic).\n self.stringency_level_history = np.pad(\n self._real_world_data[\"policy\"][: self.start_date_index + 1],\n [(self.filter_len, 0), (0, 0)],\n constant_values=1,\n )[-(self.filter_len + 1) :]\n\n # Set the stringency level based to the real-world policy\n self.set_global_state(\n \"Stringency Level\",\n self._real_world_data[\"policy\"][self.start_date_index],\n t=self.world.timestep,\n )\n\n # All US states start with zero subsidy and zero Postsubsidy Productivity\n self.set_global_state(\"Subsidy Level\", dtype=self.np_float_dtype)\n self.set_global_state(\"Subsidy\", dtype=self.np_float_dtype)\n self.set_global_state(\"Postsubsidy Productivity\", dtype=self.np_float_dtype)\n\n # Set initial agent states\n # ------------------------\n current_date_string = datetime.strftime(\n self.current_date, format=self.date_format\n )\n\n for agent in self.world.agents:\n agent.state[\"Total Susceptible\"] = susceptible_0[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"New Infections\"] = newly_infected_0[agent.idx].astype(\n 
self.np_int_dtype\n )\n agent.state[\"Total Infected\"] = infected_0[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"Total Recovered\"] = recovered_0[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"New Deaths\"] = new_deaths_0[agent.idx].astype(\n self.np_int_dtype\n )\n agent.state[\"Total Deaths\"] = deaths_0[agent.idx].astype(self.np_int_dtype)\n agent.state[\"Health Index\"] = np.array([0]).astype(self.np_float_dtype)\n agent.state[\"Economic Index\"] = np.array([0]).astype(self.np_float_dtype)\n agent.state[\"Date\"] = current_date_string\n\n # Planner state fields\n self.world.planner.state[\"Total Susceptible\"] = np.sum(\n [agent.state[\"Total Susceptible\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"New Infections\"] = np.sum(\n [agent.state[\"New Infections\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Infected\"] = np.sum(\n [agent.state[\"Total Infected\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Recovered\"] = np.sum(\n [agent.state[\"Total Recovered\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"New Deaths\"] = np.sum(\n [agent.state[\"New Deaths\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Deaths\"] = np.sum(\n [agent.state[\"Total Deaths\"] for agent in self.world.agents]\n ).astype(self.np_int_dtype)\n self.world.planner.state[\"Total Vaccinated\"] = np.sum(vaccinated_0).astype(\n self.np_int_dtype\n )\n self.world.planner.state[\"Health Index\"] = np.array([0]).astype(\n self.np_float_dtype\n )\n self.world.planner.state[\"Economic Index\"] = np.array([0]).astype(\n self.np_float_dtype\n )\n\n self.world.planner.state[\"Date\"] = current_date_string\n\n # Reset any manually set parameter modulations\n self._beta_intercepts_modulation = 1\n self._beta_slopes_modulation = 1\n self._unemployment_modulation = 1\n\n def set_global_state(self, key=None, value=None, t=None, dtype=None):\n # Use floats by default for the SIR dynamics\n if dtype is None:\n dtype = self.np_float_dtype\n assert key in [\n \"Susceptible\",\n \"Infected\",\n \"Recovered\",\n \"Deaths\",\n \"Unemployed\",\n \"Vaccinated\",\n \"Stringency Level\",\n \"Subsidy Level\",\n \"Subsidy\",\n \"Postsubsidy Productivity\",\n ]\n # If no values are passed, set everything to zeros.\n if key not in self.world.global_state:\n self.world.global_state[key] = np.zeros(\n (self.episode_length + 1, self.num_us_states), dtype=dtype\n )\n\n if t is not None and value is not None:\n assert isinstance(value, np.ndarray)\n assert value.shape[0] == self.world.global_state[key].shape[1]\n\n self.world.global_state[key][t] = value\n else:\n pass\n\n def set_parameter_modulations(\n self, beta_intercept=None, beta_slope=None, unemployment=None\n ):\n \"\"\"\n Apply parameter modulation, which will be in effect until the next env reset.\n\n Each modulation term scales the associated set of model parameters by the\n input value. 
This method is useful for performing a sensitivity analysis.\n\n In effect, the transmission rate (beta) will be calculated as:\n beta = (m_s * beta_slope)*lagged_stringency + (m_i * beta_intercept)\n\n The unemployment rate (u) will be calculated as:\n u = SOFTPLUS( m_u * SUM(u_filter_weight * u_filter_response) ) + u_0\n\n Args:\n beta_intercept: (float, >= 0) Modulation applied to the intercept term\n of the beta model, m_i in above equations\n beta_slope: (float, >= 0) Modulation applied to the slope term of the\n beta model, m_s in above equations\n unemployment: (float, >= 0) Modulation applied to the weighted sum of\n unemployment filter responses, m_u in above equations.\n\n Example:\n # Reset the environment\n env.reset()\n\n # Increase the slope of the beta response by 15%\n env.set_parameter_modulations(beta_slope=1.15)\n\n # Run the environment (this example skips over action selection for brevity)\n for t in range(env.episode_length):\n env.step(actions[t])\n \"\"\"\n if beta_intercept is not None:\n beta_intercept = float(beta_intercept)\n assert beta_intercept >= 0\n self._beta_intercepts_modulation = beta_intercept\n\n if beta_slope is not None:\n beta_slope = float(beta_slope)\n assert beta_slope >= 0\n self._beta_slopes_modulation = beta_slope\n\n if unemployment is not None:\n unemployment = float(unemployment)\n assert unemployment >= 0\n self._unemployment_modulation = unemployment\n\n def unemployment_step(self, current_stringency_level):\n \"\"\"\n Computes unemployment given the current stringency level and past levels.\n\n Unemployment is computed as follows:\n 1) For each of self.num_filters, an exponentially decaying filter is\n convolved with the history of stringency changes. Responses move forward in\n time, so a stringency change at time t-1 impacts the response at time t.\n 2) The filter responses at time t (the current timestep) are summed together\n using state-specific weights.\n 3) The weighted sum is passed through a SOFTPLUS function to capture excess\n unemployment due to stringency policy.\n 4) The excess unemployment is added to a state-specific baseline unemployment\n level to get the total unemployment.\n\n Note: Internally, unemployment is computed somewhat differently for speed.\n In particular, no convolution is used. Instead the \"filter response\" at\n time t is just a temporally discounted sum of past stringency changes,\n with the discounting given by the filter decay rate.\n \"\"\"\n\n def softplus(x, beta=1, threshold=20):\n \"\"\"\n Numpy implementation of softplus. 
For reference, see\n https://pytorch.org/docs/stable/generated/torch.nn.Softplus.html\n \"\"\"\n return 1 / beta * np.log(1 + np.exp(beta * x)) * (\n beta * x <= threshold\n ) + x * (beta * x > threshold)\n\n if (\n self.world.timestep == 0\n ): # computing unemployment at closure policy \"all ones\"\n delta_stringency_level = np.zeros((self.filter_len, self.num_us_states))\n else:\n self.stringency_level_history = np.concatenate(\n (\n self.stringency_level_history[1:],\n current_stringency_level.reshape(1, -1),\n )\n )\n delta_stringency_level = (\n self.stringency_level_history[1:] - self.stringency_level_history[:-1]\n )\n\n # Rather than modulating the unemployment params,\n # modulate the deltas (same effect)\n delta_stringency_level = delta_stringency_level * self._unemployment_modulation\n\n # Expand the [time, state] delta history to have a dimension for filter channel\n x_data = delta_stringency_level[None].transpose(2, 0, 1)\n\n # Apply the state-specific filter weights to each channel\n weighted_x_data = x_data * self.repeated_conv_weights\n\n # Compute the discounted sum of the weighted deltas, with each channel using\n # a discounting rate reflecting the time constant of the filter channel. Also\n # sum over channels and use a softplus to get excess unemployment.\n excess_unemployment = softplus(\n np.sum(weighted_x_data * self.unemp_conv_filters, axis=(1, 2)), beta=1\n )\n\n # Add excess unemployment to baseline unemployment\n unemployment_rate = excess_unemployment + self.unemployment_bias\n\n # Convert the rate (which is a percent) to raw numbers for output\n num_unemployed_t = unemployment_rate * self.us_state_population / 100\n return num_unemployed_t\n\n # --- Scenario-specific ---\n def economy_step(\n self,\n population,\n infected,\n deaths,\n unemployed,\n infection_too_sick_to_work_rate=0.05,\n population_between_age_18_65=0.67,\n ):\n \"\"\"\n Computes how much production occurs.\n\n Assumptions:\n\n - People that cannot work: \"infected + aware\" and \"unemployed\" and \"deaths\".\n - No life/death cycles.\n\n See __init__() for pre-computation of each worker's daily productivity.\n \"\"\"\n\n incapacitated = (infection_too_sick_to_work_rate * infected) + deaths\n cant_work = (incapacitated * population_between_age_18_65) + unemployed\n\n num_workers = population * population_between_age_18_65\n\n num_people_that_can_work = np.maximum(0, num_workers - cant_work)\n\n productivity = (\n num_people_that_can_work * self.daily_production_per_worker\n ).astype(self.np_float_dtype)\n\n return productivity\n\n def sir_step(\n self,\n S_tm1,\n I_tm1,\n stringency_level_tmk,\n num_vaccines_available_t,\n ):\n \"\"\"\n Simulates SIR infection model in the US.\n \"\"\"\n intercepts = self.beta_intercepts * self._beta_intercepts_modulation\n slopes = self.beta_slopes * self._beta_slopes_modulation\n beta_i = (intercepts + slopes * stringency_level_tmk).astype(\n self.np_float_dtype\n )\n\n small_number = 1e-10 # used to prevent indeterminate cases\n susceptible_fraction_vaccinated = np.minimum(\n np.ones((self.num_us_states), dtype=self.np_int_dtype),\n num_vaccines_available_t / (S_tm1 + small_number),\n ).astype(self.np_float_dtype)\n vaccinated_t = np.minimum(num_vaccines_available_t, S_tm1)\n\n # Record R0\n R0 = beta_i / self.gamma\n for agent in self.world.agents:\n agent.state[\"R0\"] = R0[agent.idx]\n\n # S -> I; dS\n neighborhood_SI_over_N = (S_tm1 / self.us_state_population) * I_tm1\n dS_t = (\n -beta_i * neighborhood_SI_over_N * (1 - 
susceptible_fraction_vaccinated)\n - vaccinated_t\n ).astype(self.np_float_dtype)\n\n # I -> R; dR\n dR_t = (self.gamma * I_tm1 + vaccinated_t).astype(self.np_float_dtype)\n\n # dI from d(S + I + R) = 0\n # ------------------------\n dI_t = -dS_t - dR_t\n\n dV_t = vaccinated_t.astype(self.np_float_dtype)\n\n return dS_t, dI_t, dR_t, dV_t\n\n def load_model_constants(self, path_to_model_constants):\n filename = \"model_constants.json\"\n assert filename in os.listdir(path_to_model_constants), (\n \"Unable to locate '{}' in '{}'.\\nPlease run the \"\n \"'gather_real_world_data.ipynb' notebook first\".format(\n filename, path_to_model_constants\n )\n )\n with open(os.path.join(path_to_model_constants, filename), \"r\") as fp:\n model_constants_dict = json.load(fp)\n fp.close()\n\n self.date_format = model_constants_dict[\"DATE_FORMAT\"]\n self.us_state_idx_to_state_name = model_constants_dict[\n \"US_STATE_IDX_TO_STATE_NAME\"\n ]\n self.us_state_population = self.np_int_dtype(\n model_constants_dict[\"US_STATE_POPULATION\"]\n )\n self.us_population = self.np_int_dtype(model_constants_dict[\"US_POPULATION\"])\n self.num_stringency_levels = model_constants_dict[\"NUM_STRINGENCY_LEVELS\"]\n self.death_rate = self.np_float_dtype(model_constants_dict[\"SIR_MORTALITY\"])\n self.gamma = self.np_float_dtype(model_constants_dict[\"SIR_GAMMA\"])\n self.gdp_per_capita = self.np_float_dtype(\n model_constants_dict[\"GDP_PER_CAPITA\"]\n )\n\n def load_fitted_params(self, path_to_fitted_params):\n filename = \"fitted_params.json\"\n assert filename in os.listdir(path_to_fitted_params), (\n \"Unable to locate '{}' in '{}'.\\nIf you ran the \"\n \"'gather_real_world_data.ipynb' notebook to download the latest \"\n \"real-world data, please also run the \"\n \"'fit_parameters.ipynb' notebook.\".format(filename, path_to_fitted_params)\n )\n with open(os.path.join(path_to_fitted_params, filename), \"r\") as fp:\n fitted_params_dict = json.load(fp)\n fp.close()\n self.policy_start_date = datetime.strptime(\n fitted_params_dict[\"POLICY_START_DATE\"], self.date_format\n )\n self.value_of_life = self.np_int_dtype(fitted_params_dict[\"VALUE_OF_LIFE\"])\n self.beta_delay = self.np_int_dtype(fitted_params_dict[\"BETA_DELAY\"])\n self.beta_slopes = np.array(\n fitted_params_dict[\"BETA_SLOPES\"], dtype=self.np_float_dtype\n )\n self.beta_intercepts = np.array(\n fitted_params_dict[\"BETA_INTERCEPTS\"], dtype=self.np_float_dtype\n )\n self.min_marginal_agent_health_index = np.array(\n fitted_params_dict[\"MIN_MARGINAL_AGENT_HEALTH_INDEX\"],\n dtype=self.np_float_dtype,\n )\n self.max_marginal_agent_health_index = np.array(\n fitted_params_dict[\"MAX_MARGINAL_AGENT_HEALTH_INDEX\"],\n dtype=self.np_float_dtype,\n )\n self.min_marginal_agent_economic_index = np.array(\n fitted_params_dict[\"MIN_MARGINAL_AGENT_ECONOMIC_INDEX\"],\n dtype=self.np_float_dtype,\n )\n self.max_marginal_agent_economic_index = np.array(\n fitted_params_dict[\"MAX_MARGINAL_AGENT_ECONOMIC_INDEX\"],\n dtype=self.np_float_dtype,\n )\n self.min_marginal_planner_health_index = self.np_float_dtype(\n fitted_params_dict[\"MIN_MARGINAL_PLANNER_HEALTH_INDEX\"]\n )\n self.max_marginal_planner_health_index = self.np_float_dtype(\n fitted_params_dict[\"MAX_MARGINAL_PLANNER_HEALTH_INDEX\"]\n )\n self.min_marginal_planner_economic_index = self.np_float_dtype(\n fitted_params_dict[\"MIN_MARGINAL_PLANNER_ECONOMIC_INDEX\"]\n )\n self.max_marginal_planner_economic_index = self.np_float_dtype(\n fitted_params_dict[\"MAX_MARGINAL_PLANNER_ECONOMIC_INDEX\"]\n )\n 
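# Inferred health-vs-economic index weightages; __init__() rescales these via\n        # scale_health_over_economic_index() using the health_priority_scaling arguments\n        # before they enter the reward computation.\n        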
self.inferred_weightage_on_agent_health_index = np.array(\n fitted_params_dict[\"INFERRED_WEIGHTAGE_ON_AGENT_HEALTH_INDEX\"],\n dtype=self.np_float_dtype,\n )\n self.inferred_weightage_on_planner_health_index = self.np_float_dtype(\n fitted_params_dict[\"INFERRED_WEIGHTAGE_ON_PLANNER_HEALTH_INDEX\"]\n )\n self.filter_len = self.np_int_dtype(fitted_params_dict[\"FILTER_LEN\"])\n self.conv_lambdas = np.array(\n fitted_params_dict[\"CONV_LAMBDAS\"], dtype=self.np_float_dtype\n )\n self.unemployment_bias = np.array(\n fitted_params_dict[\"UNEMPLOYMENT_BIAS\"], dtype=self.np_float_dtype\n )\n self.grouped_convolutional_filter_weights = np.array(\n fitted_params_dict[\"GROUPED_CONVOLUTIONAL_FILTER_WEIGHTS\"],\n dtype=self.np_float_dtype,\n )\n\n def scenario_metrics(self):\n # End of episode metrics\n # ----------------------\n metrics_dict = {}\n\n # State-level metrics\n for agent in self.world.agents:\n state_name = self.us_state_idx_to_state_name[str(agent.idx)]\n\n for field in [\n \"infected\",\n \"recovered\",\n \"deaths\",\n ]:\n metric_key = \"{}/{} (millions)\".format(state_name, field)\n metrics_dict[metric_key] = (\n agent.state[\"Total \" + field.capitalize()] / 1e6\n )\n\n metrics_dict[\"{}/mean_unemployment_rate (%)\".format(state_name)] = (\n np.mean(\n self.world.global_state[\"Unemployed\"][1:, agent.idx],\n axis=0,\n )\n / self.us_state_population[agent.idx]\n * 100\n )\n\n metrics_dict[\n \"{}/mean_open_close_stringency_level\".format(state_name)\n ] = np.mean(\n self.world.global_state[\"Stringency Level\"][1:, agent.idx],\n axis=0,\n )\n\n metrics_dict[\"{}/total_productivity (billion $)\".format(state_name)] = (\n np.sum(\n self.world.global_state[\"Postsubsidy Productivity\"][1:, agent.idx],\n )\n / 1e9\n )\n\n metrics_dict[\n \"{}/health_index_at_end_of_episode\".format(state_name)\n ] = agent.state[\"Health Index\"]\n metrics_dict[\n \"{}/economic_index_at_end_of_episode\".format(state_name)\n ] = agent.state[\"Economic Index\"]\n\n # USA-level metrics\n metrics_dict[\"usa/vaccinated (% of population)\"] = (\n np.sum(\n self.world.global_state[\"Vaccinated\"][self.world.timestep],\n axis=0,\n )\n / self.us_population\n * 100\n )\n metrics_dict[\"usa/deaths (thousands)\"] = (\n np.sum(\n self.world.global_state[\"Deaths\"][self.world.timestep],\n axis=0,\n )\n / 1e3\n )\n\n metrics_dict[\"usa/mean_unemployment_rate (%)\"] = (\n np.mean(\n np.sum(\n self.world.global_state[\"Unemployed\"][1:],\n axis=1,\n )\n / self.us_population,\n axis=0,\n )\n * 100\n )\n metrics_dict[\"usa/total_amount_subsidized (trillion $)\"] = (\n np.sum(\n self.world.global_state[\"Subsidy\"][1:],\n axis=(0, 1),\n )\n / 1e12\n )\n metrics_dict[\"usa/total_productivity (trillion $)\"] = (\n np.sum(\n self.world.global_state[\"Postsubsidy Productivity\"][1:],\n axis=(0, 1),\n )\n / 1e12\n )\n\n metrics_dict[\"usa/health_index_at_end_of_episode\"] = self.world.planner.state[\n \"Health Index\"\n ]\n metrics_dict[\"usa/economic_index_at_end_of_episode\"] = self.world.planner.state[\n \"Economic Index\"\n ]\n\n return metrics_dict\n",
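# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the environment code above): a single-state,
# scalar version of the unemployment model that `unemployment_step()` documents,
# i.e. temporally discounted filter responses to stringency changes, a weighted
# sum over filters, a softplus, and a baseline. All numbers below (decay rates,
# filter weights, bias, population) are made-up placeholders so the example runs;
# in the environment the real, per-state values come from fitted_params.json via
# load_fitted_params(), and the computation is vectorized over all US states.
# The helper name `toy_unemployment_rate` is ours, not part of the library.
# ---------------------------------------------------------------------------
import numpy as np


def softplus(x, beta=1.0, threshold=20.0):
    # Same piecewise softplus used in the scenario's unemployment_step().
    return (1.0 / beta) * np.log(1.0 + np.exp(beta * x)) * (beta * x <= threshold) + x * (
        beta * x > threshold
    )


def toy_unemployment_rate(stringency_history, conv_lambdas, filter_weights, bias):
    """
    stringency_history: [filter_len + 1] past stringency levels for ONE state.
    conv_lambdas:       [num_filters] exponential time constants of the filters.
    filter_weights:     [num_filters] state-specific weights on each filter.
    bias:               scalar baseline unemployment rate (in percent).
    """
    filter_len = len(stringency_history) - 1
    # Changes in stringency; a change at t-1 affects the response at t.
    deltas = np.diff(stringency_history)                          # [filter_len]
    # Discount factors: the most recent change is weighted exp(0) = 1,
    # older changes decay with the filter's time constant.
    ages = np.flip(np.arange(filter_len))                         # [filter_len]
    discounts = np.exp(-ages[None, :] / conv_lambdas[:, None])    # [num_filters, filter_len]
    # Discounted sum of changes per filter, then weighted sum over filters.
    responses = (discounts * deltas[None, :]).sum(axis=1)         # [num_filters]
    excess = softplus(np.dot(filter_weights, responses))          # excess unemployment (%)
    return excess + bias


# Usage with placeholder numbers: a state that tightened policy recently.
history = np.array([1, 1, 1, 1, 5, 7, 9, 10], dtype=float)       # filter_len = 7
rate = toy_unemployment_rate(
    stringency_history=history,
    conv_lambdas=np.array([2.0, 10.0]),
    filter_weights=np.array([0.5, 0.3]),
    bias=4.0,
)
print(f"unemployment rate: {rate:.2f}%")                 # roughly 8% for this toy history
print(f"unemployed people: {rate / 100 * 5_000_000:,.0f}")  # for a toy population of 5M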
"# Copyright (c) 2020, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root\n# or https://opensource.org/licenses/BSD-3-Clause\n\nfrom copy import deepcopy\n\nimport numpy as np\nfrom scipy import signal\n\nfrom ai_economist.foundation.base.base_env import BaseEnvironment, scenario_registry\nfrom ai_economist.foundation.scenarios.utils import rewards, social_metrics\n\n\n@scenario_registry.add\nclass LayoutFromFile(BaseEnvironment):\n \"\"\"\n World containing stone and wood with stochastic regeneration. Refers to a fixed\n layout file (see ./map_txt/ for examples) to determine the spatial arrangement of\n stone, wood, and water tiles.\n\n Args:\n planner_gets_spatial_obs (bool): Whether the planner agent receives spatial\n observations from the world.\n full_observability (bool): Whether the mobile agents' spatial observation\n includes the full world view or is instead an egocentric view.\n mobile_agent_observation_range (int): If not using full_observability,\n the spatial range (on each side of the agent) that is visible in the\n spatial observations.\n env_layout_file (str): Name of the layout file in ./map_txt/ to use.\n Note: The world dimensions of that layout must match the world dimensions\n argument used to construct the environment.\n resource_regen_prob (float): Probability that an empty source tile will\n regenerate a new resource unit.\n fixed_four_skill_and_loc (bool): Whether to use a fixed set of build skills and\n starting locations, with agents grouped into starting locations based on\n which skill quartile they are in. False, by default.\n True, for experiments in https://arxiv.org/abs/2004.13332.\n Note: Requires that the environment uses the \"Build\" component with\n skill_dist=\"pareto\".\n starting_agent_coin (int, float): Amount of coin agents have at t=0. Defaults\n to zero coin.\n isoelastic_eta (float): Parameter controlling the shape of agent utility\n wrt coin endowment.\n energy_cost (float): Coefficient for converting labor to negative utility.\n energy_warmup_constant (float): Decay constant that controls the rate at which\n the effective energy cost is annealed from 0 to energy_cost. Set to 0\n (default) to disable annealing, meaning that the effective energy cost is\n always energy_cost. The units of the decay constant depend on the choice of\n energy_warmup_method.\n energy_warmup_method (str): How to schedule energy annealing (warmup). If\n \"decay\" (default), use the number of completed episodes. If \"auto\",\n use the number of timesteps where the average agent reward was positive.\n planner_reward_type (str): The type of reward used for the planner. Options\n are \"coin_eq_times_productivity\" (default),\n \"inv_income_weighted_coin_endowment\", and \"inv_income_weighted_utility\".\n mixing_weight_gini_vs_coin (float): Degree to which equality is ignored w/\n \"coin_eq_times_productivity\". Default is 0, which weights equality and\n productivity equally. 
If set to 1, only productivity is rewarded.\n \"\"\"\n\n name = \"layout_from_file/simple_wood_and_stone\"\n agent_subclasses = [\"BasicMobileAgent\", \"BasicPlanner\"]\n required_entities = [\"Wood\", \"Stone\", \"Water\"]\n\n def __init__(\n self,\n *base_env_args,\n planner_gets_spatial_info=True,\n full_observability=False,\n mobile_agent_observation_range=5,\n env_layout_file=\"quadrant_25x25_20each_30clump.txt\",\n resource_regen_prob=0.01,\n fixed_four_skill_and_loc=False,\n starting_agent_coin=0,\n isoelastic_eta=0.23,\n energy_cost=0.21,\n energy_warmup_constant=0,\n energy_warmup_method=\"decay\",\n planner_reward_type=\"coin_eq_times_productivity\",\n mixing_weight_gini_vs_coin=0.0,\n **base_env_kwargs,\n ):\n super().__init__(*base_env_args, **base_env_kwargs)\n\n # Whether agents receive spatial information in their observation tensor\n self._planner_gets_spatial_info = bool(planner_gets_spatial_info)\n\n # Whether the (non-planner) agents can see the whole world map\n self._full_observability = bool(full_observability)\n\n self._mobile_agent_observation_range = int(mobile_agent_observation_range)\n\n # Load in the layout\n path_to_layout_file = (\n \"/\".join(__file__.split(\"/\")[:-1]) + \"/map_txt/\" + env_layout_file\n )\n with open(path_to_layout_file, \"r\") as f:\n self.env_layout_string = f.read()\n self.env_layout = self.env_layout_string.split(\";\")\n\n # Convert the layout to landmark maps\n landmark_lookup = {\"W\": \"Wood\", \"S\": \"Stone\", \"@\": \"Water\"}\n self._source_maps = {\n r: np.zeros(self.world_size) for r in landmark_lookup.values()\n }\n for r, symbol_row in enumerate(self.env_layout):\n for c, symbol in enumerate(symbol_row):\n landmark = landmark_lookup.get(symbol, None)\n if landmark:\n self._source_maps[landmark][r, c] = 1\n\n # For controlling how resource regeneration behavior\n self.layout_specs = dict(\n Wood={\n \"regen_weight\": float(resource_regen_prob),\n \"regen_halfwidth\": 0,\n \"max_health\": 1,\n },\n Stone={\n \"regen_weight\": float(resource_regen_prob),\n \"regen_halfwidth\": 0,\n \"max_health\": 1,\n },\n )\n assert 0 <= self.layout_specs[\"Wood\"][\"regen_weight\"] <= 1\n assert 0 <= self.layout_specs[\"Stone\"][\"regen_weight\"] <= 1\n\n # How much coin do agents begin with at upon reset\n self.starting_agent_coin = float(starting_agent_coin)\n assert self.starting_agent_coin >= 0.0\n\n # Controls the diminishing marginal utility of coin.\n # isoelastic_eta=0 means no diminishing utility.\n self.isoelastic_eta = float(isoelastic_eta)\n assert 0.0 <= self.isoelastic_eta <= 1.0\n\n # The amount that labor is weighted in utility computation\n # (once annealing is finished)\n self.energy_cost = float(energy_cost)\n assert self.energy_cost >= 0\n\n # Which method to use for calculating the progress of energy annealing\n # If method = 'decay': #completed episodes\n # If method = 'auto' : #timesteps where avg. 
agent reward > 0\n self.energy_warmup_method = energy_warmup_method.lower()\n assert self.energy_warmup_method in [\"decay\", \"auto\"]\n # Decay constant for annealing to full energy cost\n # (if energy_warmup_constant == 0, there is no annealing)\n self.energy_warmup_constant = float(energy_warmup_constant)\n assert self.energy_warmup_constant >= 0\n self._auto_warmup_integrator = 0\n\n # Which social welfare function to use\n self.planner_reward_type = str(planner_reward_type).lower()\n\n # How much to weight equality if using SWF=eq*prod:\n # 0 -> SWF=eq * prod\n # 1 -> SWF=prod\n self.mixing_weight_gini_vs_coin = float(mixing_weight_gini_vs_coin)\n assert 0 <= self.mixing_weight_gini_vs_coin <= 1.0\n\n # Use this to calculate marginal changes and deliver that as reward\n self.init_optimization_metric = {agent.idx: 0 for agent in self.all_agents}\n self.prev_optimization_metric = {agent.idx: 0 for agent in self.all_agents}\n self.curr_optimization_metric = {agent.idx: 0 for agent in self.all_agents}\n\n \"\"\"\n Fixed Four Skill and Loc\n ------------------------\n \"\"\"\n self.agent_starting_pos = {agent.idx: [] for agent in self.world.agents}\n\n self.fixed_four_skill_and_loc = bool(fixed_four_skill_and_loc)\n if self.fixed_four_skill_and_loc:\n bm = self.get_component(\"Build\")\n assert bm.skill_dist == \"pareto\"\n pmsm = bm.payment_max_skill_multiplier\n\n # Temporarily switch to a fixed seed for controlling randomness\n seed_state = np.random.get_state()\n np.random.seed(seed=1)\n\n # Generate a batch (100000) of num_agents (sorted/clipped) Pareto samples.\n pareto_samples = np.random.pareto(4, size=(100000, self.n_agents))\n clipped_skills = np.minimum(pmsm, (pmsm - 1) * pareto_samples + 1)\n sorted_clipped_skills = np.sort(clipped_skills, axis=1)\n # The skill level of the i-th skill-ranked agent is the average of the\n # i-th ranked samples throughout the batch.\n average_ranked_skills = sorted_clipped_skills.mean(axis=0)\n self._avg_ranked_skill = average_ranked_skills * bm.payment\n\n np.random.set_state(seed_state)\n\n # Fill in the starting location associated with each skill rank\n starting_ranked_locs = [\n # Worst group of agents goes in top right\n (0, self.world_size[1] - 1),\n # Second-worst group of agents goes in bottom left\n (self.world_size[0] - 1, 0),\n # Second-best group of agents goes in top left\n (0, 0),\n # Best group of agents goes in bottom right\n (self.world_size[1] - 1, self.world_size[1] - 1),\n ]\n self._ranked_locs = []\n\n # Based on skill, assign each agent to one of the location groups\n skill_groups = np.floor(\n np.arange(self.n_agents) * (4 / self.n_agents),\n ).astype(np.int)\n n_in_group = np.zeros(4, dtype=np.int)\n for g in skill_groups:\n # The position within the group is given by the number of agents\n # counted in the group thus far.\n g_pos = n_in_group[g]\n\n # Top right\n if g == 0:\n r = starting_ranked_locs[g][0] + (g_pos // 4)\n c = starting_ranked_locs[g][1] - (g_pos % 4)\n self._ranked_locs.append((r, c))\n\n # Bottom left\n elif g == 1:\n r = starting_ranked_locs[g][0] - (g_pos // 4)\n c = starting_ranked_locs[g][1] + (g_pos % 4)\n self._ranked_locs.append((r, c))\n\n # Top left\n elif g == 2:\n r = starting_ranked_locs[g][0] + (g_pos // 4)\n c = starting_ranked_locs[g][1] + (g_pos % 4)\n self._ranked_locs.append((r, c))\n\n # Bottom right\n elif g == 3:\n r = starting_ranked_locs[g][0] - (g_pos // 4)\n c = starting_ranked_locs[g][1] - (g_pos % 4)\n self._ranked_locs.append((r, c))\n\n else:\n raise ValueError\n\n # Count 
the agent we just placed.\n n_in_group[g] = n_in_group[g] + 1\n\n @property\n def energy_weight(self):\n \"\"\"\n Energy annealing progress. Multiply with self.energy_cost to get the\n effective energy coefficient.\n \"\"\"\n if self.energy_warmup_constant <= 0.0:\n return 1.0\n\n if self.energy_warmup_method == \"decay\":\n return float(1.0 - np.exp(-self._completions / self.energy_warmup_constant))\n\n if self.energy_warmup_method == \"auto\":\n return float(\n 1.0\n - np.exp(-self._auto_warmup_integrator / self.energy_warmup_constant)\n )\n\n raise NotImplementedError\n\n def get_current_optimization_metrics(self):\n \"\"\"\n Compute optimization metrics based on the current state. Used to compute reward.\n\n Returns:\n curr_optimization_metric (dict): A dictionary of {agent.idx: metric}\n with an entry for each agent (including the planner) in the env.\n \"\"\"\n curr_optimization_metric = {}\n # (for agents)\n for agent in self.world.agents:\n curr_optimization_metric[agent.idx] = rewards.isoelastic_coin_minus_labor(\n coin_endowment=agent.total_endowment(\"Coin\"),\n total_labor=agent.state[\"endogenous\"][\"Labor\"],\n isoelastic_eta=self.isoelastic_eta,\n labor_coefficient=self.energy_weight * self.energy_cost,\n )\n # (for the planner)\n if self.planner_reward_type == \"coin_eq_times_productivity\":\n curr_optimization_metric[\n self.world.planner.idx\n ] = rewards.coin_eq_times_productivity(\n coin_endowments=np.array(\n [agent.total_endowment(\"Coin\") for agent in self.world.agents]\n ),\n equality_weight=1 - self.mixing_weight_gini_vs_coin,\n )\n elif self.planner_reward_type == \"inv_income_weighted_coin_endowments\":\n curr_optimization_metric[\n self.world.planner.idx\n ] = rewards.inv_income_weighted_coin_endowments(\n coin_endowments=np.array(\n [agent.total_endowment(\"Coin\") for agent in self.world.agents]\n ),\n )\n elif self.planner_reward_type == \"inv_income_weighted_utility\":\n curr_optimization_metric[\n self.world.planner.idx\n ] = rewards.inv_income_weighted_utility(\n coin_endowments=np.array(\n [agent.total_endowment(\"Coin\") for agent in self.world.agents]\n ),\n utilities=np.array(\n [curr_optimization_metric[agent.idx] for agent in self.world.agents]\n ),\n )\n else:\n print(\"No valid planner reward selected!\")\n raise NotImplementedError\n return curr_optimization_metric\n\n # The following methods must be implemented for each scenario\n # -----------------------------------------------------------\n\n def reset_starting_layout(self):\n \"\"\"\n Part 1/2 of scenario reset. This method handles resetting the state of the\n environment managed by the scenario (i.e. resource & landmark layout).\n\n Here, reset to the layout in the fixed layout file\n \"\"\"\n self.world.maps.clear()\n for landmark, landmark_map in self._source_maps.items():\n self.world.maps.set(landmark, landmark_map)\n if landmark in [\"Stone\", \"Wood\"]:\n self.world.maps.set(landmark + \"SourceBlock\", landmark_map)\n\n def reset_agent_states(self):\n \"\"\"\n Part 2/2 of scenario reset. This method handles resetting the state of the\n agents themselves (i.e. inventory, locations, etc.).\n\n Here, empty inventories and place mobile agents in random, accessible\n locations to start. 
Note: If using fixed_four_skill_and_loc, the starting\n locations will be overridden in self.additional_reset_steps.\n \"\"\"\n self.world.clear_agent_locs()\n for agent in self.world.agents:\n agent.state[\"inventory\"] = {k: 0 for k in agent.inventory.keys()}\n agent.state[\"escrow\"] = {k: 0 for k in agent.inventory.keys()}\n agent.state[\"endogenous\"] = {k: 0 for k in agent.endogenous.keys()}\n # Add starting coin\n agent.state[\"inventory\"][\"Coin\"] = float(self.starting_agent_coin)\n\n self.world.planner.state[\"inventory\"] = {\n k: 0 for k in self.world.planner.inventory.keys()\n }\n self.world.planner.state[\"escrow\"] = {\n k: 0 for k in self.world.planner.escrow.keys()\n }\n\n for agent in self.world.agents:\n r = np.random.randint(0, self.world_size[0])\n c = np.random.randint(0, self.world_size[1])\n n_tries = 0\n while not self.world.can_agent_occupy(r, c, agent):\n r = np.random.randint(0, self.world_size[0])\n c = np.random.randint(0, self.world_size[1])\n n_tries += 1\n if n_tries > 200:\n raise TimeoutError\n r, c = self.world.set_agent_loc(agent, r, c)\n\n def scenario_step(self):\n \"\"\"\n Update the state of the world according to whatever rules this scenario\n implements.\n\n This gets called in the 'step' method (of base_env) after going through each\n component step and before generating observations, rewards, etc.\n\n In this class of scenarios, the scenario step handles stochastic resource\n regeneration.\n \"\"\"\n\n resources = [\"Wood\", \"Stone\"]\n\n for resource in resources:\n d = 1 + (2 * self.layout_specs[resource][\"regen_halfwidth\"])\n kernel = (\n self.layout_specs[resource][\"regen_weight\"] * np.ones((d, d)) / (d ** 2)\n )\n\n resource_map = self.world.maps.get(resource)\n resource_source_blocks = self.world.maps.get(resource + \"SourceBlock\")\n spawnable = (\n self.world.maps.empty + resource_map + resource_source_blocks\n ) > 0\n spawnable *= resource_source_blocks > 0\n\n health = np.maximum(resource_map, resource_source_blocks)\n respawn = np.random.rand(*health.shape) < signal.convolve2d(\n health, kernel, \"same\"\n )\n respawn *= spawnable\n\n self.world.maps.set(\n resource,\n np.minimum(\n resource_map + respawn, self.layout_specs[resource][\"max_health\"]\n ),\n )\n\n def generate_observations(self):\n \"\"\"\n Generate observations associated with this scenario.\n\n A scenario does not need to produce observations and can provide observations\n for only some agent types; however, for a given agent type, it should either\n always or never yield an observation. If it does yield an observation,\n that observation should always have the same structure/sizes!\n\n Returns:\n obs (dict): A dictionary of {agent.idx: agent_obs_dict}. In words,\n return a dictionary with an entry for each agent (which can including\n the planner) for which this scenario provides an observation. 
For each\n entry, the key specifies the index of the agent and the value contains\n its associated observation dictionary.\n\n Here, non-planner agents receive spatial observations (depending on the env\n config) as well as the contents of their inventory and endogenous quantities.\n The planner also receives spatial observations (again, depending on the env\n config) as well as the inventory of each of the mobile agents.\n \"\"\"\n obs = {}\n curr_map = self.world.maps.state\n\n owner_map = self.world.maps.owner_state\n loc_map = self.world.loc_map\n agent_idx_maps = np.concatenate([owner_map, loc_map[None, :, :]], axis=0)\n agent_idx_maps += 2\n agent_idx_maps[agent_idx_maps == 1] = 0\n\n agent_locs = {\n str(agent.idx): {\n \"loc-row\": agent.loc[0] / self.world_size[0],\n \"loc-col\": agent.loc[1] / self.world_size[1],\n }\n for agent in self.world.agents\n }\n agent_invs = {\n str(agent.idx): {\n \"inventory-\" + k: v * self.inv_scale for k, v in agent.inventory.items()\n }\n for agent in self.world.agents\n }\n\n obs[self.world.planner.idx] = {\n \"inventory-\" + k: v * self.inv_scale\n for k, v in self.world.planner.inventory.items()\n }\n if self._planner_gets_spatial_info:\n obs[self.world.planner.idx].update(\n dict(map=curr_map, idx_map=agent_idx_maps)\n )\n\n # Mobile agents see the full map. Convey location info via one-hot map channels.\n if self._full_observability:\n for agent in self.world.agents:\n my_map = np.array(agent_idx_maps)\n my_map[my_map == int(agent.idx) + 2] = 1\n sidx = str(agent.idx)\n obs[sidx] = {\n \"map\": curr_map,\n \"idx_map\": my_map,\n }\n obs[sidx].update(agent_invs[sidx])\n\n # Mobile agents only see within a window around their position\n else:\n w = (\n self._mobile_agent_observation_range\n ) # View halfwidth (only applicable without full observability)\n\n padded_map = np.pad(\n curr_map,\n [(0, 1), (w, w), (w, w)],\n mode=\"constant\",\n constant_values=[(0, 1), (0, 0), (0, 0)],\n )\n\n padded_idx = np.pad(\n agent_idx_maps,\n [(0, 0), (w, w), (w, w)],\n mode=\"constant\",\n constant_values=[(0, 0), (0, 0), (0, 0)],\n )\n\n for agent in self.world.agents:\n r, c = [c + w for c in agent.loc]\n visible_map = padded_map[\n :, (r - w) : (r + w + 1), (c - w) : (c + w + 1)\n ]\n visible_idx = np.array(\n padded_idx[:, (r - w) : (r + w + 1), (c - w) : (c + w + 1)]\n )\n\n visible_idx[visible_idx == int(agent.idx) + 2] = 1\n\n sidx = str(agent.idx)\n\n obs[sidx] = {\n \"map\": visible_map,\n \"idx_map\": visible_idx,\n }\n obs[sidx].update(agent_locs[sidx])\n obs[sidx].update(agent_invs[sidx])\n\n # Agent-wise planner info (gets crunched into the planner obs in the\n # base scenario code)\n obs[\"p\" + sidx] = agent_invs[sidx]\n if self._planner_gets_spatial_info:\n obs[\"p\" + sidx].update(agent_locs[sidx])\n\n return obs\n\n def compute_reward(self):\n \"\"\"\n Apply the reward function(s) associated with this scenario to get the rewards\n from this step.\n\n Returns:\n rew (dict): A dictionary of {agent.idx: agent_obs_dict}. In words,\n return a dictionary with an entry for each agent in the environment\n (including the planner). For each entry, the key specifies the index of\n the agent and the value contains the scalar reward earned this timestep.\n\n Rewards are computed as the marginal utility (agents) or marginal social\n welfare (planner) experienced on this timestep. 
Ignoring discounting,\n this means that agents' (planner's) objective is to maximize the utility\n (social welfare) associated with the terminal state of the episode.\n \"\"\"\n\n # \"curr_optimization_metric\" hasn't been updated yet, so it gives us the\n # utility from the last step.\n utility_at_end_of_last_time_step = deepcopy(self.curr_optimization_metric)\n\n # compute current objectives and store the values\n self.curr_optimization_metric = self.get_current_optimization_metrics()\n\n # reward = curr - prev objectives\n rew = {\n k: float(v - utility_at_end_of_last_time_step[k])\n for k, v in self.curr_optimization_metric.items()\n }\n\n # store the previous objective values\n self.prev_optimization_metric.update(utility_at_end_of_last_time_step)\n\n # Automatic Energy Cost Annealing\n # -------------------------------\n avg_agent_rew = np.mean([rew[a.idx] for a in self.world.agents])\n # Count the number of timesteps where the avg agent reward was > 0\n if avg_agent_rew > 0:\n self._auto_warmup_integrator += 1\n\n return rew\n\n # Optional methods for customization\n # ----------------------------------\n\n def additional_reset_steps(self):\n \"\"\"\n Extra scenario-specific steps that should be performed at the end of the reset\n cycle.\n\n For each reset cycle...\n First, reset_starting_layout() and reset_agent_states() will be called.\n\n Second, <component>.reset() will be called for each registered component.\n\n Lastly, this method will be called to allow for any final customization of\n the reset cycle.\n\n For this scenario, this method resets optimization metric trackers. If using\n fixed_four_skill_and_loc, this is where each agent gets assigned to one of\n the four fixed skill/loc combinations. The agent-->skill/loc assignment is\n permuted so that all four skill/loc combinations are used.\n \"\"\"\n if self.fixed_four_skill_and_loc:\n self.world.clear_agent_locs()\n for i, agent in enumerate(self.world.get_random_order_agents()):\n self.world.set_agent_loc(agent, *self._ranked_locs[i])\n agent.state[\"build_payment\"] = self._avg_ranked_skill[i]\n\n # compute current objectives\n curr_optimization_metric = self.get_current_optimization_metrics()\n\n self.curr_optimization_metric = deepcopy(curr_optimization_metric)\n self.init_optimization_metric = deepcopy(curr_optimization_metric)\n self.prev_optimization_metric = deepcopy(curr_optimization_metric)\n\n def scenario_metrics(self):\n \"\"\"\n Allows the scenario to generate metrics (collected along with component metrics\n in the 'metrics' property).\n\n To have the scenario add metrics, this function needs to return a dictionary of\n {metric_key: value} where 'value' is a scalar (no nesting or lists!)\n\n Here, summarize social metrics, endowments, utilities, and labor cost annealing.\n \"\"\"\n metrics = dict()\n\n coin_endowments = np.array(\n [agent.total_endowment(\"Coin\") for agent in self.world.agents]\n )\n metrics[\"social/productivity\"] = social_metrics.get_productivity(\n coin_endowments\n )\n metrics[\"social/equality\"] = social_metrics.get_equality(coin_endowments)\n\n utilities = np.array(\n [self.curr_optimization_metric[agent.idx] for agent in self.world.agents]\n )\n metrics[\n \"social_welfare/coin_eq_times_productivity\"\n ] = rewards.coin_eq_times_productivity(\n coin_endowments=coin_endowments, equality_weight=1.0\n )\n metrics[\n \"social_welfare/inv_income_weighted_coin_endow\"\n ] = rewards.inv_income_weighted_coin_endowments(coin_endowments=coin_endowments)\n metrics[\n 
\"social_welfare/inv_income_weighted_utility\"\n ] = rewards.inv_income_weighted_utility(\n coin_endowments=coin_endowments, utilities=utilities\n )\n\n for agent in self.all_agents:\n for resource, quantity in agent.inventory.items():\n metrics[\n \"endow/{}/{}\".format(agent.idx, resource)\n ] = agent.total_endowment(resource)\n\n if agent.endogenous is not None:\n for resource, quantity in agent.endogenous.items():\n metrics[\"endogenous/{}/{}\".format(agent.idx, resource)] = quantity\n\n metrics[\"util/{}\".format(agent.idx)] = self.curr_optimization_metric[\n agent.idx\n ]\n\n # Labor weight\n metrics[\"labor/weighted_cost\"] = self.energy_cost * self.energy_weight\n metrics[\"labor/warmup_integrator\"] = int(self._auto_warmup_integrator)\n\n return metrics\n\n\n@scenario_registry.add\nclass SplitLayout(LayoutFromFile):\n \"\"\"\n Extends layout_from_file/simple_wood_and_stone to impose a row of water midway\n through the map, uses a fixed set of pareto-distributed building skills (requires a\n Build component), and places agents in the top/bottom depending on skill rank.\n\n Args:\n water_row (int): Row of the map where the water barrier is placed. Defaults\n to half the world height.\n skill_rank_of_top_agents (int, float, tuple, list): Index/indices specifying\n which agent(s) to place in the top of the map. Indices refer to the skill\n ranking, with 0 referring to the highest-skilled agent. Defaults to only\n the highest-skilled agent in the top.\n planner_gets_spatial_obs (bool): Whether the planner agent receives spatial\n observations from the world.\n full_observability (bool): Whether the mobile agents' spatial observation\n includes the full world view or is instead an egocentric view.\n mobile_agent_observation_range (int): If not using full_observability,\n the spatial range (on each side of the agent) that is visible in the\n spatial observations.\n env_layout_file (str): Name of the layout file in ./map_txt/ to use.\n Note: The world dimensions of that layout must match the world dimensions\n argument used to construct the environment.\n resource_regen_prob (float): Probability that an empty source tile will\n regenerate a new resource unit.\n starting_agent_coin (int, float): Amount of coin agents have at t=0. Defaults\n to zero coin.\n isoelastic_eta (float): Parameter controlling the shape of agent utility\n wrt coin endowment.\n energy_cost (float): Coefficient for converting labor to negative utility.\n energy_warmup_constant (float): Decay constant that controls the rate at which\n the effective energy cost is annealed from 0 to energy_cost. Set to 0\n (default) to disable annealing, meaning that the effective energy cost is\n always energy_cost. The units of the decay constant depend on the choice of\n energy_warmup_method.\n energy_warmup_method (str): How to schedule energy annealing (warmup). If\n \"decay\" (default), use the number of completed episodes. If \"auto\",\n use the number of timesteps where the average agent reward was positive.\n planner_reward_type (str): The type of reward used for the planner. Options\n are \"coin_eq_times_productivity\" (default),\n \"inv_income_weighted_coin_endowment\", and \"inv_income_weighted_utility\".\n mixing_weight_gini_vs_coin (float): Degree to which equality is ignored w/\n \"coin_eq_times_productivity\". Default is 0, which weights equality and\n productivity equally. 
If set to 1, only productivity is rewarded.\n \"\"\"\n\n name = \"split_layout/simple_wood_and_stone\"\n\n def __init__(\n self,\n *args,\n water_row=None,\n skill_rank_of_top_agents=None,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n\n if self.fixed_four_skill_and_loc:\n raise ValueError(\n \"The split layout scenario does not support \"\n \"fixed_four_skill_and_loc. Set this to False.\"\n )\n\n # Augment the fixed layout to include a row of water through the middle\n if water_row is None:\n self._water_line = self.world_size[0] // 2\n else:\n self._water_line = int(water_row)\n assert 0 < self._water_line < self.world_size[0] - 1\n for landmark, landmark_map in self._source_maps.items():\n landmark_map[self._water_line, :] = 1 if landmark == \"Water\" else 0\n self._source_maps[landmark] = landmark_map\n\n # Controls logic for which agents (by skill rank) get placed on the top\n if skill_rank_of_top_agents is None:\n skill_rank_of_top_agents = [0]\n\n if isinstance(skill_rank_of_top_agents, (int, float)):\n self.skill_rank_of_top_agents = [int(skill_rank_of_top_agents)]\n elif isinstance(skill_rank_of_top_agents, (tuple, list)):\n self.skill_rank_of_top_agents = list(set(skill_rank_of_top_agents))\n else:\n raise TypeError(\n \"skill_rank_of_top_agents must be a scalar \"\n \"index, or a list of scalar indices.\"\n )\n for rank in self.skill_rank_of_top_agents:\n assert 0 <= rank < self.n_agents\n assert 0 < len(self.skill_rank_of_top_agents) < self.n_agents\n\n # Set the skill associated with each skill rank\n bm = self.get_component(\"Build\")\n assert bm.skill_dist == \"pareto\"\n pmsm = bm.payment_max_skill_multiplier\n # Generate a batch (100000) of num_agents (sorted/clipped) Pareto samples.\n pareto_samples = np.random.pareto(4, size=(100000, self.n_agents))\n clipped_skills = np.minimum(pmsm, (pmsm - 1) * pareto_samples + 1)\n sorted_clipped_skills = np.sort(clipped_skills, axis=1)\n # The skill level of the i-th skill-ranked agent is the average of the\n # i-th ranked samples throughout the batch.\n average_ranked_skills = sorted_clipped_skills.mean(axis=0)\n self._avg_ranked_skill = average_ranked_skills * bm.payment\n # Reverse the order so index 0 is the highest-skilled\n self._avg_ranked_skill = self._avg_ranked_skill[::-1]\n\n def additional_reset_steps(self):\n \"\"\"\n Extra scenario-specific steps that should be performed at the end of the reset\n cycle.\n\n For each reset cycle...\n First, reset_starting_layout() and reset_agent_states() will be called.\n\n Second, <component>.reset() will be called for each registered component.\n\n Lastly, this method will be called to allow for any final customization of\n the reset cycle.\n\n For this scenario, this method resets optimization metric trackers. 
This is\n where each agent gets assigned to one of the skills and the starting\n locations are reset according to self.skill_rank_of_top_agents.\n \"\"\"\n self.world.clear_agent_locs()\n for i, agent in enumerate(self.world.get_random_order_agents()):\n agent.state[\"build_payment\"] = self._avg_ranked_skill[i]\n if i in self.skill_rank_of_top_agents:\n r_min, r_max = 0, self._water_line\n else:\n r_min, r_max = self._water_line + 1, self.world_size[0]\n\n r = np.random.randint(r_min, r_max)\n c = np.random.randint(0, self.world_size[1])\n n_tries = 0\n while not self.world.can_agent_occupy(r, c, agent):\n r = np.random.randint(r_min, r_max)\n c = np.random.randint(0, self.world_size[1])\n n_tries += 1\n if n_tries > 200:\n raise TimeoutError\n self.world.set_agent_loc(agent, r, c)\n\n # compute current objectives\n curr_optimization_metric = self.get_current_optimization_metrics()\n\n self.curr_optimization_metric = deepcopy(curr_optimization_metric)\n self.init_optimization_metric = deepcopy(curr_optimization_metric)\n self.prev_optimization_metric = deepcopy(curr_optimization_metric)\n"
] |
[
[
"numpy.maximum",
"numpy.ones_like",
"numpy.abs",
"numpy.min",
"numpy.arange",
"numpy.ones",
"numpy.logical_or",
"numpy.random.permutation",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
],
[
"numpy.maximum",
"numpy.minimum",
"numpy.pad",
"numpy.clip",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.mean",
"numpy.zeros_like",
"numpy.array",
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
],
[
"numpy.random.get_state",
"numpy.random.pareto",
"numpy.minimum",
"numpy.maximum",
"numpy.random.seed",
"numpy.pad",
"numpy.arange",
"numpy.sort",
"scipy.signal.convolve2d",
"numpy.concatenate",
"numpy.ones",
"numpy.random.set_state",
"numpy.mean",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
itsalexis962/pycroscopy
|
[
"8a6557408ffdc332cef102616be16e26a396532f"
] |
[
"tests/learn/dl/test_trainer.py"
] |
[
"import sys\nimport pytest\nimport numpy as np\nimport torch\nfrom numpy.testing import assert_\n\nsys.path.append(\"../../../\")\n\nfrom pycroscopy.learn import Trainer, models\n\n\ndef assert_weights_equal(m1, m2):\n eq_w = []\n for p1, p2 in zip(m1.values(), m2.values()):\n eq_w.append(np.array_equal(\n p1.detach().cpu().numpy(),\n p2.detach().cpu().numpy()))\n return all(eq_w)\n\n\[email protected](\"dim, size\", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])\ndef test_trainer(dim, size):\n # Initialize a model\n in_dim = (1, *size)\n model = models.AutoEncoder(\n in_dim, layers_per_block=[1, 1], nfilters=2)\n weights_before = model.state_dict()\n # Create dummy train set\n X_train = torch.randn(5, *in_dim)\n # Initialize trainer\n t = Trainer(model, X_train, X_train, batch_size=2)\n # train and compare model params before and after\n t.fit(num_epochs=2)\n weights_after = model.state_dict()\n assert_(not assert_weights_equal(weights_before, weights_after))\n\n\[email protected](\"dim, size\", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])\ndef test_trainer_determenism(dim, size):\n in_dim = (1, *size)\n # Create dummy train set\n torch.manual_seed(0)\n X_train = torch.randn(5, *in_dim)\n # Initialize a model\n model1 = models.AutoEncoder(\n in_dim, layers_per_block=[1, 1], nfilters=2,\n upsampling_mode=\"nearest\")\n # Initialize trainer\n t = Trainer(model1, X_train, X_train, batch_size=2)\n # train\n t.fit(num_epochs=4)\n # Reininitiaize model and train again\n torch.manual_seed(0)\n X_train = torch.randn(5, *in_dim)\n model2 = models.AutoEncoder(\n in_dim, layers_per_block=[1, 1], nfilters=2,\n upsampling_mode=\"nearest\")\n t = Trainer(model2, X_train, X_train, batch_size=2)\n t.fit(num_epochs=4)\n assert_(assert_weights_equal(model1.state_dict(), model2.state_dict()))\n"
] |
[
[
"torch.randn",
"torch.manual_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anntzer/mplcairo-asv
|
[
"7d03686411600e808e95875cb78deda9b8e8d4a7"
] |
[
"benchmarks/benchmarks.py"
] |
[
"import matplotlib as mpl\nfrom matplotlib.figure import Figure\nimport numpy as np\n\nfrom mplcairo import antialias_t\nfrom mplcairo.base import FigureCanvasCairo\n\n\nmpl.rcdefaults()\n\n\ndef get_axes():\n axes = Figure().subplots()\n axes.set(xticks=[], yticks=[])\n for spine in axes.spines.values():\n spine.set_visible(False)\n return axes\n\n\ndef get_sample_vectors():\n return np.random.RandomState(0).random_sample((2, 10000))\n\n\ndef get_sample_image():\n return np.random.RandomState(0).random_sample((100, 100))\n\n\nclass _TimeBase:\n def setup(self):\n self.axes = get_axes()\n self.axes.figure.canvas = FigureCanvasCairo(self.axes.figure)\n\n def time_draw(self, *args):\n self.axes.figure.canvas.draw()\n\n def teardown(self, *args):\n mpl.rcdefaults()\n\n\nclass TimeAxes(_TimeBase):\n pass\n\n\nclass TimeLine(_TimeBase):\n param_names = (\"antialiased\", \"joinstyle\")\n params = (list(antialias_t.__members__.values()),\n [\"miter\", \"round\", \"bevel\"])\n\n def setup(self, antialiased, joinstyle):\n mpl.rcParams[\"agg.path.chunksize\"] = 0\n super().setup()\n self.axes.plot(*get_sample_vectors(),\n antialiased=antialiased, solid_joinstyle=joinstyle)\n\n\n# For the marker tests, try both square and round markers, as we have a special\n# code path for circles which may not be representative of general performance.\n\n\nclass TimeMarkers(_TimeBase):\n param_names = (\"threshold\", \"marker\")\n params = ([1 / 8, 0],\n [\"o\", \"s\"])\n\n def setup(self, threshold, marker):\n mpl.rcParams[\"path.simplify_threshold\"] = threshold\n super().setup()\n self.axes.plot(*get_sample_vectors(), marker=marker)\n\n\nclass TimeScatterMulticolor(_TimeBase):\n param_names = (\"thresold\", \"marker\")\n params = ([1 / 8, 0],\n [\"o\", \"s\"])\n\n def setup(self, threshold, marker):\n mpl.rcParams[\"path.simplify_threshold\"] = threshold\n super().setup()\n a, b = get_sample_vectors()\n self.axes.scatter(a, a, c=b, marker=marker)\n\n\nclass TimeScatterMultisize(_TimeBase):\n param_names = (\"thresold\", \"marker\")\n params = ([1 / 8, 0],\n [\"o\", \"s\"])\n\n def setup(self, threshold, marker):\n mpl.rcParams[\"path.simplify_threshold\"] = threshold\n super().setup()\n a, b = get_sample_vectors()\n self.axes.scatter(a, a, s=100 * b ** 2, marker=marker)\n\n\nclass TimeImage(_TimeBase):\n def setup(self):\n super().setup()\n self.axes.imshow(get_sample_image())\n"
] |
[
[
"matplotlib.rcdefaults",
"numpy.random.RandomState",
"matplotlib.figure.Figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
noashin/Ising_model_gibbs_sampler
|
[
"f285bf24aa4157c3f2d7d6c0ce3d93d058953c42"
] |
[
"sampler.py"
] |
[
"import time\nimport random\n\nimport numpy as np\nimport pypolyagamma as pypolyagamma\n\n\ndef calculate_C_w(S, w_i):\n w_mat = np.diag(w_i)\n\n return np.dot(S.T, np.dot(w_mat, S))\n\n\ndef sample_w_i(S, J_i):\n \"\"\"\n\n :param S: observation matrix\n :param J_i: neuron i's couplings\n :return: samples for w_i from a polyagamma distribution\n \"\"\"\n nthreads = pypolyagamma.get_omp_num_threads()\n seeds = np.random.randint(2 ** 16, size=nthreads)\n ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]\n\n T = S.shape[0]\n A = np.ones(T)\n w_i = np.zeros(T)\n # print 'will sample w'\n # print nthreads\n # ppg.pgdrawv(A, 2. * np.dot(S, J_i), w_i)\n pypolyagamma.pgdrawvpar(ppgs, A, np.dot(S, J_i), w_i)\n # print 'sampled w'\n return w_i\n\n\ndef sample_J_i(S, C, D_i, w_i, gamma_i, sigma_J):\n N = S.shape[1]\n J_i = np.zeros(N)\n\n included_ind = list(np.where(gamma_i > 0)[0])\n\n if len(included_ind) == 0:\n return J_i\n\n cov_mat = (1. / sigma_J) * np.identity(N)\n\n C_gamma = C[:, included_ind][included_ind, :]\n cov_mat_gamma = cov_mat[included_ind, :][:, included_ind]\n D_i_gamma = D_i[included_ind]\n\n cov = np.linalg.inv(C_gamma + cov_mat_gamma)\n mean = np.dot(cov, D_i_gamma)\n\n J_i_gamma = np.random.multivariate_normal(mean, cov)\n\n J_i[included_ind] = J_i_gamma\n\n return J_i\n\n\ndef calc_block_dets(C_gamma, j_rel, sigma_J, num_active):\n cov_mat = (1. / sigma_J) * np.identity(num_active)\n mat = cov_mat + C_gamma\n\n A = mat[:j_rel, :j_rel]\n\n B_1 = mat[:j_rel, j_rel:]\n C_1 = mat[j_rel:, :j_rel]\n D_1 = mat[j_rel:, j_rel:]\n\n B_0 = mat[:j_rel, j_rel + 1:]\n C_0 = mat[j_rel + 1:, :j_rel]\n D_0 = mat[j_rel + 1:, j_rel + 1:]\n\n det_cov_1 = float(num_active) * sigma_J\n det_cov_0 = float(num_active - 1) * sigma_J\n\n # import ipdb;ipdb.set_trace()\n # If the matrix is small don't bother to split\n if mat.shape[0] < 5.:\n pre_factor_1 = (det_cov_1 / np.linalg.det(mat))\n pre_factor_0 = (det_cov_0 / np.linalg.det(np.delete(np.delete(mat, j_rel, 0), j_rel, 1)))\n\n elif j_rel == 0:\n pre_factor_0 = (det_cov_0 / np.linalg.det(D_0))\n pre_factor_1 = (det_cov_1 / np.linalg.det(mat))\n elif j_rel == num_active - 1:\n pre_factor_0 = (det_cov_0 / np.linalg.det(A))\n pre_factor_1 = (det_cov_1 / np.linalg.det(mat))\n else:\n det_A = np.linalg.det(A)\n A_inv = np.linalg.inv(A)\n pre_factor_0 = det_cov_0 / (det_A * np.linalg.det(D_0 - np.dot(C_0, np.dot(A_inv, B_0))))\n pre_factor_1 = det_cov_1 / (det_A * np.linalg.det(D_1 - np.dot(C_1, np.dot(A_inv, B_1))))\n\n return np.sqrt(pre_factor_0), np.sqrt(pre_factor_1)\n\n\ndef calc_gamma_prob(sigma_J, C_gamma, D_i_gamma, ro, j_rel):\n # import ipdb; ipdb.set_trace()\n num_active = D_i_gamma.shape[0] # How manny gammas are equal to 1\n cov_mat = 1. / sigma_J * np.identity(num_active)\n mat = cov_mat + C_gamma\n mat_inv = np.linalg.inv(mat)\n\n mat_0_inv = np.linalg.inv(np.delete(np.delete(mat, j_rel, 0), j_rel, 1))\n D_i_gamma_0 = np.delete(D_i_gamma, j_rel)\n\n # calculate determinant with and without j in block form\n prefactor_0, prefactor_1 = calc_block_dets(C_gamma, j_rel, sigma_J, num_active)\n # prefactor_1 = np.sqrt(np.linalg.det(mat_inv) * np.linalg.det(cov_mat))\n # prefactor_0 = np.sqrt(np.linalg.det(mat_0_inv) * np.linalg.det(np.delete(np.delete(cov_mat, j_rel, 0), j_rel, 1)))\n\n sq_1 = 0.5 * np.dot(D_i_gamma.T, np.dot(mat_inv, D_i_gamma))\n sq_0 = 0.5 * np.dot(D_i_gamma_0.T, np.dot(mat_0_inv, D_i_gamma_0))\n\n new_ro = 1. / (1. + np.exp(sq_0 - sq_1 + np.log(1. 
- ro) - np.log(ro) +\n np.log(prefactor_0) - np.log(prefactor_1)))\n\n return new_ro\n\n\ndef sample_gamma_i(gamma_i, D_i, C, ro, sigmma_J):\n N = C.shape[0]\n\n for j in range(N):\n # import ipdb; ipdb.set_trace()\n gamma_i[j] = 1.\n active_indices = np.where(gamma_i > 0)[0]\n\n # Don't allow a network with 0 connections\n if len(active_indices) == 1.:\n continue\n\n j_rel = j - np.where(gamma_i[:j] == 0)[0].shape[0]\n D_i_gamma = D_i[active_indices]\n C_gamma = C[:, active_indices][active_indices, :]\n\n new_ro = calc_gamma_prob(sigmma_J, C_gamma, D_i_gamma, ro, j_rel)\n # import ipdb; ipdb.set_trace()\n # try:\n gamma_i[j] = np.random.binomial(1, new_ro, 1)\n # except ValueError:\n # import ipdb;\n # ipdb.set_trace()\n\n return gamma_i\n\n\ndef sample_neuron(samp_num, burnin, sigma_J, S, D_i, ro, thin=0, save_all=True):\n \"\"\" This function uses the Gibbs sampler to sample from w, gamma and J\n\n :param samp_num: Number of samples to be drawn\n :param burnin: Number of samples to burn in\n :param sigma_J: variance of the J slab\n :param S: Neurons' activity matrix. Including S0. (T + 1) x N\n :param C: observation correlation matrix. N x N\n :param D_i: time delay correlations of neuron i. N\n :return: samp_num samples (each one of length K (time_steps)) from the posterior distribution for w,x,z.\n \"\"\"\n\n # random.seed(seed)\n\n T, N = S.shape\n\n # actual number of samples needed with thining and burin-in\n if (thin != 0):\n N_s = samp_num * thin + burnin\n else:\n N_s = samp_num + burnin\n\n samples_w_i = np.zeros((N_s, T), dtype=np.float32)\n samples_J_i = np.zeros((N_s, N), dtype=np.float32)\n samples_gamma_i = np.zeros((N_s, N), dtype=np.float32)\n\n # gamma_i = np.random.binomial(1, ro, N)\n gamma_i = np.ones(N)\n J_i = np.multiply(gamma_i, np.random.normal(0, sigma_J, N))\n\n for i in xrange(N_s):\n # import ipdb; ipdb.set_trace()\n w_i = sample_w_i(S, J_i)\n C_w_i = calculate_C_w(S, w_i)\n gamma_i = sample_gamma_i(gamma_i, D_i, C_w_i, ro, sigma_J)\n J_i = sample_J_i(S, C_w_i, D_i, w_i, gamma_i, sigma_J)\n\n samples_w_i[i, :] = w_i\n samples_J_i[i, :] = J_i\n samples_gamma_i[i, :] = gamma_i\n\n if thin == 0:\n return samples_w_i[burnin:, :], samples_J_i[burnin:, :], samples_gamma_i[burnin:, :]\n else:\n return samples_w_i[burnin:N_s:thin, :], samples_J_i[burnin:N_s:thin, :], \\\n samples_gamma_i[burnin:N_s:thin, :]\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.random.multivariate_normal",
"numpy.ones",
"numpy.linalg.det",
"numpy.delete",
"numpy.random.normal",
"numpy.identity",
"numpy.random.binomial",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
amohant4/RSA_TF
|
[
"5b6705e167ca4f7039e900f9921b90087c9542fd"
] |
[
"lib/networks/leNet_sram.py"
] |
[
"# file: leNet_sram.py\n# Author\t: Abinash Mohanty\n# Date\t\t: 06/22/2017\n# Project\t: RRAM training NN\n\n# This class implements leNet. \n# It is a convolution neural network with 2 conv and 3 fc layers. \n# For more details:\n\nimport tensorflow as tf\nfrom rram_NN.config import cfg\nfrom networks.network import Network\n\nn_classes = 10\n\nclass leNet_sram(Network):\n\tdef __init__(self, trainable=True):\n\t\tself.inputs = []\n\t\tself.x = tf.placeholder(tf.float32, shape=[None, 784]) \n\t\tself.y_ = tf.placeholder(tf.float32, shape=[None, 10])\n\t\tself.keep_prob = tf.placeholder(tf.float32)\n\t\tself.layers = dict({'x': self.x, 'y_': self.y_, 'keep_prob':self.keep_prob})\n\t\tself.trainable = trainable\n\t\tself.name = \"leNet\"\n\t\tself.setup()\n\t\tself._accuracy = None\n\t\tself._global_step = None\n\t\tself._lr = None\n\t\tself._gradients = None\n\t\tself._optimizer = None\n\n\tdef setup(self):\n\t\t(self.feed('x')\n\t\t\t .reshape_leNet(name='reshape_to_image')\n\t\t\t .pad_leNet(name='pad_2_leNet_compatible')\n\t\t\t .conv(5, 5, 6, 1 ,1, name='conv_1', relu=True, trainable=False))\n\t\t(self.feed('pad_2_leNet_compatible')\n\t\t\t .conv(5, 5, 6, 1 ,1, name='conv_1_sram', relu=True, trainable=True))\n\t\t(self.feed('conv_1','conv_1_sram')\n\t\t\t .sum_fc_ops(name='conv1_op')\n\t\t\t .max_pool(2, 2, 2 ,2, name='max_pool_1')\n\t\t\t .conv(5, 5, 16, 1, 1, name='conv_2', relu=True, trainable=False))\n\t\t(self.feed('max_pool_1')\n\t\t\t .conv(5, 5, 16, 1, 1, name='conv_2_sram', relu=True, trainable=True))\n\t\t(self.feed('conv_2', 'conv_2_sram')\n\t\t\t .sum_fc_ops(name='conv2_op')\n\t\t\t .max_pool(2, 2, 2, 2, name='max_pool_2') \n\t\t\t .fc(120, name='fc_1', relu=True, trainable=False))\n\t\t(self.feed('max_pool_2')\n\t\t\t .fc(120, name='fc_1_sram', relu=True, trainable=True))\n\t\t(self.feed('fc_1', 'fc_1_sram')\n\t\t\t .sum_fc_ops(name='fc1_op')\n\t\t\t .fc(84, name='fc_2', relu=True, trainable=False))\n\t\t(self.feed('fc1_op')\n\t\t\t .fc(84, name='fc_2_sram', relu=True, trainable=True))\n\t\t(self.feed('fc_2','fc_2_sram')\n\t\t\t .sum_fc_ops(name='fc2_op')\n\t\t\t .fc(n_classes, name='class_pred', relu=False, trainable=False))\n\t\t(self.feed('fc2_op')\n\t\t\t .fc(n_classes, name='fc_3_sram', relu=False, trainable=True))\n\t\t(self.feed('class_pred','fc_3_sram')\n\t\t\t .sum_fc_ops(name='class_pred_sram')\t\n\t\t)\n\n\t@property\n\tdef accuracy(self):\n\t\t\"\"\"\n\t\tComputes accuracy of the network\n\t\t\"\"\"\t\n\t\tif self._accuracy is None:\n\t\t\ty = self.get_output('class_pred_sram')\n\t\t\ty_ = self.get_output('y_')\t\t\n\t\t\tcorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n\t\t\tself._accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\t\tif cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:\n\t\t\t\ttf.summary.scalar('accuracy', self._accuracy)\n\t\treturn self._accuracy\t\n\n\t@property\n\tdef global_step(self):\n\t\t\"\"\"\n\t\tFunction to ensure that the global_step is not created\n\t\tmany times during experiments.\n\t\t\"\"\"\n\t\tif self._global_step is None:\n\t\t\t self._global_step = tf.Variable(0, trainable=False, name='global_step')\n\t\treturn self._global_step\n\n\t@property\n\tdef optimizer(self):\n\t\t\"\"\"\n\t\tOptimizer used to minimize error.\t\n\t\t\"\"\"\t\t\n\t\tif self._optimizer is None:\n\t\t\tlr = tf.train.exponential_decay(cfg.TRAIN.LEARNING_RATE, self.global_step, \n\t\t\t\t\t\tcfg.TRAIN.DECAY_STEPS, cfg.TRAIN.DECAY_RATE, staircase=True, name='lr')\n\t\t\tif cfg.DEBUG_TRAINING or 
cfg.DEBUG_ALL:\t\t\t\n\t\t\t\ttf.summary.scalar('lr', lr)\t\t\t\n\t\t\tself._optimizer = tf.train.GradientDescentOptimizer(lr)\n\t\treturn self._optimizer\n\n\t@property\n\tdef gradients(self):\n\t\t\"\"\"\n\t\tComputes gradients !\n\t\t\"\"\"\n\t\tif self._gradients is None:\n\t\t\ty = self.get_output('class_pred_sram')\n\t\t\ty_ = self.get_output('y_')\n\t\t\tcross_entropy = tf.reduce_mean(\n\t\t\t\ttf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))\n\t\t\tif cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:\t\n\t\t\t\ttf.summary.scalar('cross_entropy', cross_entropy)\t\n\t\t\tvars = tf.trainable_variables()\n\t\t\tself._gradients = tf.gradients(cross_entropy, vars)\t\t\t\n\t\treturn self._gradients\n"
] |
[
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.Variable",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.train.exponential_decay",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.summary.scalar"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ypix/fight-churn-nb
|
[
"fcaaf17a30d69851be42a54c0e68ca4444fb97e1"
] |
[
"churnmodels/helpers.py"
] |
[
"import os\nimport sys\nimport pandas as pd\n\n# alternative to the interval functionality in postgres is to create a tempory table that can be joined to\nfrom datetime import timedelta, datetime\n\nimport sqlparse\n\n\ndef make_day_interval(d_start_date, d_end_date, periods, freq_str):\n # we let pandas do find the starting date which is\n # new-start-date = start-date - (periods * frequency)\n seq=pd.date_range(d_start_date, periods=periods+1, freq=f\"-{freq_str}\")\n new_start_date=seq[-1]\n end_dates=pd.date_range(d_start_date, d_end_date, freq=freq_str)\n start_dates=pd.date_range(new_start_date, periods=len(end_dates), freq=freq_str)\n df=pd.DataFrame({\"start_date\":start_dates,\"end_date\":end_dates})\n df.index.rename(\"id\")\n return df\n\n\n\ndef required_envvar(envvar, errtext):\n \"\"\"\n return the environment variable envvar.\n If not given print error text and exit.\n :param envvar:\n :type envvar:\n :param errtext:\n :type errtext:\n :return:\n :rtype:\n \"\"\"\n if envvar not in os.environ:\n print(errtext)\n exit()\n return os.getenv(envvar)\n\n\ndef progressBar_old(iterable, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n total = len(iterable)\n\n # Progress Bar Printing Function\n def printProgressBar(iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)\n\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()\n\n\ndef progressBar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100, fill='█', head=\">\", printEnd=\"\\r\"):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n if iteration == total:\n head = fill\n bar = fill * filled_length + head + '-' * (bar_length - filled_length)\n\n sys.stdout.write('%s%s |%s| %s%s %s' % (printEnd, prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef days_between(d1, d2):\n d1 = datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.strptime(d2, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\ndef pretty_sql(engine, q1):\n # debug: looking at the SQL pretty printed\n text1=str(q1.statement.compile(engine, compile_kwargs={\"literal_binds\": True}))\n text2=sqlparse.format(text1, 
reindent=True, keyword_case='upper')\n return text2\n #print(text2)\n"
] |
[
[
"pandas.DataFrame",
"pandas.date_range"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
heraclex12/CLMR
|
[
"9616022ea9a6e0ccfec97dccb4c76e76d2e7d92a"
] |
[
"clmr/models/sample_cnn_xl.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom .model import Model\n\n\nclass SampleCNNXL(Model):\n def __init__(self, strides, supervised, out_dim):\n super(SampleCNN, self).__init__()\n\n self.strides = strides\n self.supervised = supervised\n self.sequential = [\n nn.Sequential(\n nn.Conv1d(1, 128, kernel_size=3, stride=3, padding=0),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n )\n ]\n\n self.hidden = [\n [128, 128],\n [128, 128],\n [128, 256],\n [256, 256],\n [256, 512],\n [512, 512],\n [512, 1024],\n [1024, 1024],\n [1024, 2048],\n ]\n\n assert len(self.hidden) == len(\n self.strides\n ), \"Number of hidden layers and strides are not equal\"\n for stride, (h_in, h_out) in zip(self.strides, self.hidden):\n self.sequential.append(\n nn.Sequential(\n nn.Conv1d(h_in, h_out, kernel_size=stride, stride=1, padding=1),\n nn.BatchNorm1d(h_out),\n nn.ReLU(),\n nn.MaxPool1d(stride, stride=stride),\n )\n )\n\n # 1 x 512\n self.sequential.append(\n nn.Sequential(\n nn.Conv1d(2048, 2048, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm1d(2048),\n nn.ReLU(),\n )\n )\n\n self.sequential = nn.Sequential(*self.sequential)\n\n if self.supervised:\n self.dropout = nn.Dropout(0.5)\n self.fc = nn.Linear(2048, out_dim)\n\n def forward(self, x):\n out = self.sequential(x)\n if self.supervised:\n out = self.dropout(out)\n\n out = out.reshape(x.shape[0], out.size(1) * out.size(2))\n logit = self.fc(out)\n return logit\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.MaxPool1d",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
knutss/PyWake
|
[
"6576bd8079d033cc4a1e7162cf8a803f3312fb57",
"10812cd962a8bcfa0d80d516fbc000f5a20d9863",
"6576bd8079d033cc4a1e7162cf8a803f3312fb57"
] |
[
"py_wake/tests/check_speed.py",
"py_wake/tests/test_ground_models/test_mirror.py",
"py_wake/tests/test_sites/test_distances.py"
] |
[
"from datetime import datetime\nimport functools\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom py_wake import NOJ\nfrom py_wake.deficit_models import fuga\nfrom py_wake.deficit_models.gaussian import IEA37SimpleBastankhahGaussian\nfrom py_wake.examples.data.hornsrev1 import wt_x, wt_y, HornsrevV80, Hornsrev1Site\nfrom py_wake.tests import npt\nfrom py_wake.tests.test_files import tfp\nfrom pandas.plotting import register_matplotlib_converters\nimport sys\nregister_matplotlib_converters()\n\n\ndef timeit(func, min_time=0, min_runs=1, verbose=False, line_profile=False, profile_funcs=[]):\n @functools.wraps(func)\n def newfunc(*args, **kwargs):\n if line_profile and getattr(sys, 'gettrace')() is None:\n from line_profiler import LineProfiler\n lp = LineProfiler()\n lp.timer_unit = 1e-6\n for f in profile_funcs:\n lp.add_function(f)\n lp_wrapper = lp(func)\n t = time.time()\n res = lp_wrapper(*args, **kwargs)\n t = time.time() - t\n if verbose:\n lp.print_stats()\n return res, [t]\n else:\n t_lst = []\n for i in range(100000):\n startTime = time.time()\n res = func(*args, **kwargs)\n t_lst.append(time.time() - startTime)\n if sum(t_lst) > min_time and len(t_lst) >= min_runs:\n if hasattr(func, '__name__'):\n fn = func.__name__\n else:\n fn = \"Function\"\n if verbose:\n print('%s: %f +/-%f (%d runs)' % (fn, np.mean(t_lst), np.std(t_lst), i + 1))\n return res, t_lst\n return newfunc\n\n\ndef check_speed_Hornsrev(WFModel):\n assert getattr(sys, 'gettrace')() is None, \"Skipping speed check, In debug mode!!!\"\n wt = HornsrevV80()\n site = Hornsrev1Site()\n wf_model = WFModel(site, wt)\n aep, t_lst = timeit(lambda x, y: wf_model(x, y).aep().sum(), min_runs=3)(wt_x, wt_y)\n\n fn = tfp + \"speed_check/%s.txt\" % WFModel.__name__\n if os.path.isfile(fn):\n with open(fn) as fid:\n lines = fid.readlines()\n\n # check aep\n npt.assert_almost_equal(float(lines[-1].split(\";\")[1]), aep)\n\n timings = np.array([(np.mean(eval(l.split(\";\")[2])), np.std(eval(l.split(\";\")[2]))) for l in lines])\n dates = [np.datetime64(l.split(\";\")[0]) for l in lines]\n dates = np.r_[dates, datetime.now()]\n y = np.r_[timings[:, 0], np.mean(t_lst)]\n\n error = np.r_[timings[:, 1], np.std(t_lst)]\n fig, axes = plt.subplots(2, 1)\n fig.suptitle(WFModel.__name__)\n for x, ax in zip([dates, np.arange(len(dates))], axes):\n ax.fill_between(x, y - 2 * error, y + 2 * error)\n ax.plot(x, y, '.-k')\n ax.axhline(y[:-1].mean() + 2 * error[:-1].mean(), ls='--', color='gray')\n\n if y[-1] > (y[:-1].mean() + 2 * error[:-1].mean()):\n raise Exception(\"Simulation time too slow, %f > %f\" % (y[-1], (y[:-1].mean() + 2 * error[:-1].mean())))\n\n if getattr(sys, 'gettrace')() is None:\n with open(fn, 'a') as fid:\n fid.write(\"%s;%.10f;%s\\n\" % (datetime.now(), aep, t_lst))\n\n\ndef test_check_speed():\n path = tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+0/'\n\n def Fuga(site, wt):\n return fuga.Fuga(path, site, wt)\n\n for WFModel in [NOJ, IEA37SimpleBastankhahGaussian, Fuga]:\n try:\n check_speed_Hornsrev(WFModel)\n except Exception as e:\n print(e)\n raise e\n if 1:\n plt.show()\n\n\nif __name__ == '__main__':\n path = tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+0/'\n\n def Fuga(site, wt):\n return fuga.Fuga(path, site, wt)\n\n for WFModel in [NOJ, IEA37SimpleBastankhahGaussian, Fuga]:\n try:\n check_speed_Hornsrev(WFModel)\n except Exception as e:\n print(e)\n raise e\n\n plt.show()\n",
"from py_wake.site._site import UniformSite\nfrom py_wake.examples.data.hornsrev1 import V80\nfrom py_wake.ground_models import Mirror\nfrom py_wake.deficit_models.noj import NOJ, NOJDeficit\nimport matplotlib.pyplot as plt\nfrom py_wake.flow_map import YZGrid\nimport numpy as np\nfrom py_wake.tests import npt\nfrom py_wake.wind_turbines import WindTurbines\nfrom py_wake.superposition_models import LinearSum, SquaredSum\nfrom py_wake.wind_farm_models.engineering_models import PropagateDownwind, All2AllIterative\nimport pytest\nfrom py_wake.deficit_models.gaussian import ZongGaussianDeficit\nfrom py_wake.turbulence_models.stf import STF2017TurbulenceModel\nfrom py_wake.ground_models.ground_models import MirrorSquaredSum\n\n\ndef test_Mirror_NOJ():\n # Compare points in flow map with ws of WT at same position\n site = UniformSite([1], ti=0.1)\n V80_D0 = V80()\n V80_D0._diameters = [0]\n wt = WindTurbines.from_WindTurbines([V80(), V80_D0])\n wfm = NOJ(site, wt, k=.5, groundModel=Mirror())\n sim_res = wfm([0], [0], h=[50], wd=0)\n fm_ref = sim_res.flow_map(YZGrid(x=0, y=np.arange(-70, 0, 20), z=10))\n ref = fm_ref.WS_eff_xylk[:, 0, 0, 0].values\n\n res = np.array([wfm([0, 0], [0, y], [50, 10], type=[0, 1], wd=0).WS_eff.sel(wt=1).item()\n for y in fm_ref.X[0]])\n\n if 0:\n fm_res = sim_res.flow_map(YZGrid(x=0, y=np.arange(-100, 10, 1)))\n fm_res.plot_wake_map()\n plt.plot(fm_ref.X[0], fm_ref.Y[0], '.')\n plt.plot(fm_ref.X[0], ref * 10, label='ref, WS*10')\n plt.plot(fm_ref.X[0], res * 10, label='Res, WS*10')\n\n plt.legend()\n plt.show()\n plt.close('all')\n npt.assert_array_equal(res, ref)\n\n\[email protected]('wfm_cls', [PropagateDownwind, All2AllIterative])\ndef test_Mirror(wfm_cls):\n # Compare points in flow map with ws of WT at same position. All2Alliterative failing with NOJ and WT.diameter=0\n # and therefore this cannot be tested above\n site = UniformSite([1], ti=0.1)\n wt = V80()\n wfm = wfm_cls(site, wt, ZongGaussianDeficit(a=[0, 1]),\n turbulenceModel=STF2017TurbulenceModel(), groundModel=Mirror())\n sim_res = wfm([0], [0], h=[50], wd=0,)\n fm_ref = sim_res.flow_map(YZGrid(x=0, y=np.arange(-70, 0, 20), z=10))\n ref = fm_ref.WS_eff_xylk[:, 0, 0, 0].values\n\n res = np.array([wfm([0, 0], [0, y], [50, 10], wd=0).WS_eff.sel(wt=1).item()\n for y in fm_ref.X[0]])\n\n if 0:\n fm_res = sim_res.flow_map(YZGrid(x=0, y=np.arange(-100, 10, 1)))\n fm_res.plot_wake_map()\n plt.plot(fm_ref.X[0], fm_ref.Y[0], '.')\n plt.plot(fm_ref.X[0], ref * 10, label='ref, WS*10')\n plt.plot(fm_ref.X[0], res * 10, label='Res, WS*10')\n\n plt.legend()\n plt.show()\n plt.close('all')\n npt.assert_array_equal(res, ref)\n\n\[email protected]('wfm_cls', [PropagateDownwind, All2AllIterative])\[email protected]('groundModel,superpositionModel', [(Mirror(), LinearSum()),\n (MirrorSquaredSum(), SquaredSum())])\ndef test_Mirror_flow_map(wfm_cls, groundModel, superpositionModel):\n site = UniformSite([1], ti=0.1)\n wt = V80()\n wfm = NOJ(site, wt, k=.5, superpositionModel=superpositionModel)\n\n fm_ref = wfm([0, 0 + 1e-20], [0, 0 + 1e-20], wd=0, h=[50, -50]\n ).flow_map(YZGrid(x=0, y=np.arange(-100, 100, 1) + .1, z=np.arange(1, 100)))\n fm_ref.plot_wake_map()\n plt.title(\"Underground WT added manually\")\n\n plt.figure()\n wfm = wfm_cls(site, wt, NOJDeficit(k=.5), groundModel=groundModel, superpositionModel=superpositionModel)\n fm_res = wfm([0], [0], wd=0, h=[50]).flow_map(YZGrid(x=0, y=np.arange(-100, 100, 1) + .1, z=np.arange(1, 100)))\n fm_res.plot_wake_map()\n plt.title(\"With Mirror GroundModel\")\n\n if 0:\n 
plt.show()\n plt.close('all')\n npt.assert_array_equal(fm_ref.WS_eff, fm_res.WS_eff)\n",
"import numpy as np\nfrom numpy import newaxis as na\nfrom py_wake.tests import npt\nfrom py_wake.site.distance import StraightDistance, TerrainFollowingDistance, TerrainFollowingDistance2\nfrom py_wake.site._site import UniformSite\nimport pytest\nfrom py_wake.examples.data.iea37._iea37 import IEA37_WindTurbines\nfrom py_wake import NOJ\nfrom py_wake.examples.data.ParqueFicticio import ParqueFicticioSite\nfrom py_wake.flow_map import HorizontalGrid\nimport matplotlib.pyplot as plt\nfrom py_wake.tests.check_speed import timeit\nfrom py_wake.examples.data.hornsrev1 import Hornsrev1Site\n\n\nclass FlatSite(UniformSite):\n def __init__(self, distance):\n UniformSite.__init__(self, p_wd=[1], ti=.075)\n self.distance = distance\n\n\nclass HalfCylinder(UniformSite):\n def __init__(self, height, distance_resolution):\n self.height = height\n super().__init__(p_wd=[1], ti=0)\n self.distance = TerrainFollowingDistance(distance_resolution=distance_resolution)\n\n def elevation(self, x_i, y_i):\n return np.sqrt(np.maximum(self.height**2 - x_i**2, 0))\n\n\nclass Rectangle(TerrainFollowingDistance, UniformSite):\n def __init__(self, height, width, distance_resolution):\n self.height = height\n self.width = width\n super().__init__(p_wd=[1], ti=0)\n self.distance = TerrainFollowingDistance(distance_resolution=distance_resolution)\n\n def elevation(self, x_i, y_i):\n return np.where(np.abs(x_i) < self.width / 2, self.height, 0)\n\n\[email protected]('distance', [StraightDistance(),\n TerrainFollowingDistance()\n ])\ndef test_flat_distances(distance):\n x = [0, 50, 100, 100, 0]\n y = [100, 100, 100, 0, 0]\n h = [0, 10, 20, 30, 0]\n wdirs = [0, 30, 90]\n\n site = FlatSite(distance=distance)\n site.distance.setup(src_x_i=x, src_y_i=y, src_h_i=h)\n dw_ijl, hcw_ijl, dh_ijl = site.distance(wd_il=np.array(wdirs)[na], src_idx=[0, 1, 2, 3], dst_idx=[4])\n dw_indices_l = site.distance.dw_order_indices(np.array(wdirs))\n\n if 0:\n distance.plot(wd_il=np.array(wdirs)[na], src_i=[0, 1, 2, 3], dst_i=[4])\n plt.show()\n\n npt.assert_array_almost_equal(dw_ijl, [[[100, 86.6025404, 0]],\n [[100, 111.602540, 50]],\n [[100, 136.602540, 100]],\n [[0, 50, 100]]])\n npt.assert_array_almost_equal(hcw_ijl, [[[0, 50, 100]],\n [[-50, 6.69872981, 100]],\n [[-100, -36.6025404, 100]],\n [[-100, -86.6025404, 0]]])\n npt.assert_array_almost_equal(dh_ijl, [[[0, 0, 0]],\n [[-10, -10, -10]],\n [[-20, -20, -20]],\n [[-30, -30, -30]]])\n npt.assert_array_equal(dw_indices_l[:, :4], [[2, 1, 0, 3],\n [2, 1, 0, 3],\n [2, 3, 1, 0]])\n\n\[email protected]('distance', [StraightDistance(),\n TerrainFollowingDistance()])\ndef test_flat_distances_src_neq_dst(distance):\n x = [0, 50, 100]\n y = [100, 100, 0]\n h = [0, 10, 20]\n wdirs = [0, 30]\n\n site = FlatSite(distance=distance)\n site.distance.setup(src_x_i=x, src_y_i=y, src_h_i=h, dst_xyh_j=(x, y, [1, 2, 3]))\n dw_ijl, hcw_ijl, dh_ijl = site.distance(wd_il=np.array(wdirs)[na])\n dw_indices_l = distance.dw_order_indices(wdirs)\n if 0:\n distance.plot(wd_il=np.array(wdirs)[na])\n plt.show()\n\n # check down wind distance wind from North and 30 deg\n npt.assert_array_almost_equal(dw_ijl[:, :, 0], [[0, 0, 100],\n [0, 0, 100],\n [-100, -100, 0]])\n npt.assert_array_almost_equal(dw_ijl[:, :, 1], [[0, -25, 36.60254038],\n [25, 0, 61.60254038],\n [-36.60254038, -61.60254038, 0]])\n\n # check cross wind distance wind from North and 30 deg\n npt.assert_array_almost_equal(hcw_ijl[:, :, 0], [[0, 50, 100],\n [-50, 0, 50],\n [-100, -50, 0]])\n npt.assert_array_almost_equal(hcw_ijl[:, :, 1], [[0, 43.30127019, 
136.60254038],\n [-43.30127019, 0., 93.30127019],\n [-136.60254038, -93.30127019, 0.]])\n # check cross wind distance wind from North\n npt.assert_array_almost_equal(dh_ijl[:, :, 0], [[1, 2, 3],\n [-9, -8, -7],\n [-19, -18, -17]])\n # check dw indices\n npt.assert_array_equal(dw_indices_l, [[1, 0, 2],\n [1, 0, 2]])\n\n\ndef test_iea37_distances():\n from py_wake.examples.data.iea37 import IEA37Site\n n_wt = 16 # must be 9, 16, 36, 64\n site = IEA37Site(n_wt)\n x, y = site.initial_position.T\n lw = site.local_wind(x_i=x, y_i=y,\n wd=site.default_wd,\n ws=site.default_ws)\n site.distance.setup(x, y, np.zeros_like(x))\n dw_iil, hcw_iil, _ = site.wt2wt_distances(wd_il=lw.WD_ilk.mean(2))\n # Wind direction.\n wdir = np.rad2deg(np.arctan2(hcw_iil, dw_iil))\n npt.assert_allclose(\n wdir[:, 0, 0],\n [180, -90, -18, 54, 126, -162, -90, -54, -18, 18, 54, 90, 126, 162, -162, -126],\n atol=1e-4)\n\n if 0:\n _, ax = plt.subplots()\n ax.scatter(x, y)\n for i, txt in enumerate(np.arange(len(x))):\n ax.annotate(txt, (x[i], y[i]), fontsize='large')\n plt.show()\n\n\ndef test_terrain_following_half_cylinder():\n\n hc = HalfCylinder(height=100, distance_resolution=100000)\n\n src_x, src_y = np.array([-100, -50, 0]), [0, 0, 0]\n dst_x, dst_y = np.array([100, 200, 300, 400]), [0, 0, 0, 0]\n x = np.arange(-150, 150)\n\n hc.distance.setup(src_x_i=src_x, src_y_i=src_y, src_h_i=src_x * 0,\n dst_xyh_j=(dst_x, dst_y, dst_x * 0))\n dw_ijl, hcw_ijl, _ = hc.distance(wd_il=np.array([0, 90])[na])\n\n if 0:\n plt.plot(x, hc.elevation(x_i=x, y_i=x * 0))\n plt.plot(src_x, hc.elevation(x_i=src_x, y_i=src_y), '.')\n plt.plot(dst_x, dst_y, 'o')\n plt.axis('equal')\n plt.show()\n\n dist2flat = np.pi * np.array([1, 2 / 3, .5]) * 100\n dist2flat = dist2flat[:, na] + (np.arange(4) * 100)\n npt.assert_array_almost_equal(dw_ijl[:, :, 1], -dist2flat, 2)\n npt.assert_array_almost_equal(hcw_ijl[:, :, 0], [[200., 300., 400., 500.],\n [150., 250., 350., 450.],\n [100., 200., 300., 400.]], 2)\n\n # down wind distance for 0 deg and cross wind distance for 30 deg ~ 0\n npt.assert_array_almost_equal(dw_ijl[:, :, 0], 0)\n npt.assert_array_almost_equal(hcw_ijl[:, :, 1], 0)\n\n\ndef test_distance_over_rectangle():\n x, y = [-100, 50], [200, -100]\n windTurbines = IEA37_WindTurbines()\n site = Rectangle(height=200, width=100, distance_resolution=100)\n wf_model = NOJ(site, windTurbines)\n sim_res = wf_model(x, y, wd=[270], ws=[9])\n x_j = np.linspace(-100, 500, 50)\n y_j = np.linspace(-200, 300, 50)\n flow_map = sim_res.flow_map(HorizontalGrid(x_j, y_j))\n Z = flow_map.WS_eff_xylk[:, :, 0, 0]\n X, Y = flow_map.X, flow_map.Y\n\n my = np.argmin(np.abs(Y[:, 0] - 200))\n my2 = np.argmin(np.abs(Y[:, 0] + 100))\n\n if 0:\n flow_map.plot_wake_map()\n H = site.elevation(X, Y)\n plt.plot(X[my], Z[my] * 10, label='wsp*10')\n plt.plot(X[my2], Z[my2] * 10, label='wsp*10')\n plt.contour(X, Y, H)\n plt.plot(X[my, :50:4], Z[my, :50:4] * 10, '.')\n plt.plot(x_j, site.elevation(x_j, x_j * 0), label='terrain level')\n plt.legend()\n plt.show()\n\n ref = [9., 3.42, 3.8, 6.02, 6.17, 6.31, 6.43, 7.29, 7.35, 7.41, 7.47, 7.53, 7.58]\n npt.assert_array_almost_equal(Z[my, :25:2], ref, 2)\n\n\ndef test_distance_plot():\n\n x = [0, 50, 100, 100, 0]\n y = [100, 100, 100, 0, 0]\n h = [0, 10, 20, 30, 0]\n wdirs = [0, 30, 90]\n distance = StraightDistance()\n distance.setup(src_x_i=x, src_y_i=y, src_h_i=h)\n distance.plot(wd_il=np.array(wdirs)[na], src_idx=[0], dst_idx=[3])\n if 0:\n plt.show()\n plt.close('all')\n\n\n# 
======================================================================================================================\n# TerrainFollowingDistance2\n# ======================================================================================================================\n\nclass ParqueFicticioSiteTerrainFollowingDistance2(ParqueFicticioSite):\n def __init__(self):\n ParqueFicticioSite.__init__(self, distance=TerrainFollowingDistance2())\n# def __init__():\n# site = ParqueFicticioSite(distance=TerrainFollowingDistance2())\n# x, y = site.initial_position.T\n# return site, x, y\n\n\ndef test_distances_ri():\n site = ParqueFicticioSiteTerrainFollowingDistance2()\n x, y = site.initial_position.T\n site.calc_all = False\n site.r_i = np.ones(len(x)) * 90\n site.distance.setup(src_x_i=x, src_y_i=y, src_h_i=np.array([70]),\n dst_xyh_j=(x, y, np.array([70])))\n dw_ijl, cw_ijl, dh_ijl = site.distance(wd_il=np.array([[180]]))\n npt.assert_almost_equal(dw_ijl[0, :, 0], np.array([0., -207., -477., -710., -1016., -1236., -1456., -1799.]))\n npt.assert_almost_equal(cw_ijl[:, 1, 0], np.array([-236.1, 0., 131.1, 167.8, 204.5, 131.1, 131.1, 45.4]))\n npt.assert_almost_equal(dh_ijl, np.zeros_like(dh_ijl))\n\n\ndef test_distance2_outside_map_WestEast():\n site = ParqueFicticioSiteTerrainFollowingDistance2()\n\n x = np.arange(-1500, 1000, 500) + 264777\n h = x * 0\n y = h + 6505450\n site.distance.setup(src_x_i=x, src_y_i=y, src_h_i=h,\n dst_xyh_j=(x, y, h * 0))\n dw = site.distance(wd_il=[270])[0]\n\n if 0:\n site.ds.Elevation.plot()\n plt.plot(x, y, '.-', label='Terrain line')\n plt.plot(x, y + site.elevation(x, y))\n plt.legend()\n plt.show()\n # distance between points should be >500 m due to terrain, except last point which is outside map\n npt.assert_array_equal(np.round(np.diff(dw[0, :, 0])), [527, 520, 505, 500.])\n\n\ndef test_distance2_outside_map_NorthSouth():\n site = ParqueFicticioSiteTerrainFollowingDistance2()\n y = np.arange(-1500, 1000, 500) + 6506613.0\n h = y * 0\n x = h + 264200\n site.distance.setup(src_x_i=x, src_y_i=y, src_h_i=h,\n dst_xyh_j=(x, y, h * 0))\n dw = site.distance(wd_il=[180])[0]\n\n if 0:\n site.ds.Elevation.plot()\n plt.plot(x, y, '.-', label='Terrain line')\n plt.plot(x + site.elevation(x, y), y)\n plt.legend()\n plt.show()\n # distance between points should be >500 m due to terrain, except last point which is outside map\n npt.assert_array_equal(np.round(np.diff(dw[0, :, 0])), [510, 505, 507, 500])\n"
] |
[
[
"matplotlib.pyplot.subplots",
"numpy.std",
"numpy.mean",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"numpy.maximum",
"numpy.abs",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.arctan2",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.diff",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abdalazizrashid/Theano-PyMC
|
[
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb",
"90fa750461e91fb6281d494ae86404e2153fd7eb"
] |
[
"aesara/tensor/opt.py",
"tests/tensor/test_inplace.py",
"tests/gof/test_compute_test_value.py",
"aesara/misc/burn_gpu.py",
"tests/compile/test_pfunc.py",
"tests/gpuarray/test_blas.py",
"tests/gpuarray/test_opt.py"
] |
[
"\"\"\" Tensor optimizations addressing the ops in basic.py.\"\"\"\n# TODO: intelligent merge for mul/add\n# TODO: 0*x -> 0\n\nimport itertools\nimport logging\nimport operator\nimport sys\nimport time\nimport traceback\nimport warnings\nfrom collections import defaultdict\nfrom functools import reduce\nfrom io import StringIO\n\nimport numpy as np\n\nimport aesara\nimport aesara.scalar.basic as ts\nfrom aesara import compile, config, gof # to register the optimizer built by this file\nfrom aesara.compile.ops import Shape, Shape_i\nfrom aesara.gof import (\n Constant,\n InconsistencyError,\n LocalOptimizer,\n OpRemove,\n PatternSub,\n TopoOptimizer,\n Variable,\n graph,\n opt,\n toolbox,\n)\nfrom aesara.gof.op import Op\nfrom aesara.gof.opt import (\n Optimizer,\n copy_stack_trace,\n in2out,\n local_optimizer,\n pre_constant_merge,\n pre_greedy_local_optimizer,\n)\nfrom aesara.gof.utils import MethodNotDefined, TestValueError\nfrom aesara.gradient import DisconnectedType\n\n# Work-around for Python 3.6 issue that prevents `import aesara.tensor as tt`\nfrom aesara.tensor import basic as tt\nfrom aesara.tensor.basic import (\n Alloc,\n AllocEmpty,\n Dot,\n Flatten,\n Join,\n NotScalarConstantError,\n Rebroadcast,\n Reshape,\n ScalarFromTensor,\n ShapeError,\n Split,\n TensorFromScalar,\n Tile,\n abs_,\n add,\n alloc,\n erf,\n erfc,\n extract_constant,\n fill,\n get_scalar_constant_value,\n int_div,\n inv,\n log,\n log1p,\n mul,\n neg,\n pow,\n sub,\n tensor_copy,\n true_div,\n)\nfrom aesara.tensor.elemwise import (\n All,\n Any,\n CAReduce,\n DimShuffle,\n Elemwise,\n Prod,\n ProdWithoutZeros,\n Sum,\n)\nfrom aesara.tensor.sort import TopKOp\nfrom aesara.tensor.subtensor import (\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1,\n AdvancedSubtensor1,\n IncSubtensor,\n Subtensor,\n advanced_inc_subtensor1,\n advanced_subtensor,\n advanced_subtensor1,\n as_index_constant,\n get_canonical_form_slice,\n get_idx_list,\n)\nfrom aesara.tensor.type import (\n values_eq_approx_remove_inf,\n values_eq_approx_remove_inf_nan,\n values_eq_approx_remove_nan,\n)\n\n\n# import aesara.tensor.basic as tt\n\n\n_logger = logging.getLogger(\"aesara.tensor.opt\")\n\n\ndef _fill_chain(new_out, orig_inputs):\n for i in orig_inputs:\n new_out = fill(i, new_out)\n return [new_out]\n\n\ndef encompasses_broadcastable(b1, b2):\n \"\"\"\n\n Parameters\n ----------\n b1\n The broadcastable attribute of a tensor type.\n b2\n The broadcastable attribute of a tensor type.\n\n Returns\n -------\n bool\n True if the broadcastable patterns b1 and b2 are such that b2 is\n broadcasted to b1's shape and not the opposite.\n\n \"\"\"\n if len(b1) < len(b2):\n return False\n b1 = b1[-len(b2) :]\n return not any(v1 and not v2 for v1, v2 in zip(b1, b2))\n\n\ndef merge_broadcastables(broadcastables):\n return [all(bcast) for bcast in zip(*broadcastables)]\n\n\ndef scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):\n \"\"\"Partition a list of variables into two kinds:\n scalar constants, and the rest.\"\"\"\n consts = []\n origconsts = []\n nonconsts = []\n for i in inputs:\n try:\n v = get_scalar_constant_value(\n i, elemwise=elemwise, only_process_constants=only_process_constants\n )\n consts.append(v)\n origconsts.append(i)\n except NotScalarConstantError:\n nonconsts.append(i)\n return consts, origconsts, nonconsts\n\n\ndef broadcast_like(value, template, fgraph, dtype=None):\n \"\"\"\n Return a Variable with the same shape and dtype as the template,\n filled by broadcasting value through it. 
`value` will be cast as\n necessary.\n\n \"\"\"\n value = tt.as_tensor_variable(value)\n if value.type == template.type:\n return value\n if template not in fgraph.variables:\n raise NotImplementedError(\n \"broadcast_like currently requires the \"\n \"template Variable to be in the fgraph already\"\n )\n if dtype is None:\n dtype = template.dtype\n value = tt.cast(value, dtype)\n if value.type == template.type:\n return value\n if hasattr(fgraph, \"shape_feature\"):\n new_shape = fgraph.shape_feature.shape_of[template]\n else:\n new_shape = template.shape\n rval = alloc(value, *new_shape)\n # the template may have 1s in its shape without being broadcastable\n if rval.broadcastable != template.broadcastable:\n rval = tt.unbroadcast(\n rval,\n *[\n i\n for i in range(rval.ndim)\n if rval.broadcastable[i] and not template.broadcastable[i]\n ],\n )\n assert rval.type.dtype == dtype\n\n if rval.type.broadcastable != template.broadcastable:\n raise AssertionError(\n \"rval.type.broadcastable is \"\n + str(rval.type.broadcastable)\n + \" but template.broadcastable is\"\n + str(template.broadcastable)\n )\n\n return rval\n\n\nclass InplaceElemwiseOptimizer(Optimizer):\n \"\"\"\n We parametrise it to make it work for Elemwise and GpuElemwise op.\n \"\"\"\n\n def __init__(self, OP):\n self.op = OP\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(gof.destroyhandler.DestroyHandler())\n\n @staticmethod\n def print_profile(stream, prof, level=0):\n blanc = \" \" * level\n print(blanc, \"InplaceElemwiseOptimizer \", prof[\"opt\"].op, file=stream)\n for k in [\n \"node_before\",\n \"nb_call_replace\",\n \"nb_call_validate\",\n \"nb_inconsistent\",\n ]:\n print(blanc, k, prof[k], file=stream)\n ndim = prof[\"ndim\"]\n if ndim:\n print(blanc, \"ndim\", \"nb\", file=stream)\n for n in sorted(ndim.keys()):\n print(blanc, n, ndim[n], file=stream)\n\n def apply(self, fgraph):\n \"\"\"\n Usage: InplaceElemwiseOptimizer(op).optimize(fgraph)\n\n Attempts to replace all Broadcast ops by versions of them\n that operate inplace. It operates greedily: for each Broadcast\n Op that is encountered, for each output, tries each input to\n see if it can operate inplace on that input. 
If so, makes the\n change and go to the next output or Broadcast Op.\n\n Examples\n --------\n\n `x + y + z -> x += y += z`\n\n `(x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)`\n\n \"\"\"\n # We should not validate too often as this takes too much time to\n # execute!\n # It is the _dfs_toposort() fct in aesara/gof/destroyhandler.py\n # that takes so much time.\n # Should we try to use another lib that does toposort?\n # igraph: http://igraph.sourceforge.net/\n # networkx: https://networkx.lanl.gov/\n # Should we try to use cython?\n # Compiling only that fct is not enough, should we try to add the\n # deque class too?\n # And init the deque and other list to an upper bound number of\n # elements?\n # Maybe Aesara should do online toposort as in\n # http://code.google.com/p/acyclic\n #\n # The next longest optimizer is the canonizer phase.\n # Then I think it is the [io_?]toposort (need to validate) so check if\n # the solution is also applicable there.\n\n # We execute `validate` after this number of change.\n prof = {\n \"opt\": self,\n \"node_before\": len(fgraph.apply_nodes),\n \"nb_call_replace\": 0,\n \"nb_call_validate\": 0,\n \"nb_inconsistent\": 0,\n \"ndim\": defaultdict(lambda: 0),\n }\n\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\n if check_each_change == -1:\n if len(fgraph.apply_nodes) > 500:\n check_each_change = 10\n else:\n check_each_change = 1\n\n nb_change_no_validate = 0\n chk = fgraph.checkpoint()\n\n if fgraph.update_mapping:\n update_outs = [fgraph.outputs[i] for i in fgraph.update_mapping]\n else:\n update_outs = []\n\n protected_inputs = [\n f.protected\n for f in fgraph._features\n if isinstance(f, aesara.compile.function_module.Supervisor)\n ]\n protected_inputs = sum(protected_inputs, []) # flatten the list\n protected_inputs.extend(fgraph.outputs)\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\n op = node.op\n # gpuarray GpuElemwise inherit from Elemwise\n if not type(op) == self.op:\n continue\n # If big graph and the outputs are scalar, do not make it\n # inplace.\n if (\n check_each_change != 1\n and\n # If multiple outputs, they must all have the same size,\n # so only check the first.\n getattr(node.outputs[0].type, \"ndim\", -1) == 0\n ):\n continue\n\n if op.inplace_pattern:\n # Maybe this isn't needed anymore, but I don't want to\n # rish regression now. 
This case only happen if the\n # original node add already some inplace patter and we\n # still try to add more pattern.\n\n baseline = op.inplace_pattern\n candidate_outputs = [\n i for i in range(len(node.outputs)) if i not in baseline\n ]\n # node inputs that are Constant, already destroyed,\n # or fgraph protected inputs and fgraph outputs can't be used as\n # inplace target.\n # Remove here as faster.\n candidate_inputs = [\n i\n for i in range(len(node.inputs))\n if i not in baseline.values()\n and not isinstance(node.inputs[i], Constant)\n and\n # the next line should not be costly most of the time.\n not fgraph.has_destroyers([node.inputs[i]])\n and node.inputs[i] not in protected_inputs\n ]\n else:\n baseline = []\n candidate_outputs = list(range(len(node.outputs)))\n # node inputs that are Constant, already destroyed,\n # fgraph protected inputs and fgraph outputs can't be used as inplace\n # target.\n # Remove here as faster.\n candidate_inputs = [\n i\n for i in range(len(node.inputs))\n if not isinstance(node.inputs[i], Constant)\n and not fgraph.has_destroyers([node.inputs[i]])\n and node.inputs[i] not in protected_inputs\n ]\n\n verbose = False\n\n raised_warning = not verbose\n\n for candidate_output in candidate_outputs:\n\n # If the output of the node can be established as an update\n # output of the fgraph, visit the candidate_inputs in an order\n # that will improve the chances of making the node operate\n # inplace on the input it's meant to update\n candidate_out_var = node.outputs[candidate_output]\n sorted_candidate_inputs = candidate_inputs\n\n if candidate_out_var in update_outs:\n\n # The candidate output is an update. Sort the\n # variables in candidate_inputs in the following order:\n # - Vars corresponding to the actual updated input\n # (best case scenario is for the node that procudes\n # an update to operate inplace on the variable to\n # update)\n # - Vars computed inplace on the updates input (second\n # best scenario if for the node to work inplace on\n # a variable obtained by a chain of inplace on the\n # variable to update. 
In some cases, this will be\n # equivalent to operating inplace on the variable to\n # update)\n # - Remaining variables\n updated_inputs = []\n for i, f_out in enumerate(fgraph.outputs):\n if f_out is candidate_out_var and i in fgraph.update_mapping:\n updated_inp_idx = fgraph.update_mapping[i]\n updated_inputs.append(fgraph.inputs[updated_inp_idx])\n\n updated_vars = []\n vars_from_inplace = []\n other_vars = []\n for inp_idx in candidate_inputs:\n inp = node.inputs[inp_idx]\n if inp in updated_inputs:\n # the candidate input is the actual updated input\n updated_vars.append(inp_idx)\n elif (\n hasattr(fgraph, \"destroy_handler\")\n and inp.owner\n and any(\n [\n fgraph.destroy_handler.root_destroyer.get(\n up_inp, None\n )\n is inp.owner\n for up_inp in updated_inputs\n ]\n )\n ):\n\n # the candidate input is a variable computed\n # inplace on the updated input via a sequence of\n # one or more inplace operations\n vars_from_inplace.append(inp_idx)\n else:\n other_vars.append(inp_idx)\n\n sorted_candidate_inputs = (\n updated_vars + vars_from_inplace + other_vars\n )\n\n for candidate_input in sorted_candidate_inputs:\n # remove inputs that don't have the same dtype as the output\n if (\n node.inputs[candidate_input].type\n != node.outputs[candidate_output].type\n ):\n continue\n\n inplace_pattern = dict(baseline)\n inplace_pattern[candidate_output] = candidate_input\n try:\n if hasattr(op.scalar_op, \"make_new_inplace\"):\n new_scal = op.scalar_op.make_new_inplace(\n ts.transfer_type(\n *[\n inplace_pattern.get(i, o.dtype)\n for i, o in enumerate(node.outputs)\n ]\n )\n )\n else:\n new_scal = op.scalar_op.__class__(\n ts.transfer_type(\n *[\n inplace_pattern.get(i, None)\n for i in range(len(node.outputs))\n ]\n )\n )\n new_outputs = self.op(new_scal, inplace_pattern)(\n *node.inputs, **dict(return_list=True)\n )\n new_node = new_outputs[0].owner\n\n for r, new_r in zip(node.outputs, new_outputs):\n prof[\"nb_call_replace\"] += 1\n fgraph.replace(\n r, new_r, reason=\"inplace_elemwise_optimizer\"\n )\n nb_change_no_validate += 1\n prof[\"ndim\"][candidate_out_var.ndim] += 1\n if nb_change_no_validate >= check_each_change:\n prof[\"nb_call_validate\"] += 1\n fgraph.validate()\n chk = fgraph.checkpoint()\n nb_change_no_validate = 0\n except (ValueError, InconsistencyError) as e:\n prof[\"nb_inconsistent\"] += 1\n if check_each_change != 1 and not raised_warning:\n print(\n (\n \"Some inplace optimization was not \"\n \"performed due to unexpected error:\"\n ),\n file=sys.stderr,\n )\n print(e, file=sys.stderr)\n raised_warning = True\n fgraph.revert(chk)\n continue\n candidate_inputs.remove(candidate_input)\n node = new_node\n baseline = inplace_pattern\n break\n\n if nb_change_no_validate > 0:\n try:\n fgraph.validate()\n except Exception:\n if not raised_warning:\n print(\n (\n \"Some inplace optimization was not \"\n \"performed due to unexpected error\"\n ),\n file=sys.stderr,\n )\n fgraph.revert(chk)\n return prof\n\n def print_summary(self, stream=sys.stdout, level=0, depth=-1):\n print(\n \"{}{} ({})\".format((\" \" * level), self.__class__.__name__, self.op),\n file=stream,\n )\n return inplace_elemwise_optimizer\n\n\ninplace_elemwise_optimizer = InplaceElemwiseOptimizer(Elemwise)\ncompile.optdb.register(\n \"inplace_elemwise_opt\",\n inplace_elemwise_optimizer,\n 75,\n \"inplace_opt\", # for historic reason\n \"inplace_elemwise_optimizer\",\n \"fast_run\",\n \"inplace\",\n)\n\n\ndef register_useless(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n 
return register_useless(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = kwargs.pop(\"name\", None) or lopt.__name__\n\n compile.mode.local_useless.register(\n name, lopt, \"last\", \"fast_run\", *tags, **kwargs\n )\n return lopt\n\n\ndef register_canonicalize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n return register_canonicalize(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = kwargs.pop(\"name\", None) or lopt.__name__\n compile.optdb[\"canonicalize\"].register(name, lopt, \"fast_run\", *tags, **kwargs)\n return lopt\n\n\ndef register_stabilize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n return register_stabilize(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = kwargs.pop(\"name\", None) or lopt.__name__\n compile.optdb[\"stabilize\"].register(name, lopt, \"fast_run\", *tags, **kwargs)\n return lopt\n\n\ndef register_specialize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n return register_specialize(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = kwargs.pop(\"name\", None) or lopt.__name__\n compile.optdb[\"specialize\"].register(name, lopt, \"fast_run\", *tags, **kwargs)\n return lopt\n\n\ndef register_uncanonicalize(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n return register_uncanonicalize(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = (kwargs and kwargs.pop(\"name\", None)) or lopt.__name__\n compile.optdb[\"uncanonicalize\"].register(\n name, lopt, \"fast_run\", *tags, **kwargs\n )\n return lopt\n\n\ndef register_specialize_device(lopt, *tags, **kwargs):\n if type(lopt) == str:\n\n def register(inner_lopt):\n return register_specialize_device(inner_lopt, lopt, *tags, **kwargs)\n\n return register\n else:\n name = (kwargs and kwargs.pop(\"name\", None)) or lopt.__name__\n compile.optdb[\"specialize_device\"].register(\n name, lopt, \"fast_run\", *tags, **kwargs\n )\n return lopt\n\n\n#####################\n# Dot optimizations #\n#####################\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Dot])\ndef local_0_dot_x(node):\n if not isinstance(node.op, Dot):\n return False\n\n x = node.inputs[0]\n y = node.inputs[1]\n replace = False\n try:\n if get_scalar_constant_value(x, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n try:\n if get_scalar_constant_value(y, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n if replace:\n constant_zero = tt.constant(0, dtype=node.outputs[0].type.dtype)\n if x.ndim == 2 and y.ndim == 2:\n constant_zero = assert_(constant_zero, tt.eq(x.shape[1], y.shape[0]))\n return [alloc(constant_zero, x.shape[0], y.shape[1])]\n elif x.ndim == 1 and y.ndim == 2:\n constant_zero = assert_(constant_zero, tt.eq(x.shape[0], y.shape[0]))\n return [alloc(constant_zero, y.shape[1])]\n elif x.ndim == 2 and y.ndim == 1:\n constant_zero = assert_(constant_zero, tt.eq(x.shape[1], y.shape[0]))\n return [alloc(constant_zero, x.shape[0])]\n elif x.ndim == 1 and y.ndim == 1:\n constant_zero = assert_(constant_zero, tt.eq(x.shape[0], y.shape[0]))\n return [constant_zero]\n else:\n _logger.warning(\n \"Optimization Warning: \"\n \"Optimization aesara/opt.py:local_0_dot_x Found \"\n \"that it could apply, but was not implemented \"\n \"for dot product with these input types:\\n\"\n \"(%s, %s)\",\n x.type,\n 
y.type,\n )\n\n\n######################\n# DimShuffle lifters #\n######################\n\n\ndef apply_local_dimshuffle_lift(var):\n # return var\n # lift recursively\n if not var.owner:\n return var\n new = local_dimshuffle_lift.transform(var.owner)\n if new:\n return new[0]\n return var\n\n\n# Checks for two types of useless dimshuffles:\n# 1 - dimshuffle all dimensions in order.\n# 2 - dimshuffle a broadcastable dimension.\ndef is_dimshuffle_useless(new_order, input):\n is_useless = True\n if len(new_order) == input.type.ndim:\n all_broadcastable_dims = [\n i\n for (i, is_broadcastable) in enumerate(input.type.broadcastable)\n if is_broadcastable\n ] + [\"x\"]\n for i in range(input.type.ndim):\n if new_order[i] == i or (\n i in all_broadcastable_dims and new_order[i] in all_broadcastable_dims\n ):\n is_useless = True\n else:\n is_useless = False\n break\n else:\n is_useless = False\n return is_useless\n\n\n@local_optimizer([DimShuffle])\ndef local_dimshuffle_lift(node):\n \"\"\"\n \"Lifts\" DimShuffle through Elemwise operations and merges\n consecutive DimShuffles. Basically, applies the following\n transformations on the whole graph:\n\n DimShuffle(Elemwise(x, y)) => Elemwise(DimShuffle(x), DimShuffle(y))\n DimShuffle(DimShuffle(x)) => DimShuffle(x)\n DimShuffle{0,1,...}(x) => x (when the dimshuffle do nothing)\n\n After this transform, clusters of Elemwise operations are\n void of DimShuffle operations.\n\n \"\"\"\n op = node.op\n if not isinstance(op, DimShuffle):\n return False\n\n input = node.inputs[0]\n inode = input.owner\n new_order = op.new_order\n if inode and isinstance(inode.op, Elemwise) and (len(input.clients) == 1):\n # Don't use make_node to have tag.test_value set.\n new_inputs = []\n for inp in inode.inputs:\n new_inp = op.__class__(inp.type.broadcastable, op.new_order)(inp)\n new_inputs.append(apply_local_dimshuffle_lift(new_inp))\n copy_stack_trace(node.outputs[0], new_inputs)\n ret = inode.op(*new_inputs, **dict(return_list=True))\n return ret\n if inode and isinstance(inode.op, DimShuffle):\n new_order = [x == \"x\" and \"x\" or inode.op.new_order[x] for x in new_order]\n input = inode.inputs[0]\n\n if is_dimshuffle_useless(new_order, input):\n return [input]\n elif inode and isinstance(inode.op, DimShuffle):\n ret = op.__class__(input.type.broadcastable, new_order)(input)\n ret = apply_local_dimshuffle_lift(ret)\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n@register_canonicalize\n@local_optimizer([Reshape])\ndef local_useless_dimshuffle_in_reshape(node):\n \"\"\"\n Removes useless DimShuffle operation inside Reshape:\n\n reshape(vector.dimshuffle('x', 0), shp) => reshape(vector, shp)\n reshape(matrix.dimshuffle('x', 0, 'x', 1), shp) => reshape(matrix, shp)\n reshape(row.dimshuffle(1, 'x'), shp) => reshape(row, shp)\n reshape(col.dimshuffle(0), shp) => reshape(col, shp)\n\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n if not (\n node.inputs[0].owner is not None\n and isinstance(node.inputs[0].owner.op, DimShuffle)\n ):\n return False\n\n new_order = node.inputs[0].owner.op.new_order\n input = node.inputs[0].owner.inputs[0]\n broadcastables = node.inputs[0].broadcastable\n new_order_of_nonbroadcast = []\n for i, bd in zip(new_order, broadcastables):\n if not bd:\n new_order_of_nonbroadcast.append(i)\n no_change_in_order = all(\n new_order_of_nonbroadcast[i] <= new_order_of_nonbroadcast[i + 1]\n for i in range(len(new_order_of_nonbroadcast) - 1)\n )\n if no_change_in_order:\n shape = node.inputs[1]\n ret = 
op.__class__(node.outputs[0].ndim)(input, shape)\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n@register_canonicalize\n@local_optimizer([DimShuffle])\ndef local_lift_transpose_through_dot(node):\n \"\"\"\n dot(x,y).T -> dot(y.T, x.T)\n\n These optimizations \"lift\" (propagate towards the inputs) DimShuffle\n through dot product. It allows to put the graph in a more standard shape,\n and to later merge consecutive DimShuffles.\n\n The transformation should be apply whether or not the transpose is\n inplace. The newly-introduced transpositions are not inplace, this will\n be taken care of in a later optimization phase.\n\n \"\"\"\n if not (isinstance(node.op, tt.DimShuffle) and node.op.new_order == (1, 0)):\n return False\n if not (node.inputs[0].owner and isinstance(node.inputs[0].owner.op, Dot)):\n return False\n x, y = node.inputs[0].owner.inputs\n\n if x.ndim == y.ndim == 2:\n # Output is dot product of transposed inputs in reverse order\n ret = [tt.dot(y.T, x.T)]\n\n # Copy over stack trace to output from result of dot-product\n copy_stack_trace(node.inputs[0], ret)\n return ret\n\n\nregister_canonicalize(local_dimshuffle_lift)\nregister_specialize(local_dimshuffle_lift)\n\n######################\n# Casting operations #\n######################\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([TensorFromScalar])\ndef local_tensor_scalar_tensor(node):\n \"\"\"tensor_from_scalar(scalar_from_tensor(x)) -> x\"\"\"\n if isinstance(node.op, TensorFromScalar):\n s = node.inputs[0]\n if s.owner and isinstance(s.owner.op, ScalarFromTensor):\n t = s.owner.inputs[0]\n\n # We don't need to copy over any stack traces here\n return [t]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([ScalarFromTensor])\ndef local_scalar_tensor_scalar(node):\n \"\"\"scalar_from_tensor(tensor_from_scalar(x)) -> x\"\"\"\n if isinstance(node.op, ScalarFromTensor):\n t = node.inputs[0]\n if t.owner and isinstance(t.owner.op, TensorFromScalar):\n s = t.owner.inputs[0]\n\n # We don't need to copy over any stack traces here\n return [s]\n\n\n#####################################\n# ShapeFeature, Shape optimizations\n#####################################\n\n\nclass MakeVector(Op):\n \"\"\"Concatenate a number of scalars together into a vector.\n\n This is a simple version of stack() that introduces far less cruft\n into the graph. Should work with 0 inputs. The constant_folding\n optimization will remove it.\n\n \"\"\"\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype=\"int64\"):\n self.dtype = dtype\n\n def make_node(self, *inputs):\n inputs = list(map(tt.as_tensor_variable, inputs))\n if not all(a.type == inputs[0].type for a in inputs) or (\n len(inputs) > 0 and inputs[0].dtype != self.dtype\n ):\n dtype = ts.upcast(self.dtype, *[i.dtype for i in inputs])\n # upcast the input to the determined dtype,\n # but don't downcast anything\n assert dtype == self.dtype, (\n \"The upcast of the inputs to MakeVector should match the \"\n \"dtype given in __init__.\"\n )\n if not all(self.dtype == tt.cast(i, dtype=dtype).dtype for i in inputs):\n raise TypeError(\n \"MakeVector.make_node expected inputs\"\n \" upcastable to %s. 
got %s\"\n % (self.dtype, str([i.dtype for i in inputs]))\n )\n inputs = [tt.cast(i, dtype=dtype) for i in inputs]\n assert all(self.dtype == a.dtype for a in inputs)\n assert all(a.ndim == 0 for a in inputs)\n\n if inputs:\n dtype = inputs[0].type.dtype\n else:\n dtype = self.dtype\n # bcastable = (len(inputs) == 1)\n bcastable = False\n otype = tt.TensorType(broadcastable=(bcastable,), dtype=dtype)\n return tt.Apply(self, inputs, [otype()])\n\n def perform(self, node, inputs, out_):\n (out,) = out_\n # not calling aesara._asarray as optimization\n if (out[0] is None) or (out[0].size != len(inputs)):\n out[0] = aesara._asarray(inputs, dtype=node.outputs[0].dtype)\n else:\n # assume that out has correct dtype. there is no cheap way to check\n out[0][...] = inputs\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_code(self, node, name, inp, out_, props):\n (out,) = out_\n # Shouldn't use PyArray_TYPE(inp[0]) for the dtype\n # when len(inp) == 0 (we need to support this case.\n # So there will be (1 * nb_dtype) + ((nb len(inp) - 1 ))\n # different c code with the following algo\n out_shape = len(inp)\n out_num = np.dtype(node.outputs[0].dtype).num\n # don't use dtype_%(out)s as when check_input=False, it isn't defined.\n out_dtype = node.outputs[0].type.dtype_specs()[1]\n if len(inp) > 0:\n assert self.dtype == node.inputs[0].dtype\n out_num = \"PyArray_TYPE(%s)\" % inp[0]\n\n ret = (\n \"\"\"\n npy_intp dims[1];\n dims[0] = %(out_shape)s;\n if(!%(out)s || PyArray_DIMS(%(out)s)[0] != %(out_shape)s){\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject*)PyArray_EMPTY(1, dims, %(out_num)s, 0);\n }\n \"\"\"\n % locals()\n )\n for idx, i in enumerate(inp):\n ret += (\n \"\"\"\n *((%(out_dtype)s *)PyArray_GETPTR1(%(out)s, %(idx)s)) = *((%(out_dtype)s *) PyArray_DATA(%(i)s));\n \"\"\"\n % locals()\n )\n return ret\n\n def infer_shape(self, node, ishapes):\n return [(len(ishapes),)]\n\n def grad(self, inputs, output_gradients):\n # If the output is of an integer dtype, no gradient shall pass\n if self.dtype in tt.discrete_dtypes:\n return [ipt.zeros_like().astype(aesara.config.floatX) for ipt in inputs]\n\n grads = []\n for i, inp in enumerate(inputs):\n grads.append(output_gradients[0][i])\n return grads\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\n\nmake_vector = MakeVector()\n\n\nclass MakeVectorPrinter:\n def process(self, r, pstate):\n if r.owner is None:\n raise TypeError(\"Can only print make_vector.\")\n elif isinstance(r.owner.op, MakeVector):\n old_precedence = getattr(pstate, \"precedence\", None)\n try:\n pstate.precedence = 1000\n s = [pstate.pprinter.process(input) for input in r.owner.inputs]\n finally:\n pstate.precedence = old_precedence\n return \"[%s]\" % \", \".join(s)\n else:\n raise TypeError(\"Can only print make_vector.\")\n\n\ntt.pprint.assign(MakeVector, MakeVectorPrinter())\n\n\nclass ShapeFeature:\n \"\"\"Graph optimizer for removing all calls to shape().\n\n This optimizer replaces all Shapes and Subtensors of Shapes with\n Shape_i and MakeVector Ops.\n\n This optimizer has several goals:\n\n 1. to 'lift' Shapes to as close to the inputs as possible.\n\n 2. to infer the shape of every node in the graph in terms of the\n input shapes.\n\n 3. remove all fills (T.second, T.fill) from the graph\n\n Lifting shapes as close to the inputs as possible is important for\n canonicalization because it is very bad form to have to compute\n something just to know how big it will be. 
Firstly, it is a waste\n of time to compute such outputs. But it is important to get rid\n of these outputs as early as possible in the compilation process\n because the extra computations make it appear as if many internal\n graph nodes have multiple clients. Many optimizations refuse to\n work on nodes with multiple clients.\n\n Lifting is done by using an `<Op>.infer_shape` function if one is\n present, or else using a conservative default. An Op that\n supports shape-lifting should define a infer_shape(self, node,\n input_shapes) function. The argument input_shapes is a tuple of\n tuples... there is an interior tuple for each input to the node.\n The tuple has as many elements as dimensions. The element in\n position i of tuple j represents the i'th shape component of the\n j'th input. The function should return a tuple of tuples. One\n output tuple for each node.output. Again, the i'th element of the\n j'th output tuple represents the output[j].shape[i] of the\n function. If an output is not a TensorType, then None should be\n returned instead of a tuple for that output.\n\n For example the infer_shape for a matrix-matrix product would accept\n input_shapes=((x0,x1), (y0,y1)) and return ((x0, y1),).\n\n Inferring the shape of internal nodes in the graph is important\n for doing size-driven optimizations. If we know how big various\n intermediate results will be, we can estimate the cost of many Ops\n accurately, and generate c-code that is specific [e.g. unrolled]\n to particular sizes.\n\n In cases where you cannot figure out the shape, raise a ShapeError.\n\n Notes\n -----\n Right now there is only the ConvOp that could really take\n advantage of this shape inference, but it is worth it even\n just for the ConvOp. All that's necessary to do shape\n inference is 1) to mark shared inputs as having a particular\n shape, either via a .tag or some similar hacking; and 2) to\n add an optional In() argument to promise that inputs will\n have a certain shape (or even to have certain shapes in\n certain dimensions). We can't automatically infer the shape of\n shared variables as they can change of shape during the\n execution by default. (NOT IMPLEMENTED YET, BUT IS IN TRAC)\n\n\n **Using Shape information in Optimizations**\n\n To use this shape information in OPTIMIZATIONS, use the\n ``shape_of`` dictionary.\n\n For example:\n\n .. code-block:: python\n\n try:\n shape_of = node.fgraph.shape_feature.shape_of\n except AttributeError:\n # This can happen when the mode doesn't include the ShapeFeature.\n return\n\n shape_of_output_zero = shape_of[node.output[0]]\n\n The ``shape_of_output_zero`` symbol will contain a tuple, whose\n elements are either integers or symbolic integers.\n\n TODO: check to see if the symbols are necessarily\n non-constant... or are integer literals sometimes Aesara\n constants?? That would be confusing.\n\n \"\"\"\n\n def get_node_infer_shape(self, node):\n try:\n shape_infer = node.op.infer_shape\n except AttributeError:\n shape_infer = self.default_infer_shape\n\n try:\n o_shapes = shape_infer(node, [self.shape_of[r] for r in node.inputs])\n except ShapeError:\n o_shapes = self.default_infer_shape(\n node, [self.shape_of[r] for r in node.inputs]\n )\n except NotImplementedError as e:\n raise NotImplementedError(\n \"Code called by infer_shape failed raising a \"\n \"NotImplementedError. Raising NotImplementedError to \"\n \"indicate that a shape cannot be computed is no longer \"\n \"supported, and one should now use tensor.ShapeError \"\n \"instead. 
The original exception message is: %s\" % e\n ).with_traceback(e.__traceback__)\n except Exception as e:\n msg = (\n \"Failed to infer_shape from Op %s.\\nInput shapes: \"\n \"%s\\nException encountered during infer_shape: \"\n \"%s\\nException message: %s\\nTraceback: %s\"\n ) % (\n node.op,\n [self.shape_of[r] for r in node.inputs],\n type(e),\n str(e),\n traceback.format_exc(),\n )\n if config.on_shape_error == \"raise\":\n raise Exception(msg).with_traceback(e.__traceback__)\n else:\n _logger.warning(msg)\n o_shapes = self.default_infer_shape(\n node, [self.shape_of[r] for r in node.inputs]\n )\n\n return o_shapes\n\n def get_shape(self, var, idx):\n \"\"\"Optimization can call this to get the current shape_i\n\n It is better to call this then use directly shape_of[var][idx]\n as this method should update shape_of if needed.\n\n TODO: Up to now, we don't update it in all cases. Update in all cases.\n \"\"\"\n r = self.shape_of[var][idx]\n if (\n r.owner\n and isinstance(r.owner.op, Shape_i)\n and r.owner.inputs[0] not in var.fgraph.variables\n ):\n assert var.owner\n node = var.owner\n # recur on inputs\n for i in node.inputs:\n if getattr(i, \"ndim\", None) > 0:\n self.get_shape(i, 0)\n o_shapes = self.get_node_infer_shape(node)\n assert len(o_shapes) == len(node.outputs)\n\n # Only change the variables and dimensions that would introduce\n # extra computation\n for new_shps, out in zip(o_shapes, node.outputs):\n if not hasattr(out, \"ndim\"):\n continue\n\n merged_shps = list(self.shape_of[out])\n changed = False\n for i in range(out.ndim):\n n_r = merged_shps[i]\n if (\n n_r.owner\n and isinstance(n_r.owner.op, Shape_i)\n and n_r.owner.inputs[0] not in var.fgraph.variables\n ):\n changed = True\n merged_shps[i] = new_shps[i]\n if changed:\n self.set_shape(out, merged_shps, override=True)\n r = self.shape_of[var][idx]\n return r\n\n def shape_ir(self, i, r):\n \"\"\"Return symbolic r.shape[i] for tensor variable r, int i.\"\"\"\n if hasattr(r.type, \"broadcastable\") and r.type.broadcastable[i]:\n return self.lscalar_one\n else:\n # Do not call make_node for test_value\n s = Shape_i(i)(r)\n try:\n s = get_scalar_constant_value(s)\n except NotScalarConstantError:\n pass\n return s\n\n def shape_tuple(self, r):\n \"\"\"Return a tuple of symbolic shape vars for tensor variable r.\"\"\"\n if not hasattr(r, \"ndim\"):\n # This happen for NoneConst.\n return None\n return tuple([self.shape_ir(i, r) for i in range(r.ndim)])\n\n def default_infer_shape(self, node, i_shapes):\n \"\"\"Return a list of shape tuple or None for the outputs of node.\n\n This function is used for Ops that don't implement infer_shape.\n Ops that do implement infer_shape should use the i_shapes parameter,\n but this default implementation ignores it.\n\n \"\"\"\n rval = []\n for r in node.outputs:\n try:\n rval.append(self.shape_tuple(r))\n except AttributeError:\n rval.append(None)\n return rval\n\n def unpack(self, s_i, var):\n \"\"\"Return a symbolic integer scalar for the shape element s_i.\n\n The s_i argument was produced by the infer_shape() of an Op subclass.\n\n var: the variable that correspond to s_i. 
This is just for\n error reporting.\n\n \"\"\"\n # unpack the s_i that the Op returned\n assert s_i is not None\n if s_i == 1:\n # don't make the optimizer merge a zillion ones together\n # by always returning the same object to represent 1\n return self.lscalar_one\n if isinstance(s_i, float) and int(s_i) == s_i:\n s_i = int(s_i)\n if isinstance(s_i, (np.integer, int)) or (\n isinstance(s_i, np.ndarray) and s_i.ndim == 0\n ):\n # this shape is a constant\n if s_i < 0:\n msg = \"There is a negative shape in the graph!\"\n msg += gof.utils.get_variable_trace_string(var)\n # The rest of the pipeline don't handle correctly this\n # case. So we have 2 choices, stop compilation or\n # consider the shape as unknow. As we have more\n # chance to give the stack trace here then later, I\n # choose that options as it would give better error\n # message.\n raise AssertionError(msg)\n return tt.constant(s_i, dtype=\"int64\")\n if isinstance(s_i, (tuple, list)):\n # this dimension is the same as many of the inputs\n # which tells us that if one of the inputs is known,\n # the others all become known.\n # TODO: should be implemented in Elemwise, and Dot\n #\n # worst case, we loop over shape_of and replace things\n raise NotImplementedError(s_i)\n\n # s_i is x.shape[i] for some x, we change it to shape_of[x][i]\n if (\n s_i.owner\n and isinstance(s_i.owner.op, Subtensor)\n and s_i.owner.inputs[0].owner\n and isinstance(s_i.owner.inputs[0].owner.op, Shape)\n ):\n assert s_i.ndim == 0\n assert len(s_i.owner.op.idx_list) == 1\n\n # The current Subtensor always put constant index in the graph.\n # This was not True in the past. So call the Subtensor function\n # that will return the right index.\n idx = get_idx_list(s_i.owner.inputs, s_i.owner.op.idx_list)\n assert len(idx) == 1\n idx = idx[0]\n try:\n i = get_scalar_constant_value(idx)\n except NotScalarConstantError:\n pass\n else:\n # Executed only if no exception was raised\n x = s_i.owner.inputs[0].owner.inputs[0]\n # x should already have been imported, and should be in shape_of.\n s_i = self.shape_of[x][i]\n\n if s_i.type.dtype in tt.integer_dtypes:\n if getattr(s_i.type, \"ndim\", 0):\n raise TypeError(\"Shape element must be scalar\", s_i)\n return s_i\n else:\n raise TypeError(\n \"Unsupported shape element\", s_i, type(s_i), getattr(s_i, \"type\", None)\n )\n\n def set_shape(self, r, s, override=False):\n \"\"\"Assign the shape `s` to previously un-shaped variable `r`.\n\n Parameters\n ----------\n r : a variable\n s : None or a tuple of symbolic integers\n override : If False, it mean r is a new object in the fgraph.\n If True, it mean r is already in the fgraph and we want to\n override its shape.\n\n \"\"\"\n if not override:\n assert r not in self.shape_of, \"r already in shape_of\"\n if s is None:\n self.shape_of[r] = s\n else:\n if not isinstance(s, (tuple, list)):\n raise TypeError(\"shapes must be tuple/list\", (r, s))\n\n if r.ndim != len(s):\n sio = StringIO()\n aesara.printing.debugprint(r, file=sio, print_type=True)\n raise AssertionError(\n \"Something inferred a shape with %d dimensions \"\n \"for a variable with %d dimensions\"\n \" for the variable:\\n%s\" % (len(s), r.ndim, sio.getvalue())\n )\n\n shape_vars = []\n for i in range(r.ndim):\n if hasattr(r.type, \"broadcastable\") and r.type.broadcastable[i]:\n shape_vars.append(self.lscalar_one)\n else:\n shape_vars.append(self.unpack(s[i], r))\n assert all(\n [\n not hasattr(r.type, \"broadcastable\")\n or not r.type.broadcastable[i]\n or\n # The two following comparison are a 
speed optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(shape_vars[i])\n or self.lscalar_one.equals(tt.extract_constant(shape_vars[i]))\n for i in range(r.ndim)\n ]\n )\n self.shape_of[r] = tuple(shape_vars)\n for sv in shape_vars:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def update_shape(self, r, other_r):\n \"\"\"Replace shape of r by shape of other_r.\n\n If, on some dimensions, the shape of other_r is not informative,\n keep the shape of r on those dimensions.\n\n \"\"\"\n # other_r should already have a shape\n assert other_r in self.shape_of, (\"other_r not in shape_of\", other_r)\n other_shape = self.shape_of[other_r]\n\n # If other_shape has no information, call is pointless.\n if other_shape is None:\n return\n\n if r in self.shape_of:\n r_shape = self.shape_of[r]\n else:\n # If no info is known on r's shape, use other_shape\n self.set_shape(r, other_shape)\n return\n if (\n other_r.owner\n and r.owner\n and other_r.owner.inputs == r.owner.inputs\n and other_r.owner.op == r.owner.op\n ):\n # We are doing a merge. So the 2 shapes graph will be the\n # same. This is only a speed optimization to call\n # ancestors() less frequently.\n return\n\n # Merge other_shape with r_shape, giving the priority to other_shape\n merged_shape = []\n for i, ps in enumerate(other_shape):\n if r_shape is None and other_shape:\n merged_shape.append(other_shape[i])\n elif (\n ps.owner\n and isinstance(getattr(ps.owner, \"op\", None), Shape_i)\n and ps.owner.op.i == i\n and ps.owner.inputs[0] in (r, other_r)\n ):\n # If other_shape[i] is uninformative, use r_shape[i].\n # For now, we consider 2 cases of uninformative other_shape[i]:\n # - Shape_i(i)(other_r);\n # - Shape_i(i)(r).\n merged_shape.append(r_shape[i])\n elif isinstance(r_shape[i], (Constant, int)):\n # We do this to call less often ancestors and make\n # sure we have the simplest shape possible.\n merged_shape.append(r_shape[i])\n elif isinstance(other_shape[i], (Constant, int)):\n # We do this to call less often ancestors and make\n # sure we have the simplest shape possible.\n merged_shape.append(other_shape[i])\n elif other_shape[i] == r_shape[i]:\n # This mean the shape is equivalent\n # We do not want to do the ancestor check in those cases\n merged_shape.append(r_shape[i])\n elif r_shape[i] in gof.graph.ancestors([other_shape[i]]):\n # Another case where we want to use r_shape[i] is when\n # other_shape[i] actually depends on r_shape[i]. In that case,\n # we do not want to substitute an expression with another that\n # is strictly more complex. 
Such a substitution could also lead\n # to cycles: if (in the future) r_shape[i] gets replaced by an\n # expression of other_shape[i], other_shape[i] may end up\n # depending on itself.\n merged_shape.append(r_shape[i])\n else:\n merged_shape.append(other_shape[i])\n assert all(\n [\n (\n not hasattr(r.type, \"broadcastable\")\n or not r.type.broadcastable[i]\n and not other_r.type.broadcastable[i]\n )\n or\n # The two following comparison are a speed optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(merged_shape[i])\n or self.lscalar_one.equals(\n tt.extract_constant(merged_shape[i], only_process_constants=True)\n )\n for i in range(r.ndim)\n ]\n )\n self.shape_of[r] = tuple(merged_shape)\n for sv in self.shape_of[r]:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def set_shape_i(self, r, i, s_i):\n \"\"\"Replace element i of shape_of[r] by s_i\"\"\"\n assert r in self.shape_of\n prev_shape = self.shape_of[r]\n # prev_shape is a tuple, so we cannot change it inplace,\n # so we build another one.\n new_shape = []\n for j, s_j in enumerate(prev_shape):\n if j == i:\n new_shape.append(self.unpack(s_i, r))\n else:\n new_shape.append(s_j)\n assert all(\n [\n not hasattr(r.type, \"broadcastable\") or not r.type.broadcastable[idx] or\n # The two following comparison are a speed optimization\n # But we never timed this speed optimization!\n self.lscalar_one.equals(new_shape[idx])\n or self.lscalar_one.equals(tt.extract_constant(new_shape[idx]))\n for idx in range(r.ndim)\n ]\n )\n self.shape_of[r] = tuple(new_shape)\n for sv in self.shape_of[r]:\n self.shape_of_reverse_index.setdefault(sv, set()).add(r)\n\n def init_r(self, r):\n \"\"\"Register r's shape in the shape_of dictionary.\"\"\"\n if r not in self.shape_of:\n try:\n self.set_shape(r, self.shape_tuple(r))\n except AttributeError: # XXX: where would this come from?\n self.set_shape(r, None)\n\n def make_vector_shape(self, r):\n return make_vector(*self.shape_of[r])\n\n #\n # Feature interface\n #\n #\n def on_attach(self, fgraph):\n assert not hasattr(fgraph, \"shape_feature\")\n fgraph.shape_feature = self\n # Must be local to the object as otherwise we reuse the same\n # variable for multiple fgraph!\n self.lscalar_one = tt.constant(1, dtype=\"int64\")\n assert self.lscalar_one.type == tt.lscalar\n\n self.shape_of = {}\n # Variable -> tuple(scalars) or None (All tensor vars map to tuple)\n\n self.scheduled = {}\n # Variable ->\n\n self.shape_of_reverse_index = {}\n # shape var -> graph v\n\n for node in fgraph.toposort():\n self.on_import(fgraph, node, reason=\"on_attach\")\n\n def on_detach(self, fgraph):\n self.shape_of = {}\n self.scheduled = {}\n self.shape_of_reverse_index = {}\n del fgraph.shape_feature\n\n def on_import(self, fgraph, node, reason):\n if node.outputs[0] in self.shape_of:\n # this is a revert, not really an import\n for r in node.outputs + node.inputs:\n assert r in self.shape_of\n return\n\n for i, r in enumerate(node.inputs):\n # make sure we have shapes for the inputs\n self.init_r(r)\n\n o_shapes = self.get_node_infer_shape(node)\n\n # this is packed information\n # an element of o_shapes is either None or a tuple\n # elements of the tuple can be either strings, or ints\n if len(o_shapes) != len(node.outputs):\n raise Exception(\n (\n 'The infer_shape method for the Op \"%s\" returned a list '\n + \"with the wrong number of element: len(o_shapes) = %d \"\n + \" != len(node.outputs) = %d\"\n )\n % (str(node.op), len(o_shapes), len(node.outputs))\n )\n\n # 
Ensure shapes are in 'int64'. This is to make sure the assert\n # found in the `local_useless_subtensor` optimization does not fail.\n for sh_idx, sh in enumerate(o_shapes):\n if sh is None:\n continue\n if not isinstance(sh, (list, tuple)):\n raise ValueError(\n \"infer_shape of %s didn't return a list of\"\n \" list. It returned '%s'\" % (str(node), str(o_shapes))\n )\n new_shape = []\n for i, d in enumerate(sh):\n # Note: we ignore any shape element that is not typed (i.e.,\n # does not have a 'dtype' attribute). This means there may\n # still remain int elements that are int32 on 32-bit platforms,\n # but this works with `local_useless_subtensor`, so for now we\n # keep it this way. See #266 for a better long-term fix.\n if getattr(d, \"dtype\", \"int64\") != \"int64\":\n assert d.dtype in tt.discrete_dtypes, (node, d.dtype)\n assert str(d.dtype) != \"uint64\", node\n new_shape += sh[len(new_shape) : i + 1]\n if isinstance(d, tt.Constant):\n casted_d = tt.constant(d.data, dtype=\"int64\")\n else:\n casted_d = tt.cast(d, \"int64\")\n new_shape[i] = casted_d\n if new_shape:\n # We replace the shape with wrong dtype by the one with\n # 'int64'.\n new_shape += sh[len(new_shape) :]\n o_shapes[sh_idx] = tuple(new_shape)\n\n for r, s in zip(node.outputs, o_shapes):\n self.set_shape(r, s)\n\n def on_change_input(self, fgraph, node, i, r, new_r, reason):\n if new_r not in self.shape_of:\n # It happen that the fgraph didn't called on_import for some\n # new_r. This happen when new_r don't have an\n # owner(i.e. it is a constant or an input of the graph)\n # update_shape suppose that r and new_r are in shape_of.\n self.init_r(new_r)\n\n # This tells us that r and new_r must have the same shape if\n # we didn't know that the shapes are related, now we do.\n self.update_shape(new_r, r)\n\n # change_input happens in two cases:\n # 1) we are trying to get rid of r, or\n # 2) we are putting things back after a failed transaction.\n\n # In case 1, if r has a shape_i client, we will want to\n # replace the shape_i of r with the shape of new_r. Say that\n # r is *scheduled*.\n # At that point, node is no longer a client of r, but of new_r\n for (shpnode, idx) in r.clients + [(node, i)]:\n if isinstance(getattr(shpnode, \"op\", None), Shape_i):\n idx = shpnode.op.i\n repl = self.shape_of[new_r][idx]\n if repl.owner is shpnode:\n # This mean the replacement shape object is\n # exactly the same as the current shape object. So\n # no need for replacement. This happen for example\n # with the InputToGpuOptimizer optimizer.\n continue\n if (\n repl.owner\n and repl.owner.inputs[0] is shpnode.inputs[0]\n and isinstance(repl.owner.op, Shape_i)\n and repl.owner.op.i == shpnode.op.i\n ):\n # The replacement is a shape_i of the same\n # input. So no need to do this equivalent\n # replacement.\n continue\n\n if shpnode.outputs[0] in gof.graph.ancestors([repl]):\n raise InconsistencyError(\n \"This substitution would insert a cycle in the graph:\"\n \"node: %s, i: %i, r: %s, new_r: %s\" % (node, i, r, new_r)\n )\n\n self.scheduled[shpnode] = new_r\n # In case 2, if r is a variable that we've scheduled for shape update,\n # then we should cancel it.\n unscheduled = [k for k, v in self.scheduled.items() if v == r]\n for k in unscheduled:\n del self.scheduled[k]\n\n # In either case, r could be in shape_of.values(), that is, r itself\n # is the shape of something. 
In that case, we want to update\n # the value in shape_of, to keep it up-to-date.\n for v in self.shape_of_reverse_index.get(r, []):\n # The reverse index is only approximate. It is not updated on\n # deletion of variables, or on change_input so it might be the\n # case that there are a few extra `v`'s in it that no longer have\n # a shape of r or possibly have been deleted from shape_of\n # entirely. The important thing is that it permits to recall\n # all variables with r in their shape.\n for ii, svi in enumerate(self.shape_of.get(v, [])):\n if svi == r:\n self.set_shape_i(v, ii, new_r)\n self.shape_of_reverse_index[r] = set()\n\n def same_shape(self, x, y, dim_x=None, dim_y=None):\n \"\"\"Return True if we are able to assert that x and y have the\n same shape.\n\n dim_x and dim_y are optional. If used, they should be an index\n to compare only 1 dimension of x and y.\n\n \"\"\"\n sx = self.shape_of[x]\n sy = self.shape_of[y]\n if sx is None or sy is None:\n return False\n if dim_x is not None:\n sx = [sx[dim_x]]\n if dim_y is not None:\n sy = [sy[dim_y]]\n assert len(sx) == len(sy)\n\n # We look on each dimensions we want to compare.\n # If any of them can't be asserted to be equal, return False.\n # Otherwise, we return True at the end.\n for dx, dy in zip(sx, sy):\n if dx is dy:\n continue\n # Need to try to find that they are the same shape. We\n # need to compare the full graph. It could be slow. So I\n # just implement for now the case of Shape_i.\n if not dx.owner or not dy.owner:\n return False\n if not isinstance(dx.owner.op, Shape_i) or not isinstance(\n dy.owner.op, Shape_i\n ):\n return False\n opx = dx.owner.op\n opy = dy.owner.op\n if not (opx.i == opy.i):\n return False\n # FB I'm not sure if this handle correctly constants.\n if dx.owner.inputs[0] == dy.owner.inputs[0]:\n continue\n # To be sure to cover all case, call equal_computation.\n # Can't use aesara.gof.graph.is_same_graph(dx, dy)\n # As it currently expect that dx and dy aren't in a FunctionGraph\n from aesara.gof.graph import equal_computations\n\n if not equal_computations([dx], [dy]):\n return False\n return True\n\n\nclass ShapeOptimizer(Optimizer):\n \"\"\"Optimizer that serves to add ShapeFeature as an fgraph feature.\"\"\"\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(ShapeFeature())\n\n def apply(self, fgraph):\n pass\n\n\nclass UnShapeOptimizer(Optimizer):\n \"\"\"Optimizer remove ShapeFeature as an fgraph feature.\"\"\"\n\n def apply(self, fgraph):\n for feature in fgraph._features:\n if isinstance(feature, ShapeFeature):\n fgraph.remove_feature(feature)\n\n\n# Register it after merge1 optimization at 0. We don't want to track\n# the shape of merged node.\naesara.compile.mode.optdb.register(\n \"ShapeOpt\", ShapeOptimizer(), 0.1, \"fast_run\", \"fast_compile\"\n)\n# Not enabled by default for now. Some crossentropy opt use the\n# shape_feature. They are at step 2.01. uncanonicalize is at step\n# 3. After it goes to 48.5 that move to the gpu. So 10 seem resonable.\naesara.compile.mode.optdb.register(\"UnShapeOpt\", UnShapeOptimizer(), 10)\n\n\ndef local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):\n def local_elemwise_alloc(node):\n \"\"\"\n elemwise(alloc(x, shp), ..., y.TensorType(BROADCAST CONDITION))\n -> elemwise(x, y.TensorType(BROADCAST CONDITION))\n\n elemwise(dimshuffle(alloc(x, shp)),... 
,y.TensorType(BROADCAST CONDITION))\n -> elemwise(x.dimshuffle(...), y.TensorType(BROADCAST CONDITION))\n\n BROADCAST CONDITION: the condition is that the one input that are\n not to be optimized to have the same broadcast pattern as the\n output.\n\n We can change the alloc by a dimshuffle as the elemwise\n already have the shape info. The dimshuffle will be faster\n to exec.\n\n \"\"\"\n if not isinstance(node.op, ElemwiseOP):\n return False\n\n if len(node.outputs) > 1:\n # Ensure all outputs have the same broadcast pattern\n # This is a supposition that I'm not sure is always true.\n assert all(\n [\n o.type.broadcastable == node.outputs[0].type.broadcastable\n for o in node.outputs[1:]\n ]\n )\n\n # The broadcast pattern of the ouptut must match the broadcast\n # pattern of at least one of the inputs.\n if not any(\n [\n i.type.broadcastable == node.outputs[0].type.broadcastable\n for i in node.inputs\n ]\n ):\n return False\n\n def dimshuffled_alloc(i):\n return (\n isinstance(i.owner.op, DimShuffleOP)\n and i.owner.inputs[0].owner\n and isinstance(i.owner.inputs[0].owner.op, AllocOP)\n )\n\n # At least one input must have an owner that is either a AllocOP or a\n # DimShuffleOP with an owner that is a AllocOP -- otherwise there is\n # nothing to optimize.\n if not any(\n [\n i.owner and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))\n for i in node.inputs\n ]\n ):\n return False\n\n # Search for input that we can use as a baseline for the dimensions.\n assert_op_idx = -1\n for idx, i in enumerate(node.inputs):\n if i.type.broadcastable == node.outputs[0].type.broadcastable:\n # Prefer an input that is not a AllocOP nor a DimShuffleOP of a\n # AllocOP so that all allocs can be optimized.\n if not (\n i.owner\n and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))\n ):\n assert_op_idx = idx\n break\n\n # It may be the case that only AllocOP and DimShuffleOP of AllocOP exist.\n if assert_op_idx < 0:\n # We want to optimize as many allocs as possible. When\n # there is more than one then do all but one. number of\n # inputs with alloc or dimshuffle alloc\n l2 = [\n i\n for i in node.inputs\n if (\n i.owner\n and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))\n )\n ]\n # If only 1 alloc or dimshuffle alloc, it is the one we\n # will use for the shape. So no alloc would be removed.\n if len(l2) > 1:\n # l containt inputs with alloc or dimshuffle alloc\n # only. 
Its length will always be at least one, as we\n # checked that before\n l = [\n idx\n for idx, i in enumerate(node.inputs)\n if i.broadcastable == node.outputs[0].broadcastable\n ]\n assert_op_idx = l[0] # The first one is as good as any to use.\n else:\n # Nothing would be optimized!\n return False\n\n assert_op = node.inputs[assert_op_idx]\n cmp_op = assert_op\n new_i = []\n same_shape = node.fgraph.shape_feature.same_shape\n for i in node.inputs:\n # Remove alloc\n if (\n i.owner\n and isinstance(i.owner.op, AllocOP)\n and i.owner.inputs[0].type != i.owner.outputs[0].type\n ):\n # when i.owner.inputs[0].type == i.owner.outputs[0].type we\n # will remove that alloc later\n assert i.type.ndim == cmp_op.ndim\n if aesara.config.experimental.local_alloc_elemwise_assert:\n get_shape = node.fgraph.shape_feature.get_shape\n cond = []\n for idx in range(i.type.ndim):\n if not i.type.broadcastable[idx] and not same_shape(\n i, cmp_op, idx, idx\n ):\n i_shp = get_shape(i, idx)\n cmp_shp = get_shape(cmp_op, idx)\n cond.append(tt.eq(i_shp, cmp_shp))\n if cond:\n assert_op = assert_(assert_op, *cond)\n new_i.append(i.owner.inputs[0])\n\n # Remove Alloc in DimShuffle\n elif i.owner and dimshuffled_alloc(i):\n assert i.type.ndim == cmp_op.type.ndim\n if aesara.config.experimental.local_alloc_elemwise_assert:\n assert_cond = [\n tt.eq(i.shape[idx], cmp_op.shape[idx])\n for idx in range(i.type.ndim)\n if not i.type.broadcastable[idx]\n and not same_shape(i, cmp_op, idx, idx)\n ]\n if assert_cond:\n assert_op = assert_(assert_op, *assert_cond)\n alloc_input = i.owner.inputs[0].owner.inputs[0]\n if alloc_input.ndim != i.owner.inputs[0].ndim:\n # The alloc can add dimension to the value\n # We add a dimshuffle to add them.\n # We let later optimization merge the multiple dimshuffle\n nb_dim_to_add = i.owner.inputs[0].ndim - alloc_input.ndim\n alloc_input = alloc_input.dimshuffle(\n [\"x\"] * nb_dim_to_add + list(range(alloc_input.ndim))\n )\n\n # We need to keep the dimshuffle. 
It could swap axes or\n # add dimensions anywhere.\n r_i = i.owner.op(alloc_input)\n\n # Copy stack trace from i to new_i\n copy_stack_trace(i, r_i)\n new_i.append(r_i)\n else:\n new_i.append(i)\n new_i[assert_op_idx] = assert_op\n\n ret = node.op(*new_i, return_list=True)\n\n # Copy over stack trace from previous outputs to new outputs.\n copy_stack_trace(node.outputs, ret)\n return ret\n\n return local_elemwise_alloc\n\n\n# TODO, global optimizer that lift the assert to the beginning of the graph.\n# TODO, optimize all inputs when possible -- currently when all inputs have\n# an alloc all but one is optimized.\n\nlocal_elemwise_alloc = register_specialize(\n local_optimizer([Elemwise])(\n local_elemwise_alloc_op(Elemwise, Alloc, tt.DimShuffle)\n ),\n \"local_alloc_elemwise\",\n)\n\n\n@local_optimizer([Elemwise])\ndef local_fill_sink(node):\n \"\"\"\n f(fill(a, b), fill(c, d), e) -> fill(c, fill(a, f(b, d, e)))\n f need to be an elemwise that isn't a fill.\n \"\"\"\n if not hasattr(node, \"op\") or not isinstance(node.op, Elemwise) or node.op == fill:\n return False\n models = []\n inputs = []\n for input in node.inputs:\n if input.owner and input.owner.op == fill:\n models.append(input.owner.inputs[0])\n inputs.append(input.owner.inputs[1])\n else:\n inputs.append(input)\n if not models:\n return False\n c = node.op(*inputs)\n for model in models:\n if model.type != c.type:\n c = fill(model, c)\n\n # The newly created node c doesn't has 'clients',\n # so this iteration is took place with node.outputs[0]\n replacements = {node.outputs[0]: c}\n for client, cl_idx in node.outputs[0].clients:\n if (\n hasattr(client, \"op\")\n and isinstance(client.op, Elemwise)\n and not client.op == fill\n ):\n client_inputs = client.inputs[:]\n client_inputs[cl_idx] = c\n new_client = client.op(*client_inputs)\n\n # Add clients to new_client\n new_client.owner.outputs[0].clients = client.outputs[0].clients\n r = local_fill_sink.transform(new_client.owner)\n if not r:\n continue\n replacements.update(r)\n return replacements\n\n\nregister_canonicalize(local_fill_sink)\n\n\n@register_specialize\n@register_stabilize\n# @register_canonicalize # We make full pass after the canonizer phase.\n@local_optimizer([fill])\ndef local_fill_to_alloc(node):\n \"\"\"fill(s,v) -> alloc(v, shape(s))\n\n This is an important optimization because with the shape_to_shape_i\n optimization, the dependency on 's' is often removed.\n\n \"\"\"\n if node.op == fill:\n r, v = node.inputs\n if v.type == node.outputs[0].type:\n # this is a useless fill, erase it.\n rval = [v]\n elif v.type.broadcastable == node.outputs[0].type.broadcastable:\n # this is a cast\n rval = [tt.cast(v, node.outputs[0].type.dtype)]\n elif r.type.broadcastable == node.outputs[0].type.broadcastable:\n # we are broadcasting v somehow, but not r\n o = broadcast_like(v, r, node.fgraph, dtype=v.dtype)\n copy_stack_trace(node.outputs[0], o)\n rval = [o]\n else:\n # we are broadcasting both v and r,\n # the output shape must be computed\n #\n # TODO: implement this case (including a test!)\n #\n # I think the strategy should be to extend the shorter\n # shape vector with 1s (how?) and then take the\n # elementwise max of the two. 
- how to flag an error of\n # shape mismatch where broadcasting should be illegal?\n return\n # TODO: cut out un-necessary dimshuffles of v\n\n assert rval[0].type == node.outputs[0].type, (\n \"rval\",\n rval[0].type,\n \"orig\",\n node.outputs[0].type,\n \"node\",\n node,\n ) # aesara.printing.debugprint(node.outputs[0], file='str'))\n return rval\n\n\n# Register this after stabilize at 1.5 to make sure stabilize don't\n# get affected by less canonicalized graph due to alloc.\ncompile.optdb.register(\n \"local_fill_to_alloc\", in2out(local_fill_to_alloc), 1.51, \"fast_run\"\n)\n# Needed to clean some extra alloc added by local_fill_to_alloc\ncompile.optdb.register(\n \"local_elemwise_alloc\", in2out(local_elemwise_alloc), 1.52, \"fast_run\"\n)\n\n\n@register_canonicalize(\"fast_compile\")\n@register_useless\n@local_optimizer([fill])\ndef local_useless_fill(node):\n \"\"\"fill(s,v) -> v\n\n This optimization is only needed in FAST_COMPILE to make the code\n more readable. Normally, it is done by the local_fill_to_alloc\n opt.\n\n \"\"\"\n if node.op == fill:\n r, v = node.inputs\n if v.type == node.outputs[0].type:\n # this is a useless fill, erase it.\n # also, we don't need to copy over any stack traces here\n return [v]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@register_useless\n@local_optimizer([alloc])\ndef local_useless_alloc(node):\n \"\"\"\n If the input type is the same as the output type (dtype and broadcast)\n there is no change in the shape of the input. So this is just a simple copy\n of the input. This is not needed.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Alloc):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n\n # Check if dtype and broadcast remain the same.\n if input.type == output.type:\n # We don't need to copy over any stack traces here\n return [input]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@local_optimizer([alloc])\ndef local_canonicalize_alloc(node):\n \"\"\"If the input type is the same as the output type (dtype and broadcast)\n there is no change in the shape of the input. So this is just a simple copy\n of the input. This is not needed. 
(as local_useless_alloc)\n\n Also, it will canonicalize alloc by creating Dimshuffle after the\n alloc to introduce the dimensions of constant size 1.\n\n See https://github.com/Aesara/Aesara/issues/4072 to know why this\n is needed.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Alloc):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n\n # Check if dtype and broadcast remain the same.\n if input.type == output.type:\n # We don't need to copy over any stack traces here\n return [input]\n\n # Allow local_merge_alloc to do its work first\n clients = getattr(output, \"clients\", [])\n for client, i in clients:\n if client != \"output\" and isinstance(client.op, Alloc):\n return\n\n # Check if alloc adds a broadcastable dimension with shape 1.\n\n output_shape = node.inputs[1:]\n num_dims_with_size_1_added_to_left = 0\n for i in range(len(output_shape) - input.ndim):\n if extract_constant(output_shape[i], only_process_constants=True) == 1:\n num_dims_with_size_1_added_to_left += 1\n else:\n break\n new_output_shape = output_shape[num_dims_with_size_1_added_to_left:]\n if num_dims_with_size_1_added_to_left > 0 and len(new_output_shape) >= input.ndim:\n if (\n output.broadcastable[num_dims_with_size_1_added_to_left:]\n == input.broadcastable\n ):\n inner = input\n else:\n inner = op(*([input] + new_output_shape))\n dimshuffle_new_order = [\"x\"] * num_dims_with_size_1_added_to_left + list(\n range(len(new_output_shape))\n )\n return [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]\n\n\n# Don't register by default.\n@local_optimizer([AllocEmpty])\ndef local_alloc_empty_to_zeros(node):\n \"\"\"This convert AllocEmpty to Alloc of 0.\n\n This help investigate NaN with NanGuardMode. Not registered by\n default. To activate it, use the Aesara flag\n optimizer_including=alloc_empty_to_zeros. 
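(As an illustrative sketch only, assuming the usual flags mechanism, enabling it could look like ``AESARA_FLAGS=optimizer_including=alloc_empty_to_zeros python my_script.py``, where ``my_script.py`` is a placeholder.)\n    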
This also enable\n the GPU version of this optimizations.\n\n \"\"\"\n if isinstance(node.op, AllocEmpty):\n return [tt.zeros(node.inputs, dtype=node.outputs[0].dtype)]\n\n\ncompile.optdb.register(\n \"local_alloc_empty_to_zeros\",\n in2out(local_alloc_empty_to_zeros),\n # After move to gpu and merge2, before inplace.\n 49.3,\n \"alloc_empty_to_zeros\",\n)\n\n\n@register_specialize\n@register_canonicalize\n@local_optimizer([Shape])\ndef local_shape_to_shape_i(node):\n if node.op == tt.shape:\n # This optimization needs ShapeOpt and fgraph.shape_feature\n if not hasattr(node.fgraph, \"shape_feature\"):\n return\n shape_feature = node.fgraph.shape_feature\n ret = shape_feature.make_vector_shape(node.inputs[0])\n\n # We need to copy over stack trace from input to output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n# TODO: Not sure what type of node we are expecting here\n@register_specialize\n@register_canonicalize\n@local_optimizer(None)\ndef local_track_shape_i(node):\n try:\n shape_feature = node.fgraph.shape_feature\n except AttributeError:\n return\n if node in shape_feature.scheduled:\n # Don't unschedule node as it could be reinserted in the\n # fgraph as we don't change it in the shapefeature internal\n # structure.\n assert isinstance(node.op, Shape_i)\n replacement = shape_feature.scheduled[node]\n return [shape_feature.shape_of[replacement][node.op.i]]\n\n\n@register_specialize\n@register_canonicalize\n@local_optimizer([Subtensor])\ndef local_subtensor_inc_subtensor(node):\n \"\"\"\n Subtensor(SetSubtensor(x, y, idx), idx) -> y\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n x = node.inputs[0]\n if not x.owner or not isinstance(x.owner.op, IncSubtensor):\n return\n if not x.owner.op.set_instead_of_inc:\n return\n\n if x.owner.inputs[2:] == node.inputs[1:] and tuple(\n x.owner.op.idx_list\n ) == tuple(node.op.idx_list):\n out = node.outputs[0]\n y = x.owner.inputs[1]\n # If the dtypes differ, cast y into x.dtype\n if x.dtype != y.dtype:\n y = y.astype(x.dtype)\n if out.type == y.type:\n # if x[idx] and y have the same type, directly return y\n return [y]\n else:\n # The difference is related to broadcasting pattern\n assert out.broadcastable != y.broadcastable\n # We have to alloc y to the shape of x[idx]\n x_subtensor = node.op(x.owner.inputs[0], *x.owner.inputs[2:])\n return [alloc(y, *x_subtensor.shape)]\n else:\n return\n\n\n@register_specialize\n@register_canonicalize\n@local_optimizer([Subtensor])\ndef local_subtensor_remove_broadcastable_index(node):\n \"\"\"\n Remove broadcastable dimension with index 0 or -1\n a[:,:,:,0] -> a.dimshuffle(0,1,2), when\n a.broadcastable = (False, False, False, True)\n a[0,:,-1,:] -> a.dimshuffle(1,3), when\n a.broadcastable = (True, False, True, False)\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n idx = node.op.idx_list\n else:\n return\n\n remove_dim = []\n node_inputs_idx = 1\n for dim, elem in enumerate(idx):\n if isinstance(elem, (ts.Scalar)):\n # The idx is a Scalar, ie a Type. 
This means the actual index\n # is contained in node.inputs[1]\n dim_index = node.inputs[node_inputs_idx]\n if type(dim_index) == ts.ScalarConstant:\n dim_index = dim_index.value\n if dim_index in [0, -1] and node.inputs[0].broadcastable[dim]:\n remove_dim.append(dim)\n node_inputs_idx += 1\n else:\n return\n elif isinstance(elem, slice):\n if elem != slice(None):\n return\n elif isinstance(elem, (int, np.integer)):\n if elem in [0, -1] and node.inputs[0].broadcastable[dim]:\n remove_dim.append(dim)\n else:\n raise TypeError(\"case not expected\")\n\n if len(remove_dim) == 0:\n return\n else:\n all_dim = range(node.inputs[0].ndim)\n remain_dim = [x for x in all_dim if x not in remove_dim]\n return [node.inputs[0].dimshuffle(tuple(remain_dim))]\n\n\n@register_specialize\n@register_canonicalize(\"fast_compile_gpu\")\n@register_useless\n@local_optimizer([Subtensor, AdvancedSubtensor1])\ndef local_subtensor_make_vector(node):\n \"\"\"\n Replace all subtensor(make_vector) like:\n [a,b,c][0] -> a\n [a,b,c][0:2] -> [a,b]\n\n Replace all AdvancedSubtensor1(make_vector) like:\n [a,b,c][[0,2]] -> [a,c]\n\n We can do this for constant indexes.\n\n \"\"\"\n x = node.inputs[0]\n if not x.owner or x.owner.op != make_vector:\n return\n\n if isinstance(node.op, Subtensor):\n # This optimization needs ShapeOpt and fgraph.shape_feature\n try:\n (idx,) = node.op.idx_list\n except Exception:\n # 'how can you have multiple indexes into a shape?'\n raise\n\n if isinstance(idx, (ts.Scalar, tt.TensorType)):\n # The idx is a Scalar, ie a Type. This means the actual index\n # is contained in node.inputs[1]\n old_idx, idx = idx, node.inputs[1]\n assert idx.type == old_idx\n elif isinstance(node.op, AdvancedSubtensor1):\n idx = node.inputs[1]\n else:\n return\n\n if isinstance(idx, (int, np.integer)):\n # We don't need to copy over any stack traces here\n return [x.owner.inputs[idx]]\n elif isinstance(idx, Variable):\n if idx.ndim == 0:\n # if it is a constant we can do something with it\n try:\n v = get_scalar_constant_value(idx, only_process_constants=True)\n if isinstance(v, np.integer):\n # Python 2.4 wants to index only with Python integers\n v = int(v)\n # We don't need to copy over any stack traces here\n try:\n ret = [x.owner.inputs[v]]\n except IndexError:\n raise NotScalarConstantError(\"Bad user graph!\")\n return ret\n except NotScalarConstantError:\n pass\n elif idx.ndim == 1 and isinstance(idx, tt.Constant):\n values = list(map(int, list(idx.value)))\n ret = make_vector(*[x.owner.inputs[v] for v in values])\n\n # Copy over stack trace from previous output to new output\n copy_stack_trace(node.outputs[0], ret)\n ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)\n return [ret]\n else:\n raise TypeError(\"case not expected\")\n elif isinstance(idx, slice):\n # it is a slice of ints and/or Variables\n # check subtensor to see if it can contain constant variables, and if\n # it can, then try to unpack them.\n try:\n const_slice = node.op.get_constant_idx(node.inputs, allow_partial=False)[0]\n ret = make_vector(*x.owner.inputs[const_slice])\n # Copy over stack trace from previous outputs to new output\n copy_stack_trace(node.outputs, ret)\n ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)\n return [ret]\n except NotScalarConstantError:\n pass\n else:\n raise TypeError(\"case not expected\")\n\n\n# TODO: the other optimization for and, or, xor, le and ge see ticket 
#496.\n\n\n@register_useless\n@register_canonicalize(\"fast_compile\")\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_useless_elemwise(node):\n \"\"\"\n eq(x, x) -> 1\n neq(x, x) -> 0\n mul(x) -> x\n add(x) -> x\n identity(x) -> x\n and(x, 1) -> x (if x.dtype == 'bool')\n and(x, 0) -> zeros_like(x)\n or(x, 0) -> x\n or(x, 1) -> ones_like(x) (if x.dtype == 'bool')\n xor(x, x) -> zeros_like(x)\n\n \"\"\"\n if isinstance(node.op, Elemwise):\n # We call zeros_like and one_like with opt=True to generate a\n # cleaner graph.\n dtype = node.outputs[0].dtype\n\n if node.op.scalar_op == ts.eq and len(node.inputs) == 2:\n if node.inputs[0] == node.inputs[1]:\n # it is the same var in the graph. That will always be true\n ret = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy stack trace from input to constant output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n elif node.op.scalar_op == ts.neq and len(node.inputs) == 2:\n if node.inputs[0] == node.inputs[1]:\n # it is the same var in the graph. That will always be false\n ret = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy stack trace from input to constant output\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n elif node.op.scalar_op == ts.mul and len(node.inputs) == 1:\n # No need to copy over any stack trace\n return [node.inputs[0]]\n\n elif node.op.scalar_op == ts.add and len(node.inputs) == 1:\n # No need to copy over any stack trace\n return [node.inputs[0]]\n elif node.op.scalar_op == ts.identity and len(node.inputs) == 1:\n return [node.inputs[0]]\n\n elif isinstance(node.op.scalar_op, ts.AND) and len(node.inputs) == 2:\n\n if isinstance(node.inputs[0], tt.TensorConstant):\n const_val = tt.extract_constant(\n node.inputs[0], only_process_constants=True\n )\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [tt.zeros_like(node.inputs[1], dtype=dtype, opt=True)]\n elif node.outputs[0].dtype == \"bool\":\n # If the output is not Boolean, it is the bitwise AND,\n # and this optimization would be wrong\n return [node.inputs[1].astype(node.outputs[0].dtype)]\n\n if isinstance(node.inputs[1], tt.TensorConstant):\n const_val = tt.extract_constant(\n node.inputs[1], only_process_constants=True\n )\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)]\n elif node.outputs[0].dtype == \"bool\":\n # If the output is not Boolean, it is the bitwise AND,\n # and this optimization would be wrong\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n\n elif isinstance(node.op.scalar_op, ts.OR) and len(node.inputs) == 2:\n\n if isinstance(node.inputs[0], tt.TensorConstant):\n const_val = tt.extract_constant(\n node.inputs[0], only_process_constants=True\n )\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [node.inputs[1].astype(node.outputs[0].dtype)]\n elif node.outputs[0].dtype == \"bool\":\n # If the output is not Boolean, it is the bitwise OR,\n # and this optimization would be wrong\n return [tt.ones_like(node.inputs[1], dtype=dtype, opt=True)]\n\n if isinstance(node.inputs[1], tt.TensorConstant):\n const_val = tt.extract_constant(\n node.inputs[1], only_process_constants=True\n )\n if not isinstance(const_val, Variable):\n if const_val == 0:\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n elif node.outputs[0].dtype == \"bool\":\n # If the output is not Boolean, it is the bitwise OR,\n # and this optimization would be wrong\n return 
[tt.ones_like(node.inputs[0], dtype=dtype, opt=True)]\n\n elif isinstance(node.op.scalar_op, ts.XOR) and len(node.inputs) == 2:\n if node.inputs[0] is node.inputs[1]:\n return [tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)]\n\n\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_alloc_unary(node):\n \"\"\"unary(alloc(x, shp)) -> alloc(unary(x), shp)\"\"\"\n if isinstance(node.op, Elemwise) and len(node.inputs) == 1:\n a = node.inputs[0]\n if a.owner and isinstance(a.owner.op, Alloc):\n x = a.owner.inputs[0]\n shp = a.owner.inputs[1:]\n v = node.op(x)\n # T.alloc does not preserve the stacktrace of v,\n # so we need to copy it over from x.\n copy_stack_trace(node.outputs[0], v)\n ret = alloc(tt.cast(v, node.outputs[0].dtype), *shp)\n\n # T.cast does not preserve the stacktrace of x,\n # so we need to copy it over to the output.\n copy_stack_trace([node.outputs[0], a], ret)\n return [ret]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_cast_cast(node):\n \"\"\"cast(cast(x, dtype1), dtype2)\n\n when those contrain:\n dtype1 == dtype2\n OR the base dtype is the same (int, uint, float, complex)\n and the first cast cause an upcast.\n\n \"\"\"\n if not isinstance(node.op, Elemwise) or not isinstance(node.op.scalar_op, ts.Cast):\n return\n x = node.inputs[0]\n if (\n not x.owner\n or not isinstance(x.owner.op, Elemwise)\n or not isinstance(x.owner.op.scalar_op, ts.Cast)\n ):\n return\n\n type1 = x.owner.op.scalar_op.o_type\n type2 = node.op.scalar_op.o_type\n base = x.owner.inputs[0]\n\n if type1 == type2:\n # We don't need to copy over any stack traces here\n return [x]\n\n if is_an_upcast(base.dtype, type1.dtype):\n # Checking for further redundancy. Eg: int8 -> int32 -> int8\n if type2.dtype == base.dtype:\n return x.owner.inputs\n else:\n # Apply the second cast only\n v = node.op(base)\n # Copy stack trace from the output of the original cast\n copy_stack_trace(node.outputs[0], v)\n return [v]\n\n\ndef is_an_upcast(type1, type2):\n \"\"\"Given two data types (as strings), check if converting to\n type2 from type1 constitutes an upcast.\n Differs from aesara.scalar.upcast\n\n \"\"\"\n category = {\n # The first number in the pair is the dtype (bool, uint, int, float,\n # complex). Conversion from higher to lower is never an upcast.\n # The second number roughly indicates the precision. 
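(For example, by this table\n        # 'int8' -> 'float32' counts as an upcast, while 'float64' -> 'float32'\n        # and 'int64' -> 'float16' do not.) 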
Again, conversion\n # from higher to lower is never an upcast.\n \"bool\": (0, 0),\n \"uint8\": (1, 1),\n \"uint16\": (1, 2),\n \"uint32\": (1, 3),\n \"uint64\": (1, 4),\n \"int8\": (2, 1),\n \"int16\": (2, 2),\n \"int32\": (2, 3),\n \"int64\": (2, 4),\n \"float16\": (3, 1.5),\n \"float32\": (3, 2.5),\n \"float64\": (3, 3.5),\n \"complex64\": (4, 3),\n \"complex128\": (4, 4),\n }\n\n cat1 = category[type1]\n cat2 = category[type2]\n\n if cat2[0] >= cat1[0] and cat2[1] > cat1[1]:\n return True\n else:\n return False\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_func_inv(node):\n \"\"\"\n Check for two consecutive operations that are functional inverses\n and remove them from the function graph.\n\n \"\"\"\n inv_pairs = (\n (ts.Deg2Rad, ts.Rad2Deg),\n (ts.Cosh, ts.ArcCosh),\n (ts.Tanh, ts.ArcTanh),\n (ts.Sinh, ts.ArcSinh),\n (ts.Conj, ts.Conj),\n (ts.Neg, ts.Neg),\n (ts.Inv, ts.Inv),\n )\n x = node.inputs[0]\n\n if not isinstance(node.op, Elemwise):\n return\n if not x.owner or not isinstance(x.owner.op, Elemwise):\n return\n\n prev_op = x.owner.op.scalar_op\n node_op = node.op.scalar_op\n\n for inv_pair in inv_pairs:\n if is_inverse_pair(node_op, prev_op, inv_pair):\n # We don't need to copy stack trace, because the optimization\n # is trivial and maintains the earlier stack trace\n return x.owner.inputs\n\n return\n\n\ndef is_inverse_pair(node_op, prev_op, inv_pair):\n \"\"\"\n Given two consecutive operations, check if they are the\n provided pair of inverse functions.\n\n \"\"\"\n node_is_op0 = isinstance(node_op, inv_pair[0])\n node_is_op1 = isinstance(node_op, inv_pair[1])\n prev_is_op0 = isinstance(prev_op, inv_pair[0])\n prev_is_op1 = isinstance(prev_op, inv_pair[1])\n\n return (node_is_op0 and prev_is_op1) or (node_is_op1 and prev_is_op0)\n\n\nclass Assert(Op):\n \"\"\"\n Implements assertion in a computational graph.\n\n Returns the first parameter if the condition is true, otherwise, triggers\n AssertionError.\n\n Notes\n -----\n This Op is a debugging feature. It can be removed from the graph\n because of optimizations, and can hide some possible optimizations to\n the optimizer. Specifically, removing happens if it can be determined\n that condition will always be true. 
Also, the output of the Op must be\n used in the function computing the graph, but it doesn't have to be\n returned.\n\n Examples\n --------\n >>> import aesara\n >>> T = aesara.tensor\n >>> x = T.vector('x')\n >>> assert_op = T.opt.Assert()\n >>> func = aesara.function([x], assert_op(x, x.size<2))\n\n \"\"\"\n\n _f16_ok = True\n __props__ = (\"msg\",)\n view_map = {0: [0]}\n\n check_input = False\n\n def __init__(self, msg=\"Aesara Assert failed!\"):\n self.msg = msg\n\n def __setstate__(self, attrs):\n self.__dict__.update(attrs)\n if not hasattr(self, \"msg\"):\n self.msg = \"Aesara Assert failed!\"\n\n def make_node(self, value, *conds):\n if not isinstance(value, Variable):\n value = tt.as_tensor_variable(value)\n cond = [tt.as_tensor_variable(c) for c in conds]\n assert np.all([c.type.ndim == 0 for c in cond])\n return gof.Apply(self, [value] + cond, [value.type()])\n\n def perform(self, node, inputs, out_):\n (out,) = out_\n v = inputs[0]\n out[0] = v\n assert np.all(inputs[1:]), self.msg\n\n def grad(self, input, output_gradients):\n return output_gradients + [DisconnectedType()()] * (len(input) - 1)\n\n def connection_pattern(self, node):\n return [[1]] + [[0]] * (len(node.inputs) - 1)\n\n def c_code(self, node, name, inames, onames, props):\n value = inames[0]\n out = onames[0]\n check = []\n fail = props[\"fail\"]\n msg = self.msg.replace('\"', '\\\\\"').replace(\"\\n\", \"\\\\n\")\n for idx in range(len(inames) - 1):\n i = inames[idx + 1]\n dtype = node.inputs[idx + 1].dtype\n check.append(\n \"if(!((npy_%(dtype)s*)PyArray_DATA(%(i)s))[0])\"\n '{PyErr_SetString(PyExc_AssertionError,\"%(msg)s\");'\n \"%(fail)s}\" % locals()\n )\n check = \"\\n\".join(check)\n return (\n \"\"\"\n %(check)s\n Py_XDECREF(%(out)s);\n %(out)s = %(value)s;\n Py_INCREF(%(value)s);\n \"\"\"\n % locals()\n )\n\n def c_code_cache_version(self):\n return (3, 0)\n\n def infer_shape(self, node, input_shapes):\n return [input_shapes[0]]\n\n\nassert_ = Assert()\n# Unittest.assert_ is a deprecated name for assertTrue.\n# 2to3 convert tt.opt.assert_ to tt.opt.assertTrue\n# So I define a new name as a work around.\nassert_op = assert_\n\n\n@register_specialize\n@local_optimizer([Assert])\ndef local_remove_useless_assert(node):\n if isinstance(node.op, Assert):\n cond = []\n for c in node.inputs[1:]:\n try:\n const = get_scalar_constant_value(c)\n\n if 0 != const.ndim or const == 0:\n # Should we raise an error here? 
How to be sure it\n # is not catched?\n cond.append(c)\n except NotScalarConstantError:\n cond.append(c)\n\n if len(cond) == 0:\n # We don't need to copy over any stack traces here\n return [node.inputs[0]]\n if len(cond) != len(node.inputs) - 1:\n ret = assert_(node.inputs[0], *cond)\n\n # We copy over stack trace from the output of the original assert\n copy_stack_trace(node.outputs[0], ret)\n return [ret]\n\n\n@local_optimizer([Assert])\ndef local_remove_all_assert(node):\n \"\"\"An optimization disabled by default that removes all asserts from\n the graph.\n\n Notes\n -----\n See the :ref:`unsafe` section to know how to enable it.\n\n \"\"\"\n if not isinstance(node.op, Assert):\n return\n\n # We don't need to copy over any stack traces here\n return [node.inputs[0]]\n\n\n# Disabled by default\ncompile.optdb[\"canonicalize\"].register(\n \"local_remove_all_assert\",\n local_remove_all_assert,\n \"unsafe\",\n use_db_name_as_tag=False,\n)\ncompile.optdb[\"stabilize\"].register(\n \"local_remove_all_assert\",\n local_remove_all_assert,\n \"unsafe\",\n use_db_name_as_tag=False,\n)\ncompile.optdb[\"specialize\"].register(\n \"local_remove_all_assert\",\n local_remove_all_assert,\n \"unsafe\",\n use_db_name_as_tag=False,\n)\ncompile.optdb[\"useless\"].register(\n \"local_remove_all_assert\",\n local_remove_all_assert,\n \"unsafe\",\n use_db_name_as_tag=False,\n)\n\n#######################\n# Constant Canonicalization\n############################\n\n\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_upcast_elemwise_constant_inputs(node):\n \"\"\"This explicitly upcasts constant inputs to elemwise Ops, when\n those Ops do implicit upcasting anyway.\n\n Rationale: it helps merge things like (1-x) and (1.0 - x).\n\n \"\"\"\n if len(node.outputs) > 1:\n return\n try:\n shape_i = node.fgraph.shape_feature.shape_i\n except AttributeError:\n shape_i = None\n if isinstance(node.op, Elemwise):\n scalar_op = node.op.scalar_op\n # print \"aa\", scalar_op.output_types_preference\n if getattr(scalar_op, \"output_types_preference\", None) in (\n ts.upgrade_to_float,\n ts.upcast_out,\n ):\n # this is the kind of op that we can screw with the input\n # dtypes by upcasting explicitly\n output_dtype = node.outputs[0].type.dtype\n new_inputs = []\n for i in node.inputs:\n if i.type.dtype == output_dtype:\n new_inputs.append(i)\n else:\n try:\n # works only for scalars\n cval_i = get_scalar_constant_value(\n i, only_process_constants=True\n )\n if all(i.broadcastable):\n new_inputs.append(\n tt.shape_padleft(tt.cast(cval_i, output_dtype), i.ndim)\n )\n else:\n if shape_i is None:\n return\n new_inputs.append(\n alloc(\n tt.cast(cval_i, output_dtype),\n *[shape_i(d)(i) for d in range(i.ndim)],\n )\n )\n # print >> sys.stderr, \"AAA\",\n # *[Shape_i(d)(i) for d in range(i.ndim)]\n except NotScalarConstantError:\n # for the case of a non-scalar\n if isinstance(i, tt.TensorConstant):\n new_inputs.append(tt.cast(i, output_dtype))\n else:\n new_inputs.append(i)\n\n if new_inputs != node.inputs:\n rval = [node.op(*new_inputs)]\n if rval[0].type != node.outputs[0].type:\n # This can happen for example when floatX=float32\n # and we do the true division between and int64\n # and a constant that will get typed as int8.\n\n # As this is just to allow merging more case, if\n # the upcast don't work, we can just skip it.\n return\n\n # Copy over output stacktrace from before upcasting\n copy_stack_trace(node.outputs[0], rval)\n return rval\n\n\n##################\n# Subtensor opts 
#\n##################\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\n@local_optimizer([IncSubtensor])\ndef local_useless_inc_subtensor(node):\n \"\"\"\n Remove IncSubtensor, when we overwrite the full inputs with the\n new value.\n\n \"\"\"\n if not isinstance(node.op, IncSubtensor):\n return\n if node.op.set_instead_of_inc is False:\n # This is an IncSubtensor, so the init value must be zeros\n try:\n c = get_scalar_constant_value(node.inputs[0], only_process_constants=True)\n if c != 0:\n return\n except NotScalarConstantError:\n return\n if (\n node.inputs[0].ndim != node.inputs[1].ndim\n or node.inputs[0].broadcastable != node.inputs[1].broadcastable\n ):\n # FB: I didn't check if this case can happen, but this opt\n # don't support it.\n return\n # We have a SetSubtensor or an IncSubtensor on zeros\n # If is this IncSubtensor useful?\n\n # Check that we keep all the original data.\n # Put the constant inputs in the slice.\n idx_cst = get_idx_list(node.inputs[1:], node.op.idx_list)\n if all(\n isinstance(e, slice)\n and e.start is None\n and e.stop is None\n and (\n e.step is None\n or tt.extract_constant(e.step, only_process_constants=True) == -1\n )\n for e in idx_cst\n ):\n # IncSubtensor broadcast node.inputs[1] on node.inputs[0]\n # based on run time shapes, so we must check they are the same.\n if not hasattr(node.fgraph, \"shape_feature\"):\n return\n if not node.fgraph.shape_feature.same_shape(node.inputs[0], node.inputs[1]):\n return\n # There is no reverse, so we don't need a replacement.\n if all(e.step is None for e in node.op.idx_list):\n # They are the same shape, so we can remore this IncSubtensor\n return [node.inputs[1]]\n ret = Subtensor(node.op.idx_list)(*node.inputs[1:])\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n@register_canonicalize\n@local_optimizer([AdvancedIncSubtensor1])\ndef local_set_to_inc_subtensor(node):\n \"\"\"\n AdvancedIncSubtensor1(x, x[ilist]+other, ilist, set_instead_of_inc=True) ->\n AdvancedIncSubtensor1(x, other, ilist, set_instead_of_inc=False)\n\n \"\"\"\n if (\n isinstance(node.op, AdvancedIncSubtensor1)\n and node.op.set_instead_of_inc\n and node.inputs[1].owner\n and isinstance(node.inputs[1].owner.op, Elemwise)\n and isinstance(node.inputs[1].owner.op.scalar_op, ts.Add)\n ):\n addn = node.inputs[1].owner\n subn = None\n other = None\n\n if addn.inputs[0].owner and isinstance(\n addn.inputs[0].owner.op, AdvancedSubtensor1\n ):\n subn = addn.inputs[0].owner\n other = addn.inputs[1]\n elif addn.inputs[1].owner and isinstance(\n addn.inputs[1].owner.op, AdvancedSubtensor1\n ):\n subn = addn.inputs[1].owner\n other = addn.inputs[0]\n else:\n return\n if subn.inputs[1] != node.inputs[2] or subn.inputs[0] != node.inputs[0]:\n return\n ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])\n # Copy over previous output stacktrace\n # Julian: I'm not sure about this at all...\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\n@local_optimizer([Subtensor])\ndef local_useless_slice(node):\n \"\"\"\n Remove Subtensor of the form X[0, :] -> X[0]\n \"\"\"\n if isinstance(node.op, Subtensor):\n slices = get_idx_list(node.inputs, node.op.idx_list)\n last_slice = len(slices)\n for s in slices[::-1]:\n # check if slice and then check slice indices\n if (\n isinstance(s, slice)\n and s.start is None\n and s.stop is None\n and (\n s.step is None\n or tt.extract_constant(s.step, 
only_process_constants=True) == 1\n )\n ):\n last_slice -= 1\n else:\n break\n # check if we removed something\n if last_slice < len(slices):\n subtens = Subtensor(slices[:last_slice])\n sl_ins = Subtensor.collapse(\n slices[:last_slice], lambda x: isinstance(x, Variable)\n )\n out = subtens(node.inputs[0], *sl_ins)\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, out)\n return [out]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Subtensor, AdvancedSubtensor1])\ndef local_useless_subtensor(node):\n \"\"\"\n Remove Subtensor/AdvancedSubtensor1 if it takes the full input. In the\n AdvancedSubtensor1 case, the full input is taken when the indices are\n equivalent to `arange(0, input.shape[0], 1)` using either an explicit\n list/vector or the ARange op.\n\n \"\"\"\n\n # If the optimization is tried over a node that is not a part of graph before\n if not hasattr(node, \"fgraph\"):\n return\n\n # This optimization needs ShapeOpt and fgraph.shape_feature\n if not hasattr(node.fgraph, \"shape_feature\"):\n return\n\n shape_of = node.fgraph.shape_feature.shape_of\n\n if isinstance(node.op, Subtensor):\n cdata = node.op.get_constant_idx(\n node.inputs, allow_partial=True, only_process_constants=True\n )\n for pos, idx in enumerate(cdata):\n if not isinstance(idx, slice):\n # If idx is not a slice, this means we remove this dimension\n # from the output, so the subtensor is not useless\n return False\n if idx.start is not None and idx.start != 0:\n # If the start of the slice is different from 0, or is a\n # variable, then we assume the subtensor is not useless\n return False\n if idx.step is not None and idx.step != 1:\n # If we are going backwards, or skipping elements, then this\n # is not a useless subtensor\n return False\n\n for pos, idx in enumerate(cdata):\n\n length_pos = shape_of[node.inputs[0]][pos]\n\n if isinstance(idx.stop, (int, np.integer)):\n length_pos_data = sys.maxsize\n try:\n length_pos_data = get_scalar_constant_value(\n length_pos, only_process_constants=True\n )\n except NotScalarConstantError:\n pass\n\n if idx.stop < length_pos_data:\n return False\n elif isinstance(idx.stop, gof.Variable):\n length_pos_shape_i = idx.stop\n # length_pos is a tensor variable, but length_pos_shape_i\n # is a scalar variable. We try to see if they represent\n # the same underlying variable.\n if length_pos_shape_i.owner and isinstance(\n length_pos_shape_i.owner.op, ScalarFromTensor\n ):\n length_pos_shape_i = length_pos_shape_i.owner.inputs[0]\n elif length_pos.owner and isinstance(\n length_pos.owner.op, TensorFromScalar\n ):\n length_pos = length_pos.owner.inputs[0]\n else:\n # We did not find underlying variables of the same type\n return False\n\n # The type can be different: int32 vs int64. length_pos\n # should always be int64 as that is what the shape\n # tracker keep. 
Subtensor accept any scalar int{8,16,32,64}\n # as index type.\n assert str(length_pos.type.dtype) == \"int64\"\n assert str(length_pos_shape_i.type.dtype) in [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n ]\n\n # length_pos_shape_i cannot be None\n if length_pos_shape_i != length_pos:\n return False\n elif idx.stop is None:\n pass\n else:\n return False\n elif isinstance(node.op, AdvancedSubtensor1):\n # get length of the indexed tensor along the first axis\n try:\n length = get_scalar_constant_value(\n shape_of[node.inputs[0]][0], only_process_constants=True\n )\n except NotScalarConstantError:\n return False\n\n # get index (which must be a vector by definition)\n idx = node.inputs[1]\n\n # `idx` must be equivalent to [0,1,...,shape[0] - 1] to qualify for\n # this optimization\n if isinstance(idx, tt.Constant):\n idx = idx.value\n if len(idx) != length:\n return False\n if np.any(idx != np.arange(length)):\n return False\n elif idx.owner is not None and isinstance(idx.owner.op, tt.ARange):\n try:\n start, stop, step = map(\n lambda x: get_scalar_constant_value(x, only_process_constants=True),\n idx.owner.inputs,\n )\n except NotScalarConstantError:\n return False\n\n if start != 0:\n return False\n if stop != length:\n return False\n if step != 1:\n return False\n else:\n return False\n else:\n return False\n\n # We don't need to copy over any stacktrace here,\n # because previous stacktrace should suffice.\n return [node.inputs[0]]\n\n\n# fast_compile to allow opt subtensor(cast{float32}(make_vector))\n@register_canonicalize(\"fast_compile\")\n@local_optimizer([Subtensor])\ndef local_subtensor_lift(node):\n \"\"\"\n unary(x)[idx] -> unary(x[idx])#any broadcast pattern.\n\n Handles the following unary ops:\n elemwise(x,...)[idx] -> elemwise(x[idx],...)\n when x,... 
are broadcasted scalar or not broadcasted at all\n rebroadcast(x)[idx] => rebroadcast(x[idx])\n\n \"\"\"\n if isinstance(node.op, Subtensor):\n u = node.inputs[0]\n if not u.owner or len(u.clients) > 1:\n return False\n\n if isinstance(u.owner.op, Elemwise) and len(u.owner.inputs) == 1:\n idx = node.inputs[1:]\n x_idx = node.op(u.owner.inputs[0], *idx)\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs, x_idx)\n ret = u.owner.op(x_idx)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n\n if isinstance(u.owner.op, Elemwise):\n new_inputs = []\n if all([sum(i.type.broadcastable) == 0 for i in u.owner.inputs]):\n # There is no broadcastable in the inputs\n idx = node.inputs[1:]\n new_inputs = [node.op(i, *idx) for i in u.owner.inputs]\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], new_inputs)\n\n ret = u.owner.op(*new_inputs)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n elif all(\n [sum(i.type.broadcastable) in [i.ndim, 0] for i in u.owner.inputs]\n ):\n # There is no broadcastable in the inputs or it is scalar\n idx = node.inputs[1:]\n new_inputs = []\n for i in u.owner.inputs:\n if sum(i.type.broadcastable) == 0:\n new_inputs.append(node.op(i, *idx))\n else:\n # If the subtensor remove some dims, we must\n # lower the number of dimensions of this scalar.\n if node.outputs[0].ndim == i.ndim:\n new_inputs.append(i)\n else:\n new_inputs.append(\n i.dimshuffle([\"x\"] * node.outputs[0].ndim)\n )\n\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], new_inputs)\n\n ret = u.owner.op(*new_inputs)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], ret)\n return [ret]\n\n if isinstance(u.owner.op, Rebroadcast):\n # make sure that Rebroadcast has only 1 input\n assert len(u.owner.inputs) == 1\n\n # Subtensor might reduce dim., adapt broadcast pattern accordingly\n new_axis = []\n\n # loop through indices being subtensor-ed\n # i indexes broadcastable pattern before subtensor\n # j indexes broadcastable pattern after subtensor\n j = 0\n for (i, x) in enumerate(node.op.idx_list):\n # if its not a slice, it will reduce the dimension, should\n # not appear in the broascastable dimensions\n if isinstance(x, slice):\n new_axis += [(j, u.broadcastable[i])]\n j += 1\n # now keep the broadcastable pattern of all\n # items not appearing in subtensor list\n for i in range(len(node.op.idx_list), len(u.broadcastable)):\n new_axis += [(j, u.broadcastable[i])]\n j += 1\n\n subt_x = node.op(u.owner.inputs[0], *node.inputs[1:])\n # Copy over previous output stacktrace\n copy_stack_trace(node.outputs[0], subt_x)\n\n rbcast_subt_x = Rebroadcast(*new_axis)(subt_x)\n # Copy over previous output stacktrace\n # and stacktrace from previous unary operation\n copy_stack_trace([node.outputs[0], node.inputs[0]], rbcast_subt_x)\n\n return [rbcast_subt_x]\n\n\ndef merge_two_slices(slice1, len1, slice2, len2):\n \"\"\"\n This function merges two slices into a single slice. 
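For instance, slicing with\n    ``[2:10]`` and then ``[1:4]`` should select the same elements as a single\n    ``[3:6]`` applied to the original tensor (an informal sketch with concrete\n    integers; the slice built here stays symbolic and is not simplified into\n    such a canonical form). 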
The code works on\n the assumption that:\n\n a) slice1 is actually a slice and not an index, while slice2\n can be just an index.\n\n b) the two slices **have been applied consecutively** on the same\n tensor\n\n The output slice is **not** in canonical form, but actually just a slice\n that can be applied to a tensor to produce the same output as applying\n the two consecutive slices.\n ``len1`` is the length of the tensor **before** applying the first slice,\n while ``len2`` is the length **after** applying the first slice.\n \"\"\"\n list_opt = [\n local_abs_merge,\n local_mul_switch_sink,\n local_upcast_elemwise_constant_inputs,\n local_useless_switch,\n constant_folding,\n ]\n\n if type(slice1) is not slice:\n raise ValueError(\n (\n \"First provided slice should actually be of type\"\n \"slice and not an index !\"\n ),\n slice1,\n )\n sl1, reverse1 = get_canonical_form_slice(slice1, len1)\n sl2, reverse2 = get_canonical_form_slice(slice2, len2)\n\n if type(sl2) is not slice:\n if reverse1 is None:\n # The first slice is not in reverse, which makes things a lot\n # more clear.\n # In this case we need to take care only of the special cases:\n # len2 <=0 -> throw index error regardless of sl2\n # sl2 > len2 -> throw index error\n # sl2 < -len2 -> throw index error\n # To get a index error we simply use len1+1 to indicate we are\n # out of bounds, because passing this index through the formula\n # of getting the mixed slice is not guaranteed to result in an\n # index error. The **issue though** if that the error will\n # complain about accessing element len1+1 which is probably not\n # too intuitive for the user\n val = sl1.start + sl2 * sl1.step\n val = tt.switch(tt.le(len2, 0), len1 + 1, val)\n val = tt.switch(tt.ge(sl2, len2), len1 + 1, val)\n val = tt.switch(tt.lt(sl2, 0), -len1 - 1, val)\n if sl1.step:\n val = tt.switch(tt.eq(sl1.step, 0), len1 + 1, val)\n val = pre_greedy_local_optimizer(list_opt, val)\n return val\n else:\n # We are in the more complex case when we do not actually know\n # if the first slice was in reverse or not.\n # in case it was not in reverse:\n p_val = sl1.start + sl2 * sl1.step\n # case it was in reverse we need to realize that we do not want\n # the k-th element from sl.start but the k-th element from\n # sl.stop backwards\n n_val = sl1.stop - 1 - sl2 * sl1.step\n if config.warn.subtensor_merge_bug:\n warnings.warning(\n \"Your current code is fine, but Aesara versions \"\n \"prior to 0.5rc2 might have given an incorrect result. \"\n \"To disable this warning, set the Aesara flag \"\n \"warn.subtensor_merge_bug to False.\"\n )\n # we need to pick either n_val or p_val and then follow same\n # steps as above for covering the index error cases\n val = tt.switch(tt.lt(reverse1, 0), n_val, p_val)\n val = tt.switch(tt.le(len2, 0), len1 + 1, val)\n val = tt.switch(tt.ge(sl2, len2), len1 + 1, val)\n val = tt.switch(tt.lt(sl2, 0), -len1 - 1, val)\n if sl1.step:\n val = tt.switch(tt.eq(sl1.step, 0), len1 + 1, val)\n val = pre_greedy_local_optimizer(list_opt, val)\n return val\n else:\n # We are deleaing with two slices that need to be put together\n # according to the two steps we have 4 different combinations of\n # positive/negative. 
I will denote the case I'm looking at by\n # suffixes to the variables (nn,np,pn,pp):\n flen = sl2.stop - sl2.start\n p_step = sl1.step * sl2.step\n n_step = sl1.step * sl2.step * -1\n\n pp_start = tt.minimum(sl1.start + sl2.start * sl1.step, sl1.stop)\n pp_stop = tt.minimum(sl1.start + sl2.stop * sl1.step, sl1.stop)\n\n pn_stop = sl1.start + (sl2.start - 1) * sl1.step\n pn_stop = tt.switch(\n tt.and_(tt.lt(pn_stop, 0), tt.gt(flen, 0)),\n -len1 - 1,\n tt.minimum(pn_stop, sl1.stop),\n )\n pn_start = sl1.start + (sl2.stop - 1) * sl1.step\n pn_start = tt.minimum(pn_start, sl1.stop)\n pn_start = tt.maximum(pn_start, 0)\n\n np_stop = sl1.stop - sl2.stop * sl1.step - 1\n np_stop = tt.switch(\n tt.and_(tt.lt(np_stop, 0), tt.gt(flen, 0)),\n -len1 - 1,\n tt.maximum(sl1.start - 1, np_stop),\n )\n np_start = tt.maximum(sl1.start, sl1.stop - sl2.start * sl1.step - 1)\n\n nn_start = tt.maximum(sl1.start, (sl1.stop - 1) - (sl2.stop - 1) * sl1.step)\n nn_stop = tt.maximum(sl1.start, sl1.stop - sl2.start * sl1.step)\n\n start = tt.switch(\n tt.lt(reverse2 * reverse1, 0),\n tt.switch(tt.lt(reverse1, 0), np_start, pn_start),\n tt.switch(tt.lt(reverse1, 0), nn_start, pp_start),\n )\n\n stop = tt.switch(\n tt.lt(reverse2 * reverse1, 0),\n tt.switch(tt.lt(reverse1, 0), np_stop, pn_stop),\n tt.switch(tt.lt(reverse1, 0), nn_stop, pp_stop),\n )\n\n step = tt.switch(tt.lt(reverse2 * reverse1, 0), n_step, p_step)\n start = tt.switch(tt.le(flen, 0), 0, start)\n stop = tt.switch(tt.le(flen, 0), 0, stop)\n\n # The canonical form of the slice is pretty complicated\n # and is not simplified. We simplify it in advance here\n # as otherwise this create too many useless optimization that\n # DebugMode must check.\n start = pre_greedy_local_optimizer(list_opt, start)\n stop = pre_greedy_local_optimizer(list_opt, stop)\n step = pre_greedy_local_optimizer(list_opt, step)\n start = pre_greedy_local_optimizer(list_opt, start)\n stop = pre_greedy_local_optimizer(list_opt, stop)\n step = pre_greedy_local_optimizer(list_opt, step)\n\n # Pre merge constant for the same reason.\n start, stop, step = pre_constant_merge([start, stop, step])\n\n return slice(start, stop, step)\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Subtensor])\ndef local_subtensor_merge(node):\n \"\"\"\n Refactored optimization to deal with all cases of tensor merging.\n Given a subgraph of the form Subtensor(Subtensor(u)), the optimization\n expresses all slices in a canonical form, and then merges them together.\n\n \"\"\"\n\n if isinstance(node.op, Subtensor):\n u = node.inputs[0]\n if u.owner and isinstance(u.owner.op, Subtensor):\n # We can merge :)\n # x actual tensor on which we are picking slices\n x = u.owner.inputs[0]\n # slices of the first applied subtensor\n slices1 = get_idx_list(u.owner.inputs, u.owner.op.idx_list)\n slices2 = get_idx_list(node.inputs, node.op.idx_list)\n # Get the shapes of the vectors !\n try:\n # try not to introduce new shape into the graph\n xshape = node.fgraph.shape_feature.shape_of[x]\n ushape = node.fgraph.shape_feature.shape_of[u]\n except AttributeError:\n # Following the suggested use of shape_feature which should\n # consider the case when the compilation mode doesn't\n # include the ShapeFeature\n xshape = x.shape\n ushape = u.shape\n\n merged_slices = []\n pos_2 = 0\n pos_1 = 0\n while (pos_1 < len(slices1)) and (pos_2 < len(slices2)):\n slice1 = slices1[pos_1]\n if type(slice1) is slice:\n merged_slices.append(\n merge_two_slices(\n slice1, xshape[pos_1], slices2[pos_2], ushape[pos_2]\n )\n 
)\n pos_2 += 1\n else:\n merged_slices.append(slice1)\n pos_1 += 1\n\n if pos_2 < len(slices2):\n merged_slices += slices2[pos_2:]\n else:\n merged_slices += slices1[pos_1:]\n\n merged_slices = tuple(as_index_constant(s) for s in merged_slices)\n subtens = Subtensor(merged_slices)\n\n sl_ins = Subtensor.collapse(\n merged_slices, lambda x: isinstance(x, Variable)\n )\n # Do not call make_node for test_value\n out = subtens(x, *sl_ins)\n\n # Copy over previous output stacktrace\n # and stacktrace from previous slicing operation.\n # Why? Because, the merged slicing operation could have failed\n # because of either of the two original slicing operations\n orig_out = node.outputs[0]\n copy_stack_trace([orig_out, node.inputs[0]], out)\n\n # Restore original broadcastable dimensions that `subtens()` may\n # have been unable to infer again\n if out.type != orig_out.type:\n assert out.dtype == orig_out.dtype\n assert out.ndim == orig_out.ndim\n out = tt.patternbroadcast(out, orig_out.broadcastable)\n copy_stack_trace([orig_out, node.inputs[0]], out)\n return [out]\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\n@local_optimizer([Subtensor])\ndef local_subtensor_of_alloc(node):\n \"\"\"\n\n alloc(val)[x:y] -> alloc(val[...])\n alloc(val)[x:y] -> alloc(val)\n This can be seen as a lift, but it also reduce the number of computation/memory.\n\n \"\"\"\n if not isinstance(node.op, Subtensor):\n return False\n u = node.inputs[0]\n if u.owner is None:\n return False\n if not isinstance(u.owner.op, Alloc):\n return False\n slices = get_idx_list(node.inputs, node.op.idx_list)\n val = u.owner.inputs[0]\n dims = u.owner.inputs[1:]\n assert len(slices) <= len(dims)\n\n # Number of dimensions added to val\n n_added_dims = u.ndim - val.ndim\n # Dimensions of the returned alloc\n nw_dims = []\n # Slices to take from val\n val_slices = []\n\n for i, (sl, dim) in enumerate(zip(slices, dims)):\n # If val was not copied over that dim,\n # we need to take the appropriate subtensor on it.\n if i >= n_added_dims:\n # We check that the corresponding val dimensions was\n # not a broadcasted dimensions.\n if (\n val.type.ndim > (i - n_added_dims)\n and val.type.broadcastable[i - n_added_dims]\n ):\n val_slices.append(slice(None))\n else:\n val_slices.append(sl)\n\n csl, _ = get_canonical_form_slice(sl, dim)\n if type(csl) is not slice:\n # That dimension is removed.\n pass\n else:\n nw_dim = csl.stop - csl.start\n\n if csl.step != 1:\n # Do not add the ceil_intdiv() graphs in the graphs\n # when this is not needed as it prevent detecting the\n # correct broadcast pattern.\n nw_dim = tt.ceil_intdiv(nw_dim, csl.step)\n nw_dims += [nw_dim]\n\n nw_val = val[tuple(val_slices)]\n nw_dims += dims[len(slices) :]\n if nw_val.ndim > len(nw_dims):\n return False\n rval = alloc(nw_val, *nw_dims)\n if type(rval) not in (list, tuple):\n rval = [rval]\n if rval[0].type != node.outputs[0].type:\n # It happen that the make_node() isn't able to infer the same pattern.\n # We know it is safe, so fix that.\n rval[0] = tt.patternbroadcast(rval[0], node.outputs[0].broadcastable)\n\n return rval\n\n\n@register_canonicalize\n@register_stabilize\n@register_specialize\n@local_optimizer([Subtensor])\ndef local_subtensor_of_dot(node):\n \"\"\"\n This optimization translates T.dot(A, B)[idxs] into T.dot(A[idxs_a], B[idxs_b]),\n where idxs_a and idxs_b are defined appropriately.\n\n idxs_a is the first A.ndim-1 entries of idxs,\n and idxs_b is the remaining entries of idxs (if any),\n modified to skip the second-to-last 
dimension of B\n (because dot sums over this dimension).\n\n \"\"\"\n if not isinstance(node.op, Subtensor):\n return\n if not node.inputs[0].owner or not isinstance(node.inputs[0].owner.op, Dot):\n return\n # If there is other node that use the outputs of the dot\n # We don't want to compute twice the sub part.\n if len(node.inputs[0].clients) > 1:\n return\n\n a = node.inputs[0].owner.inputs[0]\n b = node.inputs[0].owner.inputs[1]\n\n idx_list = get_idx_list(node.inputs, node.op.idx_list)\n\n num_a_indices = min(a.ndim - 1, len(idx_list))\n a_indices = idx_list[:num_a_indices]\n b_indices = idx_list[num_a_indices:]\n\n # This is necessary because np.dot sums the last index of a with the second to last of b\n # so we want to skip the second-to-last index into b.\n # This wasn't necessary for a, because we just omitted the last index.\n # We skip this if b.ndim = 1, since then we just want b_sub = b, not b_sub = b[:]\n # (dot also handles b.ndim < 2 as a special case)\n if b.ndim > 1 and len(b_indices) >= b.ndim - 1:\n b_indices = (\n b_indices[: b.ndim - 2]\n + (slice(None, None, None),)\n + b_indices[b.ndim - 2 :]\n )\n\n a_sub = a.__getitem__(tuple(a_indices))\n b_sub = b.__getitem__(tuple(b_indices)) if b_indices else b\n\n # Copy over previous output stacktrace to a_sub and b_sub,\n # because an error in the subtensor operation (e.g. an index error)\n # on either a or b must correspond to an error in the\n # subtensor operation on their dot product.\n copy_stack_trace(node.outputs[0], [a_sub, b_sub])\n\n # Copy over previous output stacktrace and previous dot product stacktrace,\n # because an error here may correspond to an either in either the original\n # dot product, or in the dot product after the subtensor operation.\n r = tt.dot(a_sub, b_sub)\n copy_stack_trace([node.outputs[0], node.inputs[0]], r)\n\n return [r]\n\n\n@register_canonicalize\n@local_optimizer([add])\ndef local_IncSubtensor_serialize(node):\n \"\"\"\n When using Subtensor, gradient graphs can be ugly.\n\n If we ask for grad(f(a[0]), a), we are going to get something like\n\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])\n\n This might be ugly, but at least it's as fast as you could want.\n If we ask for grad(f(a[0], a[1], a[2]), a), it's much worse...\n\n Elemwise{Add}\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[1])), [1])\n IncSubtensor(Elemwise{second}(a, 0), g(f(a[2])), [2])\n\n This is much worse because this time we have to produce 3 matrices\n the size of 'a', just so we can add them together.\n\n This Op rearranges IncSubtensor's that all work on the same\n initial argument (here, Elemwise{second}(a,0)) into a chain. 
The\n advantage of the chain structure is that each one can be optimized\n later in the pipeline to operate inplace.\n\n Ideally, the op will do something like this:\n\n #\n # add(x, incsubtensor(b, c), incsubtensor(b, d))\n # -> incsubtensor(incsubtensor(add(x,b,b), c), d)\n\n \"\"\"\n\n def movable(i):\n # Return True iff this is a incsubtensor that we can move\n return (\n i.owner\n and isinstance(\n i.owner.op,\n (\n IncSubtensor,\n AdvancedIncSubtensor1,\n AdvancedIncSubtensor,\n ),\n )\n and i.type == o_type\n and len(i.clients) == 1\n and not i.owner.op.set_instead_of_inc\n )\n\n if node.op == add:\n o_type = node.outputs[0].type\n\n movable_inputs = [i for i in node.inputs if movable(i)]\n\n if movable_inputs:\n new_inputs = [i for i in node.inputs if not movable(i)] + [\n mi.owner.inputs[0] for mi in movable_inputs\n ]\n if len(new_inputs) == 0:\n new_add = new_inputs[0]\n else:\n new_add = add(*new_inputs)\n\n # Copy over stacktrace from original output, as an error\n # (e.g. an index error) in this add operation should\n # correspond to an error in the original add operation.\n copy_stack_trace(node.outputs[0], new_add)\n\n # stack up the new incsubtensors\n tip = new_add\n for mi in movable_inputs:\n assert tip.type == o_type\n assert tip.type == mi.owner.inputs[0].type\n tip = mi.owner.op(tip, *mi.owner.inputs[1:])\n # Copy over stacktrace from outputs of the original\n # \"movable\" operation to the new operation.\n copy_stack_trace(node.outputs + mi.owner.outputs, tip)\n\n return [tip]\n\n # print incsub_inputs, [id(i.owner.inputs[0]) for i in incsub_inputs]\n\n\n# We register it in a TopoOptimizer inside the canonizer EQ optimizer.\n# Otherwise in some cases it was making the EQ optimizer use 45. In\n# the TopoOptimizer, the EQ only use 5 passes.\ncompile.optdb.register(\n \"pre_local_IncSubtensor_serialize\",\n in2out(local_IncSubtensor_serialize),\n # Just before canonizer\n 0.99,\n \"fast_run\",\n)\n\n\n# after priority 50 Destructive inplace operations\n# gemm is the first one now, at priority 70\n\n\n@local_optimizer([IncSubtensor], inplace=True)\ndef local_inplace_setsubtensor(node):\n \"\"\"\n Also work for GpuIncSubtensor.\n\n \"\"\"\n if isinstance(node.op, IncSubtensor) and not node.op.inplace:\n dta = node.op.destroyhandler_tolerate_aliased\n new_op = node.op.__class__(\n node.op.idx_list,\n inplace=True,\n set_instead_of_inc=node.op.set_instead_of_inc,\n destroyhandler_tolerate_aliased=dta,\n )\n new_node = new_op(*node.inputs)\n val = getattr(node.outputs[0].tag, \"nan_guard_mode_check\", True)\n new_node.tag.nan_guard_mode_check = val\n\n # Copy stacktrace from original outputs to new outputs.\n # This is sensible, because the new operation is the\n # same as the old one, but now with different attributes.\n copy_stack_trace(node.outputs, new_node)\n return [new_node]\n return False\n\n\ncompile.optdb.register(\n \"local_inplace_setsubtensor\",\n TopoOptimizer(\n local_inplace_setsubtensor, failure_callback=TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n) # DEBUG\n\n\n@local_optimizer([AdvancedIncSubtensor1], inplace=True)\ndef local_inplace_incsubtensor1(node):\n \"\"\"\n Also work for GpuAdvancedIncSubtensor1.\n\n \"\"\"\n if isinstance(node.op, AdvancedIncSubtensor1) and not node.op.inplace:\n new_op = node.op.clone_inplace()\n new_node = new_op(*node.inputs)\n\n # Copy stacktrace from original outputs to new outputs.\n # This is sensible, because the new operation is the\n # same as the old one, but now with different attributes.\n 
copy_stack_trace(node.outputs, new_node)\n return [new_node]\n return False\n\n\ncompile.optdb.register(\n \"local_inplace_incsubtensor1\",\n TopoOptimizer(\n local_inplace_incsubtensor1, failure_callback=TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n) # DEBUG\n\n\n# Register old name\n@register_canonicalize(\"local_incsubtensor_of_allocs\")\n@register_stabilize(\"local_incsubtensor_of_allocs\")\n@local_optimizer([IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1])\ndef local_incsubtensor_of_zeros(node):\n \"\"\"\n IncSubtensor(x, zeros, idx) -> x\n\n \"\"\"\n if (\n isinstance(node.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1))\n and not node.op.set_instead_of_inc\n ):\n x = node.inputs[0]\n y = node.inputs[1]\n try:\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n if get_scalar_constant_value(y, elemwise=False) == 0:\n # No need to copy over the stacktrace,\n # because x should already have a stacktrace\n return [x]\n except NotScalarConstantError:\n return\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([IncSubtensor])\ndef local_incsubtensor_of_zeros_to_setsubtensor(node):\n \"\"\"\n IncSubtensor(zeros, x, ...) -> SetSubtensor(zeros, x, ...)\n \"\"\"\n if isinstance(node.op, (IncSubtensor)) and not node.op.set_instead_of_inc:\n x = node.inputs[0]\n\n if isinstance(x, tt.Constant) and not np.any(x.data):\n return [\n IncSubtensor(\n node.op.idx_list,\n node.op.inplace,\n set_instead_of_inc=True,\n destroyhandler_tolerate_aliased=node.op.destroyhandler_tolerate_aliased,\n )(*node.inputs)\n ]\n\n\n@register_canonicalize(\"local_setsubtensor_of_allocs\")\n@register_stabilize(\"local_setsubtensor_of_allocs\")\n@local_optimizer([IncSubtensor])\ndef local_setsubtensor_of_constants(node):\n \"\"\"\n SetSubtensor(x, x[idx], idx) -> x\n\n when x is constant or alloc.\n\n \"\"\"\n if isinstance(node.op, IncSubtensor) and node.op.set_instead_of_inc:\n x = node.inputs[0]\n y = node.inputs[1]\n\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n try:\n replace_x = get_scalar_constant_value(x, elemwise=False)\n except NotScalarConstantError:\n return\n\n try:\n replace_y = get_scalar_constant_value(y, elemwise=False)\n except NotScalarConstantError:\n return\n\n if replace_x == replace_y:\n\n # No need to copy over the stacktrace,\n # because x should already have a stacktrace\n return [x]\n else:\n return False\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([AdvancedSubtensor1])\ndef local_adv_sub1_adv_inc_sub1(node):\n \"\"\"Optimize the possible AdvSub1(AdvSetSub1(...), ...).\n\n AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y\n\n Notes\n -----\n This opt add AssertOp. Otherwise, it would remove shape and\n index error. 
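In user-level notation the rewrite corresponds roughly to\n    ``tt.set_subtensor(x[idx], y)[idx] -> y`` for an integer vector ``idx``\n    (a sketch only; ``tt.set_subtensor`` stands for the set-subtensor helper of\n    the tensor module), with the added asserts guarding the shapes and indices.\n    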
If you want to get rid of them, see the\n :ref:`unsafe_optimization` section.\n\n WARNING:\n A previous version of this optimization also matched\n AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y\n This is incorrect when there are duplicate indices.\n The current version warns the user about potential past issues.\n\n \"\"\"\n if not isinstance(node.op, AdvancedSubtensor1):\n return\n inp = node.inputs[0]\n if not inp.owner or not isinstance(inp.owner.op, AdvancedIncSubtensor1):\n return\n idx = node.inputs[1]\n idx2 = inp.owner.inputs[2]\n x = inp.owner.inputs[0]\n y = inp.owner.inputs[1]\n if idx is not idx2:\n return\n if (\n not inp.owner.op.set_instead_of_inc\n and\n # Don't use only_process_constants=True. We need to\n # investigate Alloc of 0s but with non constant shape.\n tt.extract_constant(x, elemwise=False) != 0\n ):\n return\n\n if not inp.owner.op.set_instead_of_inc:\n if config.warn.inc_subtensor1_opt:\n warnings.warning(\n \"Your current code is fine, but Aesara versions \"\n \"between 0.7rc1 and 0.10 (or development versions \"\n \"between Nov. 2014 and May 2017) \"\n \"might have given incorrect results. This graph has \"\n \"following pattern: inc_subtensor(zeros[idx], x)[idx], \"\n \"where idx is an array of integers. This used to be \"\n 'optimized to \"x\", which is incorrect if there are '\n \"duplicated indices in idx. \"\n \"To disable this warning, set the Aesara flag \"\n \"warn.inc_subtensor1_opt to False.\"\n )\n return\n\n cond = [tt.all(tt.and_(tt.lt(idx, x.shape[0]), tt.ge(idx, -x.shape[0])))]\n if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):\n cond.append(tt.eq(idx.shape[0], y.shape[0]))\n r = Assert(\n \"Bad indexing or shapes in a AdvancedIncSubtensor1 \" \"that was optimized away\"\n )(y, *cond)\n copy_stack_trace(y, r)\n\n if r.dtype == node.outputs[0].dtype:\n return [r]\n # It is possible that y is upcast or downcast to x.dtype.\n # In all case, as we set or add with 0, we can just cast y.\n r2 = tt.cast(r, node.outputs[0].dtype)\n\n # Copy over stacktrace from before casting, since\n # we don't expect problems in the casting operation,\n # and any problems in the indexing would have been spotted above.\n copy_stack_trace(r, r2)\n return [r2]\n\n\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@register_useless\n@local_optimizer([IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1])\ndef local_useless_inc_subtensor_alloc(node):\n \"\"\"\n Replaces an [Advanced]IncSubtensor[1], whose increment is an `alloc` of\n a fully or partially broadcastable variable, by one that skips the\n intermediate `alloc` where possible.\n\n \"\"\"\n if isinstance(node.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)):\n x = node.inputs[0]\n y = node.inputs[1]\n i = node.inputs[2:]\n\n if y.owner is not None and isinstance(y.owner.op, Alloc):\n # `z` is the input of the Alloc op, i.e. 
T.alloc(z, <shape>)\n z = y.owner.inputs[0]\n\n try:\n shape_feature = node.fgraph.shape_feature\n except AttributeError:\n # The shape feature may not be available in some mode, but we\n # need it for this optimization, so don't continue.\n return False\n\n shape_of = shape_feature.shape_of\n same_shape = shape_feature.same_shape\n\n # Get the subtensor of `x` indexed by `i` in order to compare\n # shapes later.\n if isinstance(node.op, IncSubtensor):\n xi = Subtensor(node.op.idx_list)(x, *i)\n elif isinstance(node.op, AdvancedIncSubtensor):\n xi = advanced_subtensor(x, *i)\n elif isinstance(node.op, AdvancedIncSubtensor1):\n xi = advanced_subtensor1(x, *i)\n else:\n raise Exception(\"Should never happen!\")\n\n reason = \"local_useless_incsubtensor_alloc\"\n\n # Add `xi` to the shape feature `fgraph`. This is important for\n # shape inference later because the variable must be part of the\n # function graph in order to call `same_shape` on it.\n if xi not in shape_of:\n shape_feature.on_import(node.fgraph, xi.owner, \"%s: add `xi`\" % reason)\n\n # `xi` may have more dimensions than `y` since the subtensor ops\n # do automatic broadcasting of the increment internally. Thus, we\n # need to make the leading implicitly broadcasted dimensions\n # explicit for shape comparison later.\n if xi.ndim > y.ndim:\n y = tt.shape_padleft(y, xi.ndim - y.ndim)\n if y not in shape_of:\n shape_feature.on_import(\n node.fgraph, y.owner, \"%s: add `y`\" % reason\n )\n\n # Build `z_broad` explicitly to include extra implicit dimensions.\n z_broad = (True,) * (xi.ndim - z.ndim) + z.broadcastable\n\n cond = [\n # The shapes of `y` and `xi` must either agree or `y` may\n # also have shape equal to 1 which may be treated as a\n # broadcastable dimension by the subtensor op.\n tt.or_(tt.eq(y.shape[k], 1), tt.eq(y.shape[k], xi.shape[k]))\n # Loop over all dimensions.\n for k in range(xi.ndim)\n # We need to check the above shapes, if\n # * the pre-alloc increment `z` is broadcastable in\n # dimension `k` (if it isn't, then the shapes of `z` and\n # `y` are the same by the definition of the `Alloc` op in\n # this dimension and replacing `y` by `z` will not hide a\n # shape error), and\n # * `xi` and `y` do not have the same shape in dimension\n # `k` or we cannot infer the shape statically (if the\n # shapes of `xi` and `y` are not the same, then replacing\n # `y` by `z` will hide the shape error of `y`), and\n # * the shape of `y` is not equal to 1 or we cannot infer\n # the shape statically (if the shape of `y` is equal to\n # 1, then `y` is broadcasted by the inc_subtensor op\n # internally, so the shapes of `xi` and `y` do not need\n # to match in dimension `k`; else we need to check at\n # runtime that the shape of `y` is either 1 or the same\n # as `xi` or otherwise replacing `y` by `z` will hide a\n # shape error).\n if (\n z_broad[k]\n and not same_shape(xi, y, dim_x=k, dim_y=k)\n and shape_of[y][k] != 1\n )\n ]\n\n if len(cond) > 0:\n msg = \"`x[i]` and `y` do not have the same shape.\"\n z = Assert(msg)(z, *cond)\n\n r = node.op(x, z, *i)\n # Copy over stacktrace from previous output, since\n # we don't expect problems when removing the intermediate\n # alloc operation and so we still want to point at the line\n # of the inc_subtensor operation.\n copy_stack_trace(node.outputs, r)\n\n return [r]\n\n\n####################\n# Rebroadcast opts #\n####################\n\n\n@register_useless\n@register_canonicalize\n@register_specialize\n@local_optimizer([Rebroadcast])\ndef local_useless_rebroadcast(node):\n 
\"\"\"\n Remove Rebroadcast if id does not actually change the broadcasting pattern.\n\n \"\"\"\n if isinstance(node.op, Rebroadcast):\n x = node.inputs[0]\n if np.all(x.broadcastable == node.outputs[0].broadcastable):\n # No broadcastable flag was modified\n # No need to copy over stack trace,\n # because x should already have a stack trace.\n return [x]\n else:\n # Keep the flags that modify something\n new_axis = {}\n for dim, bc in list(node.op.axis.items()):\n if x.broadcastable[dim] != bc:\n new_axis[dim] = bc\n if new_axis == node.op.axis:\n # All flags are useful\n return\n else:\n r = Rebroadcast(*list(new_axis.items()))(x)\n # Copy over stacktrace from previous output\n copy_stack_trace(node.outputs, r)\n return [r]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Rebroadcast])\ndef local_rebroadcast_lift(node):\n \"\"\"\n Lifts Rebroadcast through unary Elemwise operations,\n and merges consecutive Rebroadcasts.\n\n Rebroadcast(Elemwise(x)) => Elemwise(Rebroadcast(x))\n Rebroadcast(Rebroadcast(x)) => Rebroadcast(x)\n\n \"\"\"\n op = node.op\n if not isinstance(op, Rebroadcast):\n return False\n\n input = node.inputs[0]\n inode = input.owner\n if inode and isinstance(inode.op, Elemwise) and len(inode.inputs) == 1:\n # It may happen that `input` has no client because this optimization\n # is called from `apply_rebroadcast_opt`, which in particular is used\n # by the `unbroadcast` function before we are in the actual function\n # compilation phase.\n if hasattr(input, \"clients\") and len(input.clients) == 1:\n rebroadcasted = Rebroadcast(*list(op.axis.items()))(inode.inputs[0])\n # Copy over stacktrace from previous output (after rebroadcasting)\n # to new output, because an error in the new graph right after\n # rebroadcasting must have been caused by the previous rebroadcasting.\n copy_stack_trace(node.outputs, rebroadcasted)\n\n rval = inode.op.make_node(rebroadcasted).outputs\n\n # Copy over stacktrace from previous output (after rebroadcasting)\n # and input (after elemwise operation) to new output, because an\n # error in the new graph could have been caused by either of the\n # two ops.\n copy_stack_trace(node.outputs + node.inputs, rval)\n\n return rval\n if inode and isinstance(inode.op, Rebroadcast):\n # the \"axis\" specification in the outer Rebroadcast overrides\n # the axis of the inner one\n axis = inode.op.axis.copy()\n axis.update(op.axis)\n iinput = inode.inputs[0]\n\n rval = [Rebroadcast(*list(axis.items()))(iinput)]\n\n # Copy over stacktrace from previous output (after second rebroadcast)\n # and from previous input (after first rebroadcast op) because an error in\n # the new graph could have been caused by either of the two\n # rebroadcast ops.\n copy_stack_trace(node.outputs + node.inputs, rval)\n return rval\n\n\ndef apply_rebroadcast_opt(rval):\n \"\"\"\n Apply as many times as required the optimization local_useless_rebroadcast\n and local_rebroadcast_lift.\n\n Parameters\n ----------\n rval: a Variable\n\n Returns\n -------\n A Variable (the same if no optimization can be applied)\n\n \"\"\"\n\n changed = True\n while changed and rval.owner:\n changed = False\n rval2 = local_useless_rebroadcast.transform(rval.owner)\n if rval2:\n assert len(rval2) == 1\n rval = rval2[0]\n changed = True\n if rval.owner:\n rval2 = local_rebroadcast_lift.transform(rval.owner)\n if rval2:\n assert len(rval2) == 1\n rval = rval2[0]\n changed = True\n return rval\n\n\n#############\n# Join opts 
#\n#############\n@register_specialize\n@register_canonicalize\n@register_useless\n@local_optimizer([Join])\ndef local_join_1(node):\n \"\"\"Join(i, x) => x\n\n Remove Join() when only one element is joined.\n\n \"\"\"\n if not isinstance(node.op, Join):\n return\n tensors = node.inputs[1:]\n if len(tensors) == 1:\n # We don't need to copy over any stacktrace here, because the\n # input variable should already have its own stacktrace.\n return [tensors[0]]\n\n\n# TODO: merge in local_useless_join\n@register_useless\n@register_specialize\n@register_canonicalize\n@local_optimizer([Join])\ndef local_join_empty(node):\n \"\"\"Join(i, x, y, empty) => Join(i, x, y)\n\n Remove empty inputs to joins. The empty inputs can be anywhere.\n\n \"\"\"\n if not isinstance(node.op, Join):\n return\n new_inputs = []\n try:\n join_idx = get_scalar_constant_value(\n node.inputs[0], only_process_constants=True\n )\n except NotScalarConstantError:\n return\n for idx in range(1, len(node.inputs)):\n inp = node.inputs[idx]\n # We can not use size == 0,, as this can change shape from 3,0\n # to 2,0. This trigger DebugMode error. This happen with\n # stack(...,[]) as this add a dimshuffle on [], that add a\n # dimensions with shape 1.\n if isinstance(inp, aesara.Constant) and inp.data.shape[join_idx] == 0:\n continue\n new_inputs.append(inp)\n if len(new_inputs) < len(node.inputs) - 1:\n if len(new_inputs) == 0:\n # T.join do not work in that case.\n # constant folding will take care of this case.\n return\n ret = tt.join(node.inputs[0], *new_inputs)\n o = node.outputs[0]\n if ret.dtype != o.dtype:\n # Join can upcast some inputs\n return\n\n # Copy over stacktrace from previous output (after join op)\n # to new output, because an error in the new op must be caused\n # by an error in the old join op.\n copy_stack_trace(node.outputs, ret)\n\n if ret.type != o.type:\n assert ret.dtype == o.dtype\n assert ret.ndim == o.ndim\n ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)\n\n # Copy over stacktrace from previous output\n # (after patternbroadcast op) for same reasons as before.\n copy_stack_trace(node.outputs, ret)\n\n return [ret]\n\n\n@register_specialize\n@register_canonicalize\n@register_useless\n@local_optimizer([Join])\ndef local_join_make_vector(node):\n \"\"\"Join(0, make_vector1, make_vector2, ...) => Join(0, make_vector12, ...)\n\n Merge MakeVector inputs to Join. 
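    A rough sketch of a graph that should trigger it (the scalar names and
    the imports are assumptions of the example; ``tt.stack`` over scalar
    variables is expected to lower to MakeVector nodes)::

        import aesara
        import aesara.tensor as tt

        a, b, c = tt.dscalars("a", "b", "c")
        # Join(0, MakeVector(a, b), MakeVector(c)) is rewritten to
        # Join(0, MakeVector(a, b, c)) by this optimization.
        out = tt.join(0, tt.stack([a, b]), tt.stack([c]))
        f = aesara.function([a, b, c], out)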
This can make the join completly\n disapear with the local_join_1 opt.\n\n \"\"\"\n if not isinstance(node.op, Join) or node.outputs[0].ndim != 1:\n return\n new_inputs = [node.inputs[1]]\n for idx in range(2, len(node.inputs)):\n inp = node.inputs[idx]\n if (\n inp.owner\n and isinstance(inp.owner.op, MakeVector)\n and new_inputs[-1].owner\n and isinstance(new_inputs[-1].owner.op, MakeVector)\n and\n # MakeVector have a dtype parameter\n inp.owner.op == new_inputs[-1].owner.op\n ):\n inps = new_inputs[-1].owner.inputs + inp.owner.inputs\n new_inputs[-1] = inp.owner.op(*inps)\n\n # Copy over stacktrace from previous output (after join op)\n # to new intermediate output, because an error in the intermediate\n # op must be caused by an error in the old join op.\n copy_stack_trace(node.outputs, new_inputs[-1])\n else:\n new_inputs.append(inp)\n if len(new_inputs) < len(node.inputs) - 1:\n ret = tt.join(node.inputs[0], *new_inputs)\n\n # Copy over stacktrace from previous output (after join op)\n # to new output, because an error in the new op must be caused\n # by an error in the old join op.\n copy_stack_trace(node.outputs, ret)\n return [ret]\n\n\n#################\n# speed/memory #\n#################\n@register_canonicalize\n@register_specialize\n@local_optimizer([Sum])\ndef local_sumsqr2dot(node):\n \"\"\"\n This optimization detects T.sqr( W.dimshuffle('x',0,1) * G.dimshuffle(0,'x',1) ).sum(axis=(1,2))\n and converts this to T.dot(T.sqr(G), T.sqr(W).sum(axis=0)).\n \"\"\"\n if (\n isinstance(node.op, Sum)\n and isinstance(node.op.scalar_op, ts.Add)\n and node.op.axis == (1, 2)\n ):\n in1 = node.inputs[0]\n out = node.outputs[0]\n\n if (\n in1.owner\n and isinstance(in1.owner.op, Elemwise)\n and isinstance(in1.owner.op.scalar_op, ts.Sqr)\n ):\n in_sqr = in1.owner.inputs[0]\n if (\n in_sqr.owner\n and isinstance(in_sqr.owner.op, Elemwise)\n and isinstance(in_sqr.owner.op.scalar_op, ts.Mul)\n and len(in_sqr.owner.inputs) == 2\n ):\n in_mul1, in_mul2 = in_sqr.owner.inputs\n\n if (\n isinstance(in_mul1.owner.op, tt.elemwise.DimShuffle)\n and in_mul1.owner.op.new_order == (\"x\", 0, 1)\n and isinstance(in_mul2.owner.op, tt.elemwise.DimShuffle)\n and in_mul2.owner.op.new_order == (0, \"x\", 1)\n ):\n W = in_mul1.owner.inputs[0]\n G = in_mul2.owner.inputs[0]\n\n new_out = tt.dot(tt.sqr(G), tt.sqr(W).sum(axis=0))\n if new_out.dtype != out.dtype:\n new_out = tt.cast(new_out, dtype=out.dtype)\n return [new_out]\n\n\n#################\n# Exp stability #\n#################\n@register_stabilize\n@register_specialize\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_expm1(node):\n \"\"\"\n This optimization detects exp(a)-1 and converts this to expm1(a).\n \"\"\"\n if isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, ts.Sub):\n in1, in2 = node.inputs\n out = node.outputs[0]\n\n if (\n in1.owner\n and isinstance(in1.owner.op, Elemwise)\n and isinstance(in1.owner.op.scalar_op, ts.Exp)\n and tt.extract_constant(in2, only_process_constants=False) == 1\n ):\n in11 = in1.owner.inputs[0]\n new_out = tt.expm1(in11)\n\n if new_out.dtype != out.dtype:\n new_out = tt.cast(new_out, dtype=out.dtype)\n if new_out.type != out.type:\n return\n return [new_out]\n\n\n###############\n# Switch opts #\n###############\n@register_useless(\"local_remove_switch_const_cond\")\n@register_canonicalize(\"fast_compile\", \"local_remove_switch_const_cond\")\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_useless_switch(node):\n \"\"\"\n This optimization makes the following changes in 
the graph:\n T.switch(cond,left,right) -->\n if cond is constant and cond == 0: right\n if cond is constant and cond != 0: left\n if left is right -> left\n\n T.switch(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)\n \"\"\"\n if isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, ts.Switch):\n cond = tt.extract_constant(node.inputs[0], only_process_constants=True)\n if (type(cond) is np.ndarray and cond.ndim == 0) or isinstance(cond, np.number):\n if cond == 0:\n correct_out = node.inputs[2]\n else:\n correct_out = node.inputs[1]\n\n if correct_out.ndim != node.outputs[0].ndim:\n # TODO: broadcast?\n return False\n if correct_out.dtype != node.outputs[0].dtype:\n out = tt.cast(correct_out, node.outputs[0].dtype)\n else:\n out = correct_out\n\n if out.type.broadcastable != node.outputs[0].type.broadcastable:\n # We need to copy data to the new dimensions during execution\n\n # We should not depend on node.outputs as this would\n # make the new node depend on the old one that will\n # get optimized again. So this create a cycle.\n shps = []\n for idx, (b1, b2), in enumerate(\n zip(out.type.broadcastable, node.outputs[0].type.broadcastable)\n ):\n if b1 == b2:\n shps.append(out.shape[idx])\n elif not node.inputs[1].type.broadcastable[idx]:\n shps.append(node.inputs[1].shape[idx])\n else:\n shps.append(node.inputs[2].shape[idx])\n out = alloc(out, *shps)\n else:\n out = out\n\n # Copy over stacktrace from selected output to new output\n copy_stack_trace(node.outputs + correct_out, out)\n return [out]\n # if left is right -> left\n if node.inputs[1] is node.inputs[2]:\n # Note: No need to copy over stacktrace, because the input node\n # already has its own stacktrace\n if cond.type == node.inputs[1].type:\n return [node.inputs[1]]\n\n ret = fill(cond, node.inputs[1])\n\n # Copy over stacktrace from switch output and correct branch\n copy_stack_trace(node.outputs + node.inputs[1], ret)\n return [ret]\n\n # This case happens with scan.\n # Elemwise{switch}(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)\n left = node.inputs[1]\n right = node.inputs[2]\n cond_var = node.inputs[0]\n if (\n cond_var.owner\n and isinstance(cond_var.owner.op, Elemwise)\n and isinstance(cond_var.owner.op.scalar_op, ts.LE)\n and cond_var.owner.inputs[0].owner\n and isinstance(cond_var.owner.inputs[0].owner.op, Shape_i)\n and tt.extract_constant(\n cond_var.owner.inputs[1], only_process_constants=True\n )\n == 0\n and tt.extract_constant(left, only_process_constants=True) == 0\n and right is cond_var.owner.inputs[0]\n ):\n assert right.type == node.outputs[0].type\n # No need to copy over stacktrace, because the right input node\n # already has its own stacktrace\n return [right]\n return False\n return False\n\n\n@register_specialize\n@register_canonicalize\n@local_optimizer([mul])\ndef local_mul_switch_sink(node):\n \"\"\"\n This optimization makes the following changes in the graph:\n T.mul(A,T.switch(cond,0,iff),B) --> T.switch(cond,0,T.mul(A,B,iff))\n T.mul(A,T.switch(cond,ift,0),B) --> T.switch(cond,T.mul(A,B,ift),0)\n A and B being several (or none) symbolic variables.\n This is useful because A and B may not be numerically stable and give\n NaN or inf values for cases where the switch returns 0.\n With this optimization T.grad(T.switch(...)) has the right behavior.\n\n Examples\n --------\n x -> f(x)\n x -> g(x)\n y = T.switch(cond,f(x),g(x))\n **without the optimization\n T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))\n **with the optimization\n 
T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))\n This will be particularly useful for the lazyif because we skip\n an entire part of the graph.\n\n \"\"\"\n if node.op != mul:\n return False\n for idx, i in enumerate(node.inputs):\n if i.owner and i.owner.op == tt.switch:\n switch = i.owner\n try:\n if (\n get_scalar_constant_value(\n switch.inputs[1], only_process_constants=True\n )\n == 0.0\n ):\n listmul = node.inputs[:idx] + node.inputs[idx + 1 :]\n fmul = mul(*(listmul + [switch.inputs[2]]))\n\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [tt.switch(switch.inputs[0], 0, fmul)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if (\n get_scalar_constant_value(\n switch.inputs[2], only_process_constants=True\n )\n == 0.0\n ):\n listmul = node.inputs[:idx] + node.inputs[idx + 1 :]\n fmul = mul(*(listmul + [switch.inputs[1]]))\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [tt.switch(switch.inputs[0], fmul, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\n@register_canonicalize\n@local_optimizer([true_div, int_div])\ndef local_div_switch_sink(node):\n \"\"\"\n This optimization makes the following changes in the graph:\n T.div(T.switch(cond,0,iff),A) --> T.switch(cond,0,T.div(iff,A))\n T.div(T.switch(cond,ift,0),A) --> T.switch(cond,T.div(ift,A),0)\n\n A being a symbolic variable.\n This is useful because A may not be numerically stable and give\n NaN or inf values for cases where the switch returns 0.\n See local_mul_switch_sink for more details.\n\n \"\"\"\n if node.op != true_div and node.op != int_div:\n return False\n op = node.op\n if node.inputs[0].owner and node.inputs[0].owner.op == tt.switch:\n switch = node.inputs[0].owner\n try:\n if (\n get_scalar_constant_value(switch.inputs[1], only_process_constants=True)\n == 0.0\n ):\n fdiv = op(switch.inputs[2], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. 
errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [tt.switch(switch.inputs[0], 0, fdiv)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if (\n get_scalar_constant_value(switch.inputs[2], only_process_constants=True)\n == 0.0\n ):\n fdiv = op(switch.inputs[1], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [tt.switch(switch.inputs[0], fdiv, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\n# Merge add/sub/mul/div/minimum/maximum/... of switches sharing the same\n# condition, to enable further simplification of their branches\n# Example: switch(c, a, b) + switch(c, x, y) -> switch(c, a+x, b+y)\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_merge_switch_same_cond(node):\n # node must be binary elemwise or add or mul\n if not isinstance(node.op, Elemwise) or not isinstance(\n node.op.scalar_op, (ts.BinaryScalarOp, ts.Add, ts.Mul)\n ):\n return\n # all inputs must be switch\n if not all(\n s.owner\n and isinstance(s.owner.op, Elemwise)\n and isinstance(s.owner.op.scalar_op, ts.Switch)\n for s in node.inputs\n ):\n return\n # all switch conditions must be the same\n cond = node.inputs[0].owner.inputs[0]\n if not all(s.owner.inputs[0] is cond for s in node.inputs[1:]):\n return\n # pull out switch\n return [\n tt.switch(\n cond,\n node.op(*[s.owner.inputs[1] for s in node.inputs]),\n node.op(*[s.owner.inputs[2] for s in node.inputs]),\n )\n ]\n\n\n#############\n# Tile Opts #\n#############\n@register_useless\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Tile])\ndef local_useless_tile(node):\n \"\"\"Tile(x, (1,)*N) -> x\n\n This is useless tile. 
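    For example (illustrative only, written in the same shorthand used by
    the other docstrings in this module)::

        Tile(x, (1,))   -> x     # x a vector
        Tile(x, (1, 1)) -> x     # x a matrix
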
(1,)*N, just mean a vector with all element\n being 1.\n\n \"\"\"\n if isinstance(node.op, Tile):\n try:\n a = tt.get_scalar_constant_value(\n node.inputs[1], only_process_constants=True\n )\n if a == 1:\n try:\n l = tt.get_vector_length(node.inputs[1])\n if l == node.inputs[0].ndim:\n # No need to copy over any stacktrace as previous\n # input variable already has a stacktrace\n return [node.inputs[0]]\n elif l < node.inputs[0].ndim:\n # The Op don't support that case, so we can't\n # implement the opt and test it.\n return\n return [node.inputs[0]]\n else:\n # The Op don't support that case, so we can't\n # implement the opt and test it.\n return\n x_nd = node.inputs[0].ndim\n broad = [\"x\"] * (l - x_nd) + range(x_nd)\n ret = node.inputs[0].dimshuffle(broad)\n # Copy over stacktrace from previous output node,\n # and from node before tiling operation.\n copy_stack_trace(node.outputs + node.inputs[0], ret)\n return [ret]\n except ValueError:\n return\n except NotScalarConstantError:\n return\n\n\n##############\n# Split Opts #\n##############\n@register_useless\n@register_canonicalize\n@register_specialize\n@local_optimizer([Split])\ndef local_useless_split(node):\n \"\"\"Split{n_splits=1}(x, y) -> x\n\n Remove Split with only 1 split.\n\n \"\"\"\n if isinstance(node.op, Split):\n if node.op.len_splits == 1:\n x, axis, splits = node.inputs\n out = assert_op(x, tt.eq(splits.shape[0], 1))\n # Copy over stacktrace from previous output node.\n copy_stack_trace(node.outputs, out)\n out2 = assert_op(out, tt.eq(x.shape[axis], splits[0]))\n # Copy over stacktrace from previous output node.\n copy_stack_trace(out, out2)\n\n return [out2]\n\n\n################\n# Flatten Opts #\n################\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Flatten])\ndef local_flatten_lift(node):\n \"\"\"\n Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))\n\n This optimization is needed by optimization\n nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.\n\n \"\"\"\n if (\n isinstance(node.op, Flatten)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and len(node.inputs[0].owner.inputs) == 1\n ):\n f = node.op(node.inputs[0].owner.inputs[0])\n\n # Copy over stacktrace from previous output node (flatten op),\n # since this is the op which may cause an error for f.\n copy_stack_trace(node.outputs, f)\n\n e = node.inputs[0].owner.op(f)\n\n # Copy over stacktrace from previous output node and from unary\n # elementwise output node since if there was an error, it would\n # probably have come from that operation.\n copy_stack_trace(node.outputs + [node.inputs[0]], e)\n\n return [e]\n\n\n##################\n# Reshape opts #\n##################\n\n\ndef local_reshape_chain(op):\n @local_optimizer([op])\n def f(node):\n \"\"\"\n Reshape(Reshape(shape1),shape2) -> Reshape(shape2)\n\n \"\"\"\n if not opt.check_chain(node, op, op):\n return False\n\n # TODO: this can permit a failing program to run by eliminating\n # the lower reshape\n rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])\n\n # Copy over stacktrace from previous output node, as any error\n # in new computational graph would have been caused by last op\n # in the old computational graph.\n copy_stack_trace(node.outputs, rval)\n\n # It might happen that the desired output of this node has a\n # broadcastable pattern that does not match that of 'rval'. 
This is\n # when originally, we were able to figure out that one of the\n # dimensions of the reshape is one, but some other transformation\n # replaced the shape by one for which this cannot be guessed.\n # We should try to figure out why we lost the information about this\n # constant value... but in the meantime, better not apply this\n # optimization.\n if rval.broadcastable == node.outputs[0].broadcastable:\n return [rval]\n else:\n return False\n\n return f\n\n\nregister_canonicalize(local_reshape_chain(Reshape), name=\"local_reshape_chain\")\n\n\n@register_useless\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Reshape])\ndef local_useless_reshape(node):\n \"\"\"\n Remove two kinds of useless reshape.\n\n Remove Reshape when both the input and output have a single dimension.\n Remove Reshape when reshaping to the shape of the input.\n\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n output_shape = node.inputs[1]\n\n if input.ndim != output.ndim:\n return False\n\n # Simple case: both input and output have a single dimension.\n # This could hide errors if the user provides inconsistent shapes.\n if (\n input.ndim == 1\n and output.ndim == 1\n and input.broadcastable == output.broadcastable\n ):\n return [input]\n\n # Second case: all the shapes match the input shape\n # Match Reshape(x, x.shape)\n if output_shape.owner and isinstance(output_shape.owner.op, Shape):\n shape_input = output_shape.owner.inputs[0]\n if shape_input == input:\n return [input]\n\n # Match Reshape(x, [x.shape[0], ..., x.shape[-1]]), accounting for\n # broadcastable and constant dimensions\n if output_shape.owner and isinstance(output_shape.owner.op, MakeVector):\n output_shape_is = output_shape.owner.inputs\n\n if not hasattr(node, \"fgraph\"):\n shape_feature = None\n else:\n shape_feature = getattr(node.fgraph, \"shape_feature\", None)\n\n nb_m1 = 0\n shape_match = [False] * input.ndim\n for dim in range(input.ndim):\n outshp_i = output_shape_is[dim]\n # Match Shape_i{dim}(input)\n if (\n outshp_i.owner\n and isinstance(outshp_i.owner.op, Shape_i)\n and outshp_i.owner.op.i == dim\n and outshp_i.owner.inputs[0] == input\n ):\n shape_match[dim] = True\n continue\n\n # Match Shape(input)[dim]\n if (\n outshp_i.owner\n and isinstance(outshp_i.owner.op, Subtensor)\n and len(outshp_i.owner.inputs) == 2\n and extract_constant(outshp_i.owner.inputs[1]) == dim\n ):\n subtensor_inp = outshp_i.owner.inputs[0]\n if subtensor_inp.owner and isinstance(subtensor_inp.owner.op, Shape):\n shape_input_i = subtensor_inp.owner.inputs[0]\n if shape_input_i == input:\n shape_match[dim] = True\n continue\n\n # Match 1 if input.broadcastable[dim] is True\n cst_outshp_i = extract_constant(outshp_i, only_process_constants=1)\n if input.broadcastable[dim] and cst_outshp_i == 1:\n shape_match[dim] = True\n continue\n\n # Match -1\n if cst_outshp_i == -1:\n shape_match[dim] = True\n nb_m1 += 1\n continue\n\n # Match shape_of[input][dim] or its constant equivalent\n if shape_feature:\n inpshp_i = shape_feature.get_shape(input, dim)\n if inpshp_i == outshp_i or (\n extract_constant(inpshp_i, only_process_constants=1)\n == extract_constant(outshp_i, only_process_constants=1)\n ):\n shape_match[dim] = True\n continue\n\n if all(shape_match) and nb_m1 <= 1:\n return [input]\n\n # TODO later: if all the shapes except one match, we may want to\n # consider it useless as well, like we do in the 1-dim 
case.\n\n\n@register_canonicalize\n@local_optimizer([Reshape])\ndef local_reshape_to_dimshuffle(node):\n \"\"\"\n Broadcastable dimensions in Reshape are replaced with dimshuffle.\n\n The goal is to avoid using reshape to add or remove broadcastable\n dimensions, but use dimshuffle instead, so dimshuffles can cancel out\n or be removed later on.\n\n For example:\n - reshape(x, (1, n)) --> dimshuffle{x,0}(reshape(x, (n,))\n - reshape(x, (1, m, 1, n, 1, 1))\n --> dimshuffle{x,0,x,1,x,x}(reshape(x, (m, n)))\n \"\"\"\n op = node.op\n if not isinstance(op, Reshape):\n return False\n\n input = node.inputs[0]\n output = node.outputs[0]\n output_shape = node.inputs[1]\n\n dimshuffle_new_order = []\n new_output_shape = []\n index = 0 # index over the output of the new reshape\n for i in range(output.ndim):\n # Since output_shape is a symbolic vector, we trust extract_constant\n # to go through however it is formed to see if its i-th element is 1.\n # We need only_process_constants=False for that.\n dim = extract_constant(\n output_shape[i], only_process_constants=False, elemwise=False\n )\n if dim == 1:\n dimshuffle_new_order.append(\"x\")\n else:\n dimshuffle_new_order.append(index)\n new_output_shape.append(dim)\n index = index + 1\n if index != output.ndim:\n inner = op.__class__(len(new_output_shape))(input, new_output_shape)\n copy_stack_trace(output, inner)\n new_node = [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]\n copy_stack_trace(output, new_node)\n return new_node\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Reshape])\ndef local_reshape_lift(node):\n \"\"\"\n Reshape(UnaryElemwise(x)) -> UnaryElemwise(Reshape(x))\n\n This optimization is needed by optimization\n nnet/sigm.py:log1msigm_to_softplus to get applied when there is a reshape.\n\n \"\"\"\n if (\n isinstance(node.op, Reshape)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and len(node.inputs[0].owner.inputs) == 1\n ):\n r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])\n # Copy stacktrace from previous Reshape op, as an error in new\n # Reshape op could only have been caused by old one.\n copy_stack_trace(node.outputs, r)\n\n e = node.inputs[0].owner.op(r)\n # Copy stacktrace from both previous Reshape and UnaryElemwise op\n # because an error in new cg could have been caused by either ops.\n copy_stack_trace(node.outputs + node.inputs, e)\n\n # In rare case the original broadcast was (False, True), but\n # the new one is (False, False). So don't crash in that case.\n if e.type != node.outputs[0].type:\n re = tt.patternbroadcast(e, node.outputs[0].broadcastable)\n\n # Copy over stack trace.\n # If the graph fails it is usually due to the fact that a dimension\n # that should be broadcastable does not actually have length 1,\n copy_stack_trace(e, re)\n else:\n re = e\n\n return [re]\n\n\n##################\n# Middleman cuts #\n##################\n\nregister_canonicalize(OpRemove(tensor_copy), name=\"remove_tensor_copy\")\n\n################\n# Canonization #\n################\n\n\nclass Canonizer(LocalOptimizer):\n r\"\"\"\n Simplification tool. The variable is a local_optimizer. It is best used\n with a TopoOptimizer in in_to_out order.\n\n Usage: Canonizer(main, inverse, reciprocal, calculate)\n\n Parameters\n ----------\n main\n A suitable Op class that is commutative, associative and\n takes one to an arbitrary number of inputs, e.g. add or\n mul\n inverse\n An Op class such that inverse(main(x, y), y) == x\n e.g. 
sub or true_div\n reciprocal\n A function such that main(x, reciprocal(y)) == inverse(x, y)\n e.g. neg or inv\n calculate\n Function that takes a list of numpy.ndarray instances\n for the numerator, another list for the denumerator,\n and calculates inverse(main(\\*num), main(\\*denum)). It\n takes a keyword argument, aslist. If True, the value\n should be returned as a list of one element, unless\n the value is such that value = main(). In that case,\n the return value should be an empty list.\n\n Examples\n --------\n >>> import aesara.tensor as tt\n >>> from aesara.tensor.opt import Canonizer\n >>> add_canonizer = Canonizer(add, sub, neg, \\\\\n ... lambda n, d: sum(n) - sum(d))\n >>> mul_canonizer = Canonizer(mul, true_div, inv, \\\\\n ... lambda n, d: prod(n) / prod(d))\n\n Examples of optimizations mul_canonizer can perform:\n\n | x / x -> 1\n | (x * y) / x -> y\n | x / y / x -> 1 / y\n | x / y / z -> x / (y * z)\n | x / (y / z) -> (x * z) / y\n | (a / b) * (b / c) * (c / d) -> a / d\n | (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\n | 2 * x / 2 -> x\n | x * y * z -> Elemwise(mul){x,y,z} #only one pass over the memory.\n | !-> Elemwise(mul){x,Elemwise(mul){y,z}}\n\n \"\"\"\n\n def __init__(self, main, inverse, reciprocal, calculate, use_reciprocal=True):\n self.main = main\n self.inverse = inverse\n self.reciprocal = reciprocal\n self.calculate = calculate\n self.use_reciprocal = use_reciprocal\n\n self.external_simplifiers = []\n\n def add_simplifier(self, simplifier, reason):\n self.external_simplifiers.append((reason, simplifier))\n\n def tracks(self):\n return [self.main, self.inverse, self.reciprocal]\n\n def get_num_denum(self, input):\n r\"\"\"\n This extract two lists, num and denum, such that the input is:\n self.inverse(self.main(\\*num), self.main(\\*denum)). It returns\n the two lists in a (num, denum) pair.\n\n For example, for main, inverse and reciprocal = \\*, / and inv(),\n\n | input -> returned value (num, denum)\n\n | x*y -> ([x, y], [])\n | inv(x) -> ([], [x])\n | inv(x) * inv(y) -> ([], [x, y])\n | x*y/z -> ([x, y], [z])\n | log(x) / y * (z + x) / y -> ([log(x), z + x], [y, y])\n | (((a / b) * c) / d) -> ([a, c], [b, d])\n | a / (b / c) -> ([a, c], [b])\n | log(x) -> ([log(x)], [])\n | x**y -> ([x**y], [])\n | x * y * z -> ([x, y, z], [])\n\n \"\"\"\n # This function is recursive. The idea is that there is a\n # get_num_denum recursion in which the internal ops are all\n # one of (main, inverse, reciprocal, DimShuffle) and the\n # internal data nodes all have the dtype of the 'input'\n # argument. The leaf-Variables of the graph covered by the\n # recursion may be of any Variable type.\n\n if input.owner is None or input.owner.op not in [\n self.main,\n self.inverse,\n self.reciprocal,\n ]:\n if input.owner and isinstance(input.owner.op, tt.DimShuffle):\n # If input is a DimShuffle of some input which does\n # something like this:\n\n # * change a vector of length N into a 1xN row matrix\n # * change a scalar into a 1x1x1 tensor\n # * in general, complete the shape of a tensor\n # with broadcastable 1s to the *left*\n # Then we will simply discard the DimShuffle and return\n # the num/denum of its input\n dsn = input.owner # dimshuffle node\n dsop = dsn.op # dimshuffle op\n\n # the first input of the dimshuffle i.e. 
the ndarray to redim\n dsi0 = dsn.inputs[0]\n\n # The compatible order is a DimShuffle \"new_order\" of the form:\n # ('x', ..., 'x', 0, 1, 2, ..., dimshuffle_input.type.ndim)\n\n # That kind of DimShuffle only adds broadcastable\n # dimensions on the left, without discarding any\n # existing broadcastable dimension and is inserted\n # automatically by Elemwise when the inputs have\n # different numbers of dimensions (hence why we can\n # discard its information - we know we can retrieve it\n # later on).\n compatible_order = (\"x\",) * (input.type.ndim - dsi0.type.ndim) + tuple(\n range(dsi0.type.ndim)\n )\n if dsop.new_order == compatible_order:\n # If the \"new_order\" is the one we recognize,\n # we return the num_denum of the dimshuffled input.\n return self.get_num_denum(input.owner.inputs[0])\n else:\n # This is when the input isn't produced by main,\n # inverse or reciprocal.\n return [input], []\n else:\n return [input], []\n num = []\n denum = []\n parent = input.owner\n\n # We get the (num, denum) pairs for each input\n # pairs = [self.get_num_denum(input2) if input2.type.dtype ==\n # input.type.dtype else ([input2], []) for input2 in\n # parent.inputs]\n pairs = [self.get_num_denum(input2) for input2 in parent.inputs]\n\n if parent.op == self.main:\n # If we have main(x, y, ...), numx, denumx, numy, denumy, ...\n # then num is concat(numx, numy, num...) and denum is\n # concat(denumx, denumy, denum...) note that main() can have any\n # number of arguments >= 0 concat is list concatenation\n num = reduce(list.__iadd__, map(operator.itemgetter(0), pairs))\n denum = reduce(list.__iadd__, map(operator.itemgetter(1), pairs))\n elif parent.op == self.inverse:\n # If we have inverse(x, y), numx, denumx, numy and denumy\n # then num is concat(numx, denumy) and denum is\n # concat(denumx, numy) note that inverse() is binary\n num = pairs[0][0] + pairs[1][1]\n denum = pairs[0][1] + pairs[1][0]\n elif parent.op == self.reciprocal:\n # If we have reciprocal(x), numx, denumx\n # then num is denumx and denum is numx\n # note that reciprocal() is unary\n num = pairs[0][1]\n denum = pairs[0][0]\n return num, denum\n\n def merge_num_denum(self, num, denum):\n r\"\"\"\n Utility function which takes two lists, num and denum, and\n returns something which is equivalent to inverse(main(\\*num),\n main(\\*denum)), but depends on the length of num and the length\n of denum (in order to minimize the number of operations).\n\n Let n = len(num) and d = len(denum):\n\n | n=0, d=0: neutral element (given by self.calculate([], []))\n | (for example, this would be 0 if main is addition\n | and 1 if main is multiplication)\n | n=1, d=0: num[0]\n | n=0, d=1: reciprocal(denum[0])\n | n=1, d=1: inverse(num[0], denum[0])\n | n=0, d>1: reciprocal(main(\\*denum))\n | n>1, d=0: main(\\*num)\n | n=1, d>1: inverse(num[0], main(\\*denum))\n | n>1, d=1: inverse(main(\\*num), denum[0])\n | n>1, d>1: inverse(main(\\*num), main(\\*denum))\n\n Given the values of n and d to which they are associated, all\n of the above are equivalent to:\n inverse(main(\\*num), main(\\*denum))\n\n \"\"\"\n\n ln, ld = len(num), len(denum)\n if not ln and not ld:\n return tt.as_tensor_variable(self.calculate([], []))\n if not ln:\n if self.use_reciprocal:\n return self.reciprocal(self.merge_num_denum(denum, []))\n else:\n ln = [self.calculate([], [], aslist=False)]\n if not ld:\n if ln == 1:\n # num[0] should always be a variable\n assert isinstance(num[0], gof.Variable)\n return num[0]\n else:\n return self.main(*num)\n return 
self.inverse(\n self.merge_num_denum(num, []), self.merge_num_denum(denum, [])\n )\n\n @staticmethod\n def get_constant(v):\n \"\"\"\n\n Returns\n -------\n object\n A numeric constant if v is a Constant or, well, a\n numeric constant. If v is a plain Variable, returns None.\n\n \"\"\"\n if isinstance(v, Constant):\n if getattr(v.tag, \"unique_value\", None) is not None:\n data = v.tag.unique_value\n else:\n data = v.data\n if data.ndim == 0:\n return data\n else:\n return None\n elif isinstance(v, Variable):\n return None\n else:\n return v\n\n def simplify(self, num, denum, out_type):\n \"\"\"\n Shorthand for:\n\n .. code-block:: python\n\n self.simplify_constants(*self.simplify_factors(num, denum))\n\n \"\"\"\n rval = self.simplify_constants(\n *self.simplify_factors(num, denum), out_type=out_type\n )\n for reason, simplifier in self.external_simplifiers:\n # TODO: document that 'reason' is associated with this\n # simplification to help auditing when things go\n # wrong\n rval = simplifier(*rval)\n return rval\n\n def simplify_factors(self, num, denum):\n \"\"\"\n For any Variable r which is both in num and denum, removes it\n from both lists. Modifies the lists inplace. Returns the\n modified lists. For example:\n\n | [x], [x] -> [], []\n | [x, y], [x] -> [y], []\n | [a, b], [c, d] -> [a, b], [c, d]\n\n \"\"\"\n ln = len(num)\n ld = len(denum)\n if ld > 2 and ln > 2:\n # Faster version for \"big\" inputs.\n while True:\n s = set(num)\n # Inputs can appear multiple times\n redo = len(s) != len(num)\n inter = s.intersection(denum)\n for v in inter:\n num.remove(v)\n denum.remove(v)\n if not redo or not inter:\n break\n else:\n for v in list(num):\n if v in denum:\n num.remove(v)\n denum.remove(v)\n return num, denum\n\n def simplify_constants(self, orig_num, orig_denum, out_type=None):\n \"\"\"\n Find all constants and put them together into a single constant.\n\n Finds all constants in orig_num and orig_denum (using\n get_constant) and puts them together into a single\n constant. The constant is inserted as the first element of the\n numerator. If the constant is the neutral element, it is\n removed from the numerator.\n\n Examples\n --------\n Let main be multiplication:\n\n | [2, 3, x], [] -> [6, x], []\n | [x, y, 2], [4, z] -> [0.5, x, y], [z]\n | [x, 2, y], [z, 2] -> [x, y], [z]\n\n \"\"\"\n # Lists representing the numerator and denumerator\n num, denum = [], []\n\n # Lists representing the *constant* elements of num and denum\n numct, denumct = [], []\n\n for v in orig_num:\n ct = self.get_constant(v)\n if ct is not None:\n # We found a constant in the numerator!\n # We add it to numct\n numct.append(ct)\n else:\n num.append(v)\n for v in orig_denum:\n ct = self.get_constant(v)\n if ct is not None:\n denumct.append(ct)\n else:\n denum.append(v)\n\n if self.use_reciprocal or num:\n # This will calculate either:\n # [inverse(main(*numct), main(*denumct))]\n # [] - if inverse(main(*numct), main(*denumct)) is the\n # neutral element\n ct = self.calculate(numct, denumct, aslist=True, out_type=out_type)\n else:\n # This happens if we don't allow the reciprocal and the\n # numerator is empty. 
That means we will need to represent\n # reciprocal(x) like inverse(neutral_element, x) so\n # we can't allow ct == []\n # TODO: why is this branch needed when merge_num_denum\n # does it for us?\n ct = [self.calculate(numct, denumct, aslist=False, out_type=out_type)]\n\n # Wrapping ct in a Constant with the right dtype\n ct = [tt.constant(c, dtype=out_type.dtype) for c in ct]\n\n if orig_num and len(numct) == 1 and len(denumct) == 0 and ct:\n # In that case we should only have one constant in `ct`.\n assert len(ct) == 1\n first_num_ct = self.get_constant(orig_num[0])\n if first_num_ct is not None and ct[0].type.values_eq(\n ct[0].data, first_num_ct\n ):\n # This is an important trick :( if it so happens that:\n # * there's exactly one constant on the numerator and none on\n # the denominator\n # * it's not the neutral element (ct is an empty list in that\n # case)\n # * the constant is the same as the first argument in the\n # numerator (we only check the first argument because the\n # canonizer puts the computed constants first)\n # -> then we return very exactly the original num/denum.\n # If we don't do that the optimizer will just loop\n # infinitely because it will not catch on that there are\n # no changes to be made and every time it will want to\n # replace something by the same thing...\n # Note that it is important to use `values_eq` instead of\n # the == operator, to handle NaN values correctly.\n return orig_num, orig_denum\n\n return ct + num, denum\n\n def transform(self, node):\n op = node.op\n if op not in [self.main, self.inverse, self.reciprocal]:\n return False\n\n assert len(node.outputs) == 1\n out = node.outputs[0]\n\n # out won't have a clients field when we didn't commit a\n # started change in the graph. We can't do the check if we\n # want to skip it, so we force the skip it. It should be\n # reapplied later.\n if not hasattr(out, \"clients\"):\n return\n\n # check if any of the clients of this node would be part of\n # this canonized graph... 
if so, we do nothing and wait for\n # them to be transformed.\n for c, c_idx in out.clients:\n if c == \"output\":\n continue\n while (\n isinstance(getattr(c, \"op\", None), DimShuffle)\n and len(c.outputs[0].clients) <= 1\n ):\n c = c.outputs[0].clients[0][0]\n if getattr(c, \"op\", \"\") in [self.main, self.inverse, self.reciprocal]:\n return False\n\n # Here we make the canonical version of the graph around this node\n # See the documentation of get_num_denum and simplify\n orig_num, orig_denum = self.get_num_denum(node.outputs[0])\n num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)\n\n def same(x, y):\n return len(x) == len(y) and all(np.all(xe == ye) for xe, ye in zip(x, y))\n\n if (\n same(orig_num, num)\n and same(orig_denum, denum)\n and\n # Check to see if we've collapsed some nested ops.\n not (\n len(orig_denum) == 0\n and\n # Make sure this change would increase the number of vector\n # arguments--decreasing the number of unnecessary `self.main`\n # nodes.\n len(node.inputs) < len(orig_num)\n )\n and\n # Do a similar check for the reciprocal op.\n not (\n self.use_reciprocal\n and node.op == self.reciprocal\n and len(orig_num) == 0\n and node.inputs[0].owner\n and len(node.inputs[0].owner.inputs) < len(orig_denum)\n )\n ):\n return False\n\n new = self.merge_num_denum(num, denum)\n if new.type.dtype != out.type.dtype:\n new = tt.cast(new, out.type.dtype)\n\n assert (new.type == out.type) == (not (new.type != out.type))\n\n if not (new.type == out.type):\n new = _fill_chain(new, node.inputs)[0]\n\n if new.type == out.type:\n # This happen with test\n # aesara/tensor/tests/test_opt.py:T_local_switch_sink\n new.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n\n # We need to implement the copy over of the stacktrace.\n # See issue #5104.\n return [new]\n else:\n _logger.warning(\n \" \".join(\n (\n \"CANONIZE FAILED: new, out = \",\n new,\n \",\",\n out,\n \"types\",\n new.type,\n \",\",\n out.type,\n )\n )\n )\n return False\n\n def __str__(self):\n return getattr(\n self,\n \"name\",\n \"Canonizer({}, {}, {})\".format(self.main, self.inverse, self.reciprocal),\n )\n\n\ndef mul_calculate(num, denum, aslist=False, out_type=None):\n if not num and not denum:\n # Smallest 1 possible.\n if aslist:\n return []\n else:\n return np.int8(1)\n\n # Make sure we do not accidentally upcast data types.\n if out_type is None:\n out_dtype = ts.upcast(*[v.dtype for v in (num + denum)])\n else:\n out_dtype = out_type.dtype\n one = aesara._asarray(1, dtype=out_dtype)\n\n v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)\n if aslist:\n if np.all(v == 1):\n return []\n else:\n return [v]\n return v\n\n\nlocal_mul_canonizer = Canonizer(mul, true_div, inv, mul_calculate, False)\nregister_canonicalize(local_mul_canonizer, name=\"local_mul_canonizer\")\n\n\n@local_optimizer([neg])\ndef local_neg_to_mul(node):\n if node.op == neg:\n return [mul(np.array(-1, dtype=node.inputs[0].dtype), node.inputs[0])]\n\n\nregister_canonicalize(local_neg_to_mul)\n\n\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_mul_by_scalar(node):\n \"\"\"\n sum(scalar * smth) -> scalar * sum(smth)\n sum(-smth) -> -sum(smth)\n\n or\n\n prod(scalar * smth) -> scalar ** size(smth) * prod(smth)\n prod(-smth) -> -1 ** size(smth) * prod(smth)\n\n \"\"\"\n # TODO: if the the thing inside the Sum is a division,\n # we should get at the numerator....\n if isinstance(node.op, (Sum, Prod)):\n (node_inps,) = node.inputs\n if node_inps.owner and node_inps.owner.op == mul:\n 
terms = node_inps.owner.inputs\n scalars = [t.dimshuffle() for t in terms if np.all(t.type.broadcastable)]\n\n if len(scalars) == 0:\n # Nothing to optimize here\n return\n\n non_scalars = [t for t in terms if not np.all(t.broadcastable)]\n\n # Perform the op only on the non-scalar inputs, if applicable\n if len(non_scalars) == 0:\n new_op_input_nb_elements = 1\n new_op_output = 1\n elif len(non_scalars) == 1:\n new_op_input_nb_elements = non_scalars[0].size\n new_op_output = node.op(non_scalars[0])\n else:\n new_op_input = mul(*non_scalars)\n # We assume that errors always come from the prod/mul op in the\n # original computational graph, and therefore need to only\n # copy over its output stacktrace.\n copy_stack_trace(node.outputs, new_op_input)\n\n new_op_input_nb_elements = new_op_input.size\n new_op_output = node.op(new_op_input)\n\n if not len(non_scalars) == 0:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, new_op_output)\n\n # If node.op is a T.elemwise.Prod, then the scalars need to be\n # raised to the power of the number of elements in the input\n # to the Prod\n if isinstance(node.op, Prod) and new_op_input_nb_elements != 1:\n\n scalars = [s ** new_op_input_nb_elements for s in scalars]\n\n # Scale the output of the op by the scalars and return as\n # replacement for the original output\n mul_inputs = scalars\n if new_op_input_nb_elements != 1:\n mul_inputs.append(new_op_output)\n\n if len(mul_inputs) == 1:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, mul_inputs)\n\n return mul_inputs\n else:\n ret = mul(*mul_inputs)\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, [ret] + mul_inputs)\n\n return [ret]\n\n if isinstance(node.op, Sum) and node_inps.owner and node_inps.owner.op == neg:\n s = node.op(node_inps.owner.inputs[0])\n ret = neg(s)\n # There are never errors in the negative op, thus\n # we need only to copy over stacktrace from previous output node to\n # the two new ops.\n copy_stack_trace(node.outputs, [s, ret])\n\n return [ret]\n\n\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_elemwise_sub_zeros(node):\n \"\"\"\n Elemwise{sub}(X,X) -> zeros_like(X)\n \"\"\"\n if (\n isinstance(node.op, Elemwise)\n and node.op.scalar_op.nin == 2\n and node.op.scalar_op == ts.sub\n and node.inputs[0] == node.inputs[1]\n ):\n res = tt.zeros_like(node.inputs[0])\n # Copy over stacktrace from previous output.\n # This could help for failures due to out-of-memory.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n\n@register_useless\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_useless_elemwise_comparison(node):\n \"\"\"...\n\n :note: These cases appear in the graph generated by scan.\n These optimizations will make the graph easier to read.\n # Comparing to itself is constant\n Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n Elemwise[{minimum,maximum}](X, X) -> X\n\n # Comparing shape to 0 can be constant\n Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n Elemwise[minimum](X.shape[i], 0) -> 0\n Elemwise[minimum](0, X.shape[i]) -> 0\n\n # The shape can be replaced with sum of shapes\n 
Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)\n Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n\n # Shapes are never negative\n # Needed by Reshape.infer_shape\n Elemwise[EQ](Subtensor(Shape(x)), -N) -> Elemwise[zeros](X)\n\n \"\"\"\n if not isinstance(node.op, Elemwise):\n return\n if node.op.scalar_op.nin != 2:\n return\n\n # We call zeros_like and one_like with opt=True to generate a\n # cleaner graph.\n dtype = node.outputs[0].dtype\n\n # Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, (ts.LT, ts.GT))\n and node.inputs[0] is node.inputs[1]\n ):\n res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, (ts.LE, ts.GE))\n and node.inputs[0] is node.inputs[1]\n ):\n res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{minimum,maximum}](X, X) -> X\n if (\n isinstance(node.op.scalar_op, (ts.Minimum, ts.Maximum))\n and node.inputs[0] is node.inputs[1]\n ):\n res = node.inputs[0]\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, ts.LT)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, ts.GE)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n if (\n isinstance(node.op.scalar_op, ts.Maximum)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n # No need to copy over stacktrace.\n return [node.inputs[0]]\n # Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n if (\n isinstance(node.op.scalar_op, ts.Maximum)\n and tt.extract_constant(node.inputs[0], only_process_constants=True) == 0\n and node.inputs[1].owner\n and isinstance(node.inputs[1].owner.op, Shape_i)\n ):\n # No need to copy over stacktrace.\n return [node.inputs[1]]\n # Elemwise[minimum](X.shape[i], 0) -> 0\n if (\n isinstance(node.op.scalar_op, ts.Minimum)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[minimum](0, X.shape[i]) -> 0\n if (\n isinstance(node.op.scalar_op, ts.Minimum)\n and tt.extract_constant(node.inputs[0], only_process_constants=True) == 0\n and node.inputs[1].owner\n and isinstance(node.inputs[1].owner.op, Shape_i)\n ):\n res = 
tt.zeros_like(node.inputs[1], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, ts.LT)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and isinstance(node.inputs[0].owner.op.scalar_op, ts.Add)\n and all(\n [\n isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs\n ]\n )\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, ts.GE)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and isinstance(node.inputs[0].owner.op.scalar_op, ts.Add)\n and all(\n [\n isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs\n ]\n )\n and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[EQ](Subtensor(Shape(x)), -N)\n # Elemwise[EQ](somegraph that only depend of shape, -N)\n # TODO: handle the case where the -N is on either side\n \"\"\"\n |Elemwise{eq,no_inplace} [id B] ''\n | |Subtensor{int64} [id C] ''\n | | |Join [id D] ''\n | | | |TensorConstant{0} [id E]\n | | | |Subtensor{int64:int64:} [id F] ''\n | | | | |Shape [id G] ''\n \"\"\"\n\n def investigate(node):\n \" Return True if values will be shapes, so >= 0\"\n if isinstance(node.op, (Shape, Shape_i)):\n return True\n elif isinstance(node.op, Subtensor) and node.inputs[0].owner:\n return investigate(node.inputs[0].owner)\n elif isinstance(node.op, Join):\n return all(v.owner and investigate(v.owner) for v in node.inputs[1:])\n elif isinstance(node.op, MakeVector):\n return all(v.owner and investigate(v.owner) for v in node.inputs)\n\n if (\n isinstance(node.op.scalar_op, ts.EQ)\n and node.inputs[0].owner\n and investigate(node.inputs[0].owner)\n ):\n try:\n cst = get_scalar_constant_value(node.inputs[1], only_process_constants=True)\n\n res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)\n\n if cst < 0:\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n\n return [res]\n\n except NotScalarConstantError:\n pass\n return\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_div_dimshuffle(node):\n \"\"\"\n sum(a / dimshuffle{...}(b), axis=l) -> sum(a, axis={...}) / b,\n if dimension l of the DimShuffle is 'x'\n\n or\n\n prod(a / dimshuffle{...}(b), axis=l) ->\n prod(a, axis={...}) / b ** a.shape[l],\n if dimension l of the DimShuffle is 'x'\n \"\"\"\n\n # It does not make much sense now to extend it to the case where the\n # dimshuffle is in the numerator, since elemwise inversion of the\n # denominator would still be needed before the summation or production.\n\n if isinstance(node.op, (Sum, Prod)):\n axis = node.op.axis\n if axis is None:\n axis = list(range(node.inputs[0].ndim))\n node_input = node.inputs[0]\n if node_input.owner and node_input.owner.op == true_div:\n numerator, denominator = node_input.owner.inputs\n\n # Old, bugged logic, reproduced 
here only to warn users\n if (\n config.warn.sum_div_dimshuffle_bug\n and isinstance(node.op, Sum)\n and numerator.owner\n and isinstance(numerator.owner.op, tt.DimShuffle)\n ):\n # Check compatibility\n new_order = numerator.owner.op.new_order\n compatible_dims = True\n for ax in axis:\n if len(new_order) <= ax or new_order[ax] != \"x\":\n compatible_dims = False\n break\n\n if compatible_dims:\n _logger.warning(\n \"WARNING: Your current code is fine, but\"\n \" Aesara versions between \"\n \"rev. 3bd9b789f5e8 (2010-06-16) and\"\n \" cfc6322e5ad4 (2010-08-03) would \"\n \"have given an incorrect result. \"\n \"To disable this warning, set the Aesara\"\n \" flag warn.sum_div_dimshuffle_bug to\"\n \" False.\"\n )\n\n if denominator.owner and isinstance(denominator.owner.op, tt.DimShuffle):\n dimshuffle_input = denominator.owner.inputs[0]\n dimshuffle_order = denominator.owner.op.new_order\n\n compatible_dims = []\n incompatible_dims = []\n for ax in axis:\n if ax < len(dimshuffle_order) and dimshuffle_order[ax] == \"x\":\n compatible_dims.append(ax)\n else:\n incompatible_dims.append(ax)\n reordered_incompatible_dims = []\n for ic_ax in incompatible_dims:\n reordered_incompatible_dims.append(\n ic_ax - sum([1 for c_ax in compatible_dims if c_ax < ic_ax])\n )\n\n if len(compatible_dims) > 0:\n optimized_dimshuffle_order = list(\n ax\n for i, ax in enumerate(dimshuffle_order)\n if (i not in axis) or (ax != \"x\")\n )\n\n # Removing leading 'x' (since it will be done automatically)\n while (\n len(optimized_dimshuffle_order) > 0\n and optimized_dimshuffle_order[0] == \"x\"\n ):\n del optimized_dimshuffle_order[0]\n\n # if optimized_dimshuffle_order is sorted with\n # not 'x', then dimshuffle is useless.\n if all(i == e for i, e in enumerate(optimized_dimshuffle_order)):\n optimized_dimshuffle = dimshuffle_input\n else:\n optimized_dimshuffle = tt.DimShuffle(\n dimshuffle_input.type.broadcastable,\n optimized_dimshuffle_order,\n )(dimshuffle_input)\n\n if config.warn.sum_div_dimshuffle_bug and isinstance(\n node.op, Sum\n ):\n _logger.warning(\n \"WARNING: Your current code is fine,\"\n \" but Aesara versions between \"\n \"rev. 3bd9b789f5e8 (2010-06-16) and\"\n \" cfc6322e5ad4 (2010-08-03) would \"\n \"have given an incorrect result. 
\"\n \"To disable this warning, set the\"\n \" Aesara flag \"\n \"warn.sum_div_dimshuffle_bug\"\n \" to False.\"\n )\n\n if isinstance(node.op, Sum):\n op_on_compatible_dims = tt.sum(numerator, axis=compatible_dims)\n rval = true_div(op_on_compatible_dims, optimized_dimshuffle)\n if len(reordered_incompatible_dims) > 0:\n rval = tt.sum(rval, axis=reordered_incompatible_dims)\n elif isinstance(node.op, Prod):\n op_on_compatible_dims = tt.prod(numerator, axis=compatible_dims)\n dtype = numerator.dtype\n rval = true_div(\n op_on_compatible_dims,\n (\n optimized_dimshuffle\n ** tt.prod(\n [\n numerator.shape[ax].astype(dtype)\n for ax in compatible_dims\n ]\n )\n ),\n )\n if len(reordered_incompatible_dims) > 0:\n rval = tt.prod(rval, axis=reordered_incompatible_dims)\n return [rval]\n\n\n@register_canonicalize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_all_to_none(node):\n \"\"\"\n Sum{0,1,...N} -> Sum{} or\n Prod{0,1,...N} -> Prod{}\n\n \"\"\"\n if isinstance(node.op, Sum) or isinstance(node.op, Prod):\n opt_type = Sum if isinstance(node.op, Sum) else Prod\n # if all the axes are named, then use None as a shorthand\n # this permits more merging\n if node.op.axis is None:\n return\n if set(node.op.axis) == set(range(node.inputs[0].type.ndim)):\n return [opt_type(axis=None, dtype=node.op.dtype)(node.inputs[0])]\n\n\n@register_canonicalize\n@local_optimizer([Sum, Prod])\ndef local_op_of_op(node):\n \"\"\"\n Prod(Prod()) -> single Prod()\n or\n Sum(Sum()) -> single Sum()\n\n \"\"\"\n if isinstance(node.op, Prod) or isinstance(node.op, Sum):\n opt_type = Sum if isinstance(node.op, Sum) else Prod\n (node_inps,) = node.inputs\n out_dtype = node.op.dtype\n # We manipulate the graph so this is done to make sure the opt\n # doesn't affect other computations.\n if len(node_inps.clients) == 1:\n if node_inps.owner and (isinstance(node_inps.owner.op, node.op.__class__)):\n\n # check to see either the inner or outer prod is doing a\n # product over all axis, in which case we can remove it\n if node_inps.owner.op.axis is None or node.op.axis is None:\n return [opt_type(None, dtype=out_dtype)(node_inps.owner.inputs[0])]\n\n # figure out which axes were in the original sum\n newaxis = list(tuple(node_inps.owner.op.axis))\n for i in node.op.axis:\n new_i = i\n for ii in node_inps.owner.op.axis:\n if new_i >= ii:\n new_i += 1\n assert new_i not in newaxis\n newaxis.append(new_i)\n\n assert len(newaxis) == len(\n list(node_inps.owner.op.axis) + list(node.op.axis)\n )\n\n # The old bugged logic. We keep it there to generate a warning\n # when we generated bad code.\n alldims = list(range(node_inps.owner.inputs[0].type.ndim))\n alldims = [\n d for i, d in enumerate(alldims) if i in node_inps.owner.op.axis\n ]\n alldims = [d for i, d in enumerate(alldims) if i in node.op.axis]\n newaxis_old = [\n i\n for i in range(node_inps.owner.inputs[0].type.ndim)\n if i not in alldims\n ]\n\n if (\n aesara.config.warn.sum_sum_bug\n and newaxis != newaxis_old\n and len(newaxis) == len(newaxis_old)\n ):\n _logger.warning(\n \"WARNING (YOUR CURRENT CODE IS FINE): Aesara \"\n \"versions between version 9923a40c7b7a and August \"\n \"2nd, 2010 generated bugged code in this case. \"\n \"This happens when there are two consecutive sums \"\n \"in the graph and the intermediate sum is not \"\n \"used elsewhere in the code. Some safeguard \"\n \"removed some bad code, but not in all cases. You \"\n \"are in one such case. 
To disable this warning \"\n \"(that you can safely ignore since this bug has \"\n \"been fixed) set the aesara flag \"\n \"`warn.sum_sum_bug` to False.\"\n )\n\n combined = opt_type(newaxis, dtype=out_dtype)\n return [combined(node_inps.owner.inputs[0])]\n\n\nALL_REDUCE = [\n CAReduce,\n All,\n Any,\n Sum,\n Prod,\n ProdWithoutZeros,\n] + CAReduce.__subclasses__()\n\n\n@register_canonicalize\n@register_uncanonicalize # Needed for MaxAndArgmax -> CAReduce\n@local_optimizer(ALL_REDUCE)\ndef local_reduce_join(node):\n \"\"\"\n Reduce{scalar.op}(Join(axis=0, a, b), axis=0) -> Elemwise{scalar.op}(a, b)\n\n Notes\n -----\n Supported scalar.op are Maximum, Mimimum in some cases and Add and Mul in\n all cases.\n\n Currently we must reduce on axis 0. It is probably extensible to the case\n where we join and reduce on the same set of axis.\n\n \"\"\"\n if (\n isinstance(node.op, tt.CAReduce)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Join)\n ):\n join = node.inputs[0].owner\n if tt.extract_constant(join.inputs[0], only_process_constants=True) != 0:\n return\n\n if isinstance(node.op.scalar_op, (ts.Maximum, ts.Minimum)):\n # Support only 2 inputs for now\n if len(join.inputs) != 3:\n return\n elif not isinstance(node.op.scalar_op, (ts.Add, ts.Mul)):\n return\n elif len(join.inputs) <= 2:\n # This is a useless join, that will get removed by another opt.\n return\n\n new_inp = []\n for inp in join.inputs[1:]:\n inp = inp.owner\n if not inp:\n return\n if not isinstance(inp.op, DimShuffle) or inp.op.new_order != (\"x\",) + tuple(\n range(inp.inputs[0].ndim)\n ):\n return\n new_inp.append(inp.inputs[0])\n ret = Elemwise(node.op.scalar_op)(*new_inp)\n\n if ret.dtype != node.outputs[0].dtype:\n # The reduction do something about the dtype.\n return\n\n reduce_axis = node.op.axis\n if reduce_axis is None:\n reduce_axis = tuple(range(node.inputs[0].ndim))\n\n # I put this warning late to don't add extra warning.\n if len(reduce_axis) != 1 or 0 not in reduce_axis:\n if aesara.config.warn.reduce_join:\n warnings.warning(\n \"Your current code is fine, but Aesara versions \"\n \"prior to 0.7 (or this development version Sept 2014) \"\n \"might have given an incorrect result for this code. \"\n \"To disable this warning, set the Aesara flag \"\n \"warn.reduce_join to False. The problem was an \"\n \"optimization, that modified the pattern \"\n '\"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)\", '\n \"did not check the reduction axis. 
So if the \"\n \"reduction axis was not 0, you got a wrong answer.\"\n )\n return\n\n # We add the new check late to don't add extra warning.\n try:\n join_axis = get_scalar_constant_value(\n join.inputs[0], only_process_constants=True\n )\n\n if join_axis != reduce_axis[0]:\n return\n except NotScalarConstantError:\n return\n\n return [ret]\n\n\n@register_canonicalize(\"fast_compile\", \"local_cut_useless_reduce\")\n@register_useless(\"local_cut_useless_reduce\")\n@local_optimizer(ALL_REDUCE)\ndef local_useless_reduce(node):\n \"\"\"Sum(a, axis=[]) -> a \"\"\"\n if isinstance(node.op, tt.CAReduce):\n (summed,) = node.inputs\n # if reduce were doing anything, the output ndim would be reduced\n if summed.type == node.outputs[0].type:\n return [summed]\n\n\n@register_canonicalize\n@register_uncanonicalize\n@register_specialize\n@local_optimizer(ALL_REDUCE)\ndef local_reduce_broadcastable(node):\n \"\"\"Remove reduction over broadcastable dimensions.\"\"\"\n if isinstance(node.op, tt.CAReduce):\n (reduced,) = node.inputs\n odtype = node.outputs[0].dtype\n if node.op.axis is None:\n if all(reduced.broadcastable):\n return [reduced.dimshuffle().astype(odtype)]\n else:\n axis = list(node.op.axis)\n cuttable = [a for a in axis if reduced.broadcastable[a]]\n if cuttable:\n # -- we can remove some axes of summation,\n # which simplifies the codegen for sum, especially on GPU\n new_axis = []\n pattern = []\n ii = 0\n for p in range(reduced.ndim):\n if p not in cuttable:\n if p in axis:\n new_axis.append(ii)\n pattern.append(p)\n ii += 1\n new_reduced = reduced.dimshuffle(*pattern)\n if new_axis:\n if type(node.op) == CAReduce:\n # This happen for tt.max(), tt.min()\n new_op = node.op.__class__(node.op.scalar_op, axis=new_axis)\n else:\n new_op = node.op.__class__(axis=new_axis)\n return [new_op(new_reduced)]\n else:\n # -- in this case we can remove the reduction completely\n return [new_reduced.astype(odtype)]\n\n\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_opt_alloc(node):\n \"\"\"\n sum(alloc(constant,shapes...)) => constant*prod(shapes)\n or\n prod(alloc(constant,shapes...)) => constant**prod(shapes)\n\n \"\"\"\n if isinstance(node.op, Sum) or isinstance(node.op, Prod):\n (node_inps,) = node.inputs\n if node_inps.owner and isinstance(node_inps.owner.op, Alloc):\n input = node_inps.owner.inputs[0]\n shapes = node_inps.owner.inputs[1:]\n try:\n val = get_scalar_constant_value(input, only_process_constants=True)\n assert val.size == 1\n val = val.reshape(1)[0]\n # check which type of op\n size = mul(*shapes)\n if input.dtype in [\"float16\", \"float32\"]:\n # shapes are ints and normally int64.\n # We don't want to have a float64 upcast\n # We don't want to downcast to float16\n # as we fear it could loose too much precision\n # that will be amplified by the mul/pow below.\n size = size.astype(\"float32\")\n if node.op.axis is None or node.op.axis == tuple(range(input.ndim)):\n if isinstance(node.op, Sum):\n val = val * size\n else:\n val = val ** size\n # Sum can change the input dtype (upcast or bool\n # -> float32) by default or by user request.\n # We can ignore the acc_dtype, as there is only 1\n # elemwise we will do and not a sequence, so there is no\n # accumulation of errors.\n # So mostly, we just need to cast the output to the old\n # dtype.\n val = val.astype(node.outputs[0].dtype)\n return [val]\n to_prod = [shapes[i] for i in range(len(shapes)) if i in node.op.axis]\n if to_prod:\n size = mul(*to_prod)\n if isinstance(node.op, Sum):\n val *= size\n else:\n val = 
val ** size\n # See comments above.\n val = val.astype(node.outputs[0].dtype)\n return [\n alloc(\n val,\n *[\n shapes[i]\n for i in range(len(shapes))\n if i not in node.op.axis\n ],\n )\n ]\n except NotScalarConstantError:\n pass\n\n\n@register_specialize\n@local_optimizer([neg])\ndef local_neg_neg(node):\n # other specializations shouldn't put this in,\n # but sometimes they do\n if node.op == neg:\n if node.inputs[0].owner and node.inputs[0].owner.op == neg:\n return [node.inputs[0].owner.inputs[0]]\n\n\n@register_specialize\n@local_optimizer([neg])\ndef local_neg_div_neg(node):\n \"\"\"\n - (-a / b) -> a / b\n\n Also performs - (c / b) -> ((-c) / b) when c is a scalar constant.\n\n \"\"\"\n if node.op == neg:\n if node.inputs[0].owner and node.inputs[0].owner.op == true_div:\n frac = node.inputs[0]\n num, denom = frac.owner.inputs\n if num.owner and num.owner.op == neg:\n if len(frac.clients) == 1:\n # No other clients of the original division\n new_num = num.owner.inputs[0]\n return [true_div(new_num, denom)]\n elif np.all(num.broadcastable) and isinstance(num, Constant):\n if len(frac.clients) == 1:\n new_num = -num.data\n return [true_div(new_num, denom)]\n\n\n@local_optimizer([mul])\ndef local_mul_zero(node):\n \"\"\"\n As part of canonicalization, we replace multiplication by zero\n with zero.\n\n \"\"\"\n if node.op == mul:\n otype = node.outputs[0].type\n\n for i in node.inputs:\n try:\n value = get_scalar_constant_value(i)\n except NotScalarConstantError:\n continue\n # print 'MUL by value', value, node.inputs\n if value == 0:\n # print '... returning zeros'\n return _fill_chain(aesara._asarray(0, dtype=otype.dtype), node.inputs)\n\n\nregister_canonicalize(local_mul_zero)\n\n\n@local_optimizer([true_div])\ndef local_div_to_inv(node):\n if node.op == true_div and np.all(\n local_mul_canonizer.get_constant(node.inputs[0]) == 1.0\n ):\n out = node.outputs[0]\n new_out = inv(local_mul_canonizer.merge_num_denum(node.inputs[1:], []))\n # The ones could have forced upcasting\n if new_out.dtype != out.dtype:\n new_out = tt.cast(new_out, dtype=out.dtype)\n # The ones could have forced a specific length\n if new_out.type != out.type:\n new_out = broadcast_like(new_out, out, node.fgraph)\n return [new_out]\n else:\n return False\n\n\nregister_specialize(local_div_to_inv)\n\n\n@local_optimizer([inv])\ndef local_inv_canon(node):\n if node.op == inv:\n return [pow(node.inputs[0], -1.0)]\n else:\n return False\n\n\nregister_canonicalize(local_inv_canon)\n\n\n@local_optimizer([pow])\ndef local_pow_canonicalize(node):\n if node.op == pow:\n cst = local_mul_canonizer.get_constant(node.inputs[1])\n if cst == 0:\n return [broadcast_like(1, node.outputs[0], node.fgraph)]\n if cst == 1:\n return [broadcast_like(node.inputs[0], node.outputs[0], node.fgraph)]\n else:\n return False\n\n\nregister_canonicalize(local_pow_canonicalize)\n\n\n@register_specialize\n@local_optimizer([mul])\ndef local_mul_to_sqr(node):\n \"\"\"\n x*x -> sqr(x)\n\n This is faster on the GPU when memory fetching is a big part of\n the computation time.\n\n \"\"\"\n if node.op == mul:\n if len(node.inputs) == 2:\n if node.inputs[0] is node.inputs[1]:\n return [tt.sqr(node.inputs[0])]\n\n\n@register_canonicalize\n@local_optimizer([int_div])\ndef local_intdiv_by_one(node):\n \"\"\"x // 1 -> x\"\"\"\n if node.op in [int_div]:\n if isinstance(node.inputs[1], tt.TensorConstant) and np.all(\n node.inputs[1].value == 1\n ):\n return 
[node.inputs[0].astype(node.outputs[0].dtype)]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([int_div, true_div])\ndef local_zero_div(node):\n \"\"\"0 / x -> 0\"\"\"\n if isinstance(node.op, Elemwise) and isinstance(\n node.op.scalar_op, (ts.IntDiv, ts.TrueDiv)\n ):\n if local_mul_canonizer.get_constant(node.inputs[0]) == 0:\n ret = broadcast_like(0, node.outputs[0], node.fgraph)\n ret.tag.values_eq_approx = values_eq_approx_remove_nan\n return [ret]\n\n\n@local_optimizer([pow])\ndef local_pow_specialize(node):\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op == pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = local_mul_canonizer.get_constant(ysym)\n if (y is not None) and encompasses_broadcastable(\n xsym.type.broadcastable, ysym.type.broadcastable\n ):\n rval = None\n\n if np.all(y == 2):\n rval = [tt.sqr(xsym)]\n if np.all(y == 1):\n rval = [xsym]\n if np.all(y == 0):\n rval = [fill(xsym, np.asarray(1, dtype=odtype))]\n if np.all(y == 0.5):\n rval = [tt.sqrt(xsym)]\n if np.all(y == -0.5):\n rval = [inv(tt.sqrt(xsym))]\n if np.all(y == -1):\n rval = [inv(xsym)]\n if np.all(y == -2):\n rval = [inv(tt.sqr(xsym))]\n if rval:\n rval[0] = tt.cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (rval, node.outputs)\n return rval\n else:\n return False\n\n\nregister_specialize(local_pow_specialize)\n\n\n@register_specialize_device\n@local_optimizer([pow])\ndef local_pow_specialize_device(node):\n \"\"\"\n This optimization is not the same on all device. We do it only on cpu here.\n \"\"\"\n if node.op == pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = local_mul_canonizer.get_constant(ysym)\n\n # the next line is needed to fix a strange case that I don't\n # know how to make a separate test.\n # That happen in the test_opt.py:test_log_erfc test.\n # y is a ndarray with dtype int8 and value 2,4 or 6. 
This make\n # the abs(y) <= 512 fail!\n # taking the value outside ndarray solve the problem.\n # it could be that in that case, numpy make the comparaison\n # into the wrong type(do in int8 that overflow.)\n if isinstance(y, np.ndarray):\n assert y.size == 1\n try:\n y = y[0]\n except IndexError:\n pass\n if (y is not None) and encompasses_broadcastable(\n xsym.type.broadcastable, ysym.type.broadcastable\n ):\n rval = None\n # 512 is too small for the cpu and too big for some gpu!\n if abs(y) == int(abs(y)) and abs(y) <= 512:\n pow2 = [xsym]\n pow2_scal = [ts.get_scalar_type(xsym.dtype)()]\n y_to_do = abs(y)\n for i in range(int(np.log2(y_to_do))):\n pow2.append(tt.sqr(pow2[i]))\n pow2_scal.append(ts.sqr(pow2_scal[i]))\n rval1 = None\n rval1_scal = None\n while y_to_do > 0:\n log_to_do = int(np.log2(y_to_do))\n if rval1:\n rval1 *= pow2[log_to_do]\n rval1_scal *= pow2_scal[log_to_do]\n else:\n rval1 = pow2[log_to_do]\n rval1_scal = pow2_scal[log_to_do]\n y_to_do -= 2 ** log_to_do\n\n if abs(y) > 2:\n # We fuse all the pow together here to make\n # compilation faster\n rval1 = Elemwise(\n ts.Composite([pow2_scal[0]], [rval1_scal])\n ).make_node(xsym)\n if y < 0:\n rval = [inv(rval1)]\n else:\n rval = [rval1]\n if rval:\n rval[0] = tt.cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (rval, node.outputs)\n return rval\n\n\n@local_optimizer([mul])\ndef local_mul_specialize(node):\n \"\"\"\n Remove special-case constants from mul arguments and useless neg in inputs.\n\n mul(-1, x) -> neg(x)\n mul(1, x, y) -> mul(x, y)\n mul(0, ...) -> alloc(0, shapes...)\n\n This is not done if we would add more nodes in the graph, like with:\n\n mul(-1, x, y) -/-> neg(mul(x, y))\n\n \"\"\"\n # here, we are past the point of canonicalization, so we don't\n # want to put in un-necessary fills.\n #\n # at this point [post canonicalize], mul() may have many inputs.\n if node.op == mul:\n # the idea here is that we have pow(x, y)\n has_neg = False\n new_inputs = []\n nb_neg_node = 0\n nb_cst = 0\n for input in node.inputs:\n # remove any neg arguments\n while input.owner and input.owner.op == neg:\n has_neg ^= True\n input = input.owner.inputs[0]\n nb_neg_node += 1\n\n # remove special case arguments of 1, -1 or 0\n y = local_mul_canonizer.get_constant(input)\n if y == 1.0:\n nb_cst += 1\n elif y == -1.0:\n nb_cst += 1\n has_neg ^= True # toggles\n elif y == 0.0:\n # if we find any zero, we just return right away\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\n else:\n new_inputs.append(input)\n\n if new_inputs != node.inputs:\n if new_inputs:\n if len(new_inputs) == 1:\n if has_neg:\n if new_inputs[0].dtype in (tt.uint_dtypes + [\"bool\"]):\n return\n else:\n rval = -new_inputs[0]\n else:\n rval = new_inputs[0]\n else:\n # The next case would cause a replace by an equivalent case.\n if has_neg and nb_neg_node == 0 and nb_cst == 1:\n return\n elif has_neg:\n # Don't add an extra neg node as we can't\n # fully replace this mul by a neg.\n m1 = np.asarray(-1, dtype=node.outputs[0].dtype)\n new_inputs = [m1] + new_inputs\n rval = mul(*new_inputs)\n\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\n else:\n # there are no variable inputs to mul\n # N.B. 
this could have been constant-folded...\n if has_neg:\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\n else:\n return [broadcast_like(1, node.outputs[0], node.fgraph)]\n\n\nregister_specialize(local_mul_specialize)\n\n\n@local_optimizer([add])\ndef local_add_specialize(node):\n def fill_chain(v):\n out = _fill_chain(v, node.inputs)\n return out\n\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op == add:\n new_inputs = []\n for input in node.inputs:\n try:\n y = get_scalar_constant_value(input)\n except NotScalarConstantError:\n y = input\n if np.all(y == 0.0):\n continue\n new_inputs.append(input)\n\n if len(new_inputs) < len(node.inputs):\n dtype = node.outputs[0].type.dtype\n if len(new_inputs) == 0:\n # we got rid of the entire expression!\n ndim = node.outputs[0].type.ndim\n # Reuse call to constant for cache()\n cst = tt.constant(np.zeros((1,) * ndim, dtype=dtype))\n assert cst.type.broadcastable == (True,) * ndim\n return fill_chain(cst)\n\n if len(new_inputs) == 1:\n ret = fill_chain(new_inputs[0])\n else:\n ret = fill_chain(add(*new_inputs))\n # The dtype should not be changed. It can happen if the input\n # that was forcing upcasting was equal to 0.\n if ret[0].dtype != dtype:\n ret = [tt.cast(ret[0], dtype)]\n return ret\n else:\n return False\n\n\nregister_specialize(local_add_specialize)\n\nmul_canonizer = in2out(\n gof.LocalOptGroup(local_mul_canonizer, local_fill_sink, apply_all_opts=True),\n name=\"mul_canonizer_groups\",\n)\n\n\ndef check_for_x_over_absX(numerators, denominators):\n \"\"\"Convert x/abs(x) into sign(x). \"\"\"\n # TODO: this function should dig/search through dimshuffles\n # This won't catch a dimshuffled absolute value\n for den in list(denominators):\n if den.owner and den.owner.op == abs_ and den.owner.inputs[0] in numerators:\n if den.owner.inputs[0].type.dtype.startswith(\"complex\"):\n # TODO: Make an Op that projects a complex number to\n # have unit length but projects 0 to 0. That\n # would be a weird Op, but consistent with the\n # special case below. I heard there's some\n # convention in Matlab that is similar to\n # this... 
but not sure.\n pass\n else:\n denominators.remove(den)\n numerators.remove(den.owner.inputs[0])\n numerators.append(tt.sgn(den.owner.inputs[0]))\n return numerators, denominators\n\n\nlocal_mul_canonizer.add_simplifier(check_for_x_over_absX, \"X_over_absX\")\n\n\n@register_canonicalize\n@local_optimizer([abs_])\ndef local_abs_lift(node):\n \"\"\"\n Move the abs toward the input.\n\n This is needed for check_for_x_over_absX to apply in more case.\n\n \"\"\"\n if node.op == abs_ and node.inputs[0].owner:\n assert node.nin == 1\n if node.inputs[0].owner.op == mul:\n return [mul(*[abs_(i) for i in node.inputs[0].owner.inputs])]\n if node.inputs[0].owner.op == true_div:\n i = node.inputs[0].owner.inputs\n return [true_div(abs_(i[0]), abs_(i[1]))]\n\n\n@register_specialize\n@local_optimizer([mul, true_div])\ndef local_abs_merge(node):\n \"\"\"\n Merge abs generated by local_abs_lift when the canonizer don't\n need it anymore\n\n \"\"\"\n if node.op == mul and sum([i.owner.op == abs_ for i in node.inputs if i.owner]) > 1:\n inputs = []\n for i in node.inputs:\n if i.owner and i.owner.op == abs_:\n inputs.append(i.owner.inputs[0])\n elif isinstance(i, Constant):\n try:\n const = get_scalar_constant_value(i, only_process_constants=True)\n except NotScalarConstantError:\n return False\n if not (const >= 0).all():\n return False\n inputs.append(i)\n else:\n return False\n return [abs_(mul(*inputs))]\n if (\n node.op == true_div\n and sum([i.owner.op == abs_ for i in node.inputs if i.owner]) == 2\n ):\n return [\n abs_(\n true_div(node.inputs[0].owner.inputs[0], node.inputs[1].owner.inputs[0])\n )\n ]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log1p(node):\n # log(1+x) -> log1p(x)\n # log(1-x) -> log1p(-x)\n if node.op == log:\n (log_arg,) = node.inputs\n if log_arg.owner and log_arg.owner.op == add:\n scalars, scalar_inputs, nonconsts = scalarconsts_rest(\n log_arg.owner.inputs, only_process_constants=True\n )\n # scalar_inputs are potentially dimshuffled and fill'd scalars\n if scalars and np.allclose(np.sum(scalars), 1):\n if nonconsts:\n if len(nonconsts) > 1:\n ninp = add(*nonconsts)\n else:\n ninp = nonconsts[0]\n if ninp.dtype != log_arg.type.dtype:\n ninp = ninp.astype(node.outputs[0].dtype)\n return _fill_chain(log1p(ninp), scalar_inputs)\n\n elif log_arg.owner and log_arg.owner.op == sub:\n one = tt.extract_constant(\n log_arg.owner.inputs[0], only_process_constants=True\n )\n if one != 1:\n return\n other = log_arg.owner.inputs[1]\n if other.dtype != log_arg.dtype:\n other = other.astype(log_arg.dtype)\n return [log1p(neg(other))]\n\n\n# TODO: in canonicalize, change log10 and log2 -> log\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log_add(node):\n # log(exp(x)+exp(y))\n #\n # Suppose x >= y\n # log(exp(x) + exp(y))\n # log(exp(x) * (1 + exp(y)/exp(x)))\n # x + log(1 + exp(y)/exp(x))\n # x + log1p(exp(y)/exp(x))\n # x + log1p(exp(y-x))\n if node.op == log:\n z = node.inputs[0]\n if z.owner and z.owner.op == add:\n zi = z.owner.inputs\n if len(zi) != 2:\n # -- upgrading Maximum to handle multiple inputs wasn't trivial\n # TODO\n # raise NotImplementedError()\n return\n pre_exp = [\n x.owner.inputs[0] for x in zi if x.owner and x.owner.op == tt.exp\n ]\n if len(pre_exp) == len(zi):\n # all arguments to add are exp(<something>)\n max_pre = tt.maximum(*pre_exp)\n\n ret = max_pre + log1p(tt.exp(add(*[p - max_pre for p in pre_exp])))\n ret.tag.values_eq_approx = values_eq_approx_remove_inf\n return 
[ret]\n\n\n@local_optimizer([log])\ndef local_log_sum_exp(node):\n # log(sum_i(exp(x_i))) = x_max + log(sum_i(exp(x_i - x_max)))\n\n if node.op != log:\n return\n\n sum_node = node.inputs[0].owner\n # If the sum has keepdims=True, there might be a dimshuffle\n if sum_node and isinstance(sum_node.op, tt.DimShuffle):\n dimshuffle_op = sum_node.op\n sum_node = sum_node.inputs[0].owner\n else:\n dimshuffle_op = None\n\n if not sum_node or not isinstance(sum_node.op, Sum):\n return\n\n exp_node, axis = sum_node.inputs[0].owner, sum_node.op.axis\n if not exp_node or not (\n isinstance(exp_node.op, Elemwise) and isinstance(exp_node.op.scalar_op, ts.Exp)\n ):\n return\n\n pre_exp = exp_node.inputs[0]\n max_pre_exp = tt.max(pre_exp, axis=axis)\n max_pre_exp_keepdims = tt.makeKeepDims(pre_exp, max_pre_exp, axis)\n\n ret = max_pre_exp + log(tt.sum(tt.exp(pre_exp - max_pre_exp_keepdims), axis=axis))\n\n # Restore the dimshuffle op, if any.\n if dimshuffle_op:\n ret = dimshuffle_op(ret)\n\n return [ret]\n\n\ncompile.optdb.register(\n \"local_log_sum_exp\",\n in2out(local_log_sum_exp, ignore_newtrees=True),\n 1.6,\n \"fast_run\",\n)\n\n\ndef add_calculate(num, denum, aslist=False, out_type=None):\n # TODO: make sure that this function and mul_calculate are similar\n if out_type is None:\n zero = 0.0\n else:\n zero = aesara._asarray(0, dtype=out_type.dtype)\n # zero = 0.0 if out_type is None else aesara._asarray(0,\n # dtype=out_type.dtype)\n if out_type and out_type.dtype == \"bool\":\n if len(denum) == 0:\n # NumPy 1.14 do not accept to do \"bool - bool\"\n v = reduce(np.add, num, zero)\n else:\n raise Exception(\n \"bool subtraction not supported. This should not happen as\"\n \" an earlier error should have been raised\"\n )\n else:\n v = reduce(np.add, num, zero) - reduce(np.add, denum, zero)\n if aslist:\n if np.all(v == 0):\n return []\n else:\n return [v]\n return v\n\n\nlocal_add_canonizer = Canonizer(add, sub, neg, add_calculate)\nadd_canonizer = in2out(\n gof.LocalOptGroup(local_add_canonizer, local_fill_sink, apply_all_opts=True),\n name=\"add_canonizer_group\",\n)\n\n\nregister_canonicalize(local_add_canonizer, name=\"local_add_canonizer\")\n\n\n##################\n# Distributivity #\n##################\n\n\ndef distribute_greedy(pos_pairs, neg_pairs, num, denum, out_type, minscore=0):\n # each pair in pos_pairs and neg_pairs is a num/denum pair. 
this\n # function attempts to add num and denum to the corresponding parts\n # of each pair, and counts how many multiplications/divisions can\n # be saved in that way.\n\n # each division is counted like div_cost multiplications\n # (typically, division costs more so we are willing to multiply more\n # in order to divide less)\n # 1.5 was obtained through an informal test and may very well be\n # platform dependent\n div_cost = 1.5\n\n # score is number of operations saved, higher is better\n score = len(num) + div_cost * len(denum)\n new_pos_pairs = list(\n itertools.starmap(\n local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d) in pos_pairs],\n )\n )\n new_neg_pairs = list(\n itertools.starmap(\n local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d) in neg_pairs],\n )\n )\n for (n, d), (nn, dd) in zip(pos_pairs + neg_pairs, new_pos_pairs + new_neg_pairs):\n # We calculate how many operations we are saving with the new\n # num and denum\n score += len(n) + div_cost * len(d) - len(nn) - div_cost * len(dd)\n if score <= minscore:\n # the change is not applied because it adds too many operations\n return False, pos_pairs, neg_pairs\n return True, new_pos_pairs, new_neg_pairs\n\n\ndef attempt_distribution(factor, num, denum, out_type):\n \"\"\"Try to insert each `num` and each `denum` in the factor?\n\n Returns\n -------\n changes?, new_factor, new_num, new_denum\n If there are changes, `new_num` and `new_denum` contain all the\n numerators and denominators that could not be distributed in the factor\n\n \"\"\"\n pos_terms, neg_terms = local_add_canonizer.get_num_denum(factor)\n if len(pos_terms) == 1 and not neg_terms:\n return False, factor, num, denum\n pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos_terms))\n neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg_terms))\n change = False\n for n in list(num):\n success, pos_pairs, neg_pairs = distribute_greedy(\n pos_pairs, neg_pairs, [n], [], out_type\n )\n if success:\n change = True\n num.remove(n)\n for d in list(denum):\n success, pos_pairs, neg_pairs = distribute_greedy(\n pos_pairs, neg_pairs, [], [d], out_type\n )\n if success:\n change = True\n denum.remove(d)\n if not change:\n return change, factor, num, denum\n else:\n return (\n change,\n local_add_canonizer.merge_num_denum(\n list(itertools.starmap(local_mul_canonizer.merge_num_denum, pos_pairs)),\n list(itertools.starmap(local_mul_canonizer.merge_num_denum, neg_pairs)),\n ),\n num,\n denum,\n )\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([mul, true_div, inv])\ndef local_greedy_distributor(node):\n \"\"\"\n Optimize by reducing the number of multiplications and/or divisions.\n\n This optimization tries to apply distributivity of multiplication\n to addition in order to reduce the number of multiplications\n and/or divisions that must be done. The algorithm weighs division\n more than multiplication to account for the former's slightly\n greater computational cost.\n\n The following expressions are simplified:\n 1. ((a/x + b/y) * x * y) --> a*y + b*x\n 2. ((a/x + b) * x) --> a + b*x\n 3. There are other forms too where node is a true_div.\n\n The following expressions are not simplified:\n 4. ((a + b) * x) -/-> a*x + b*x\n\n This optimization aims to reduce computational cost. It may also\n increase numerical stability, e.g. 
when x and/or y tend to 0 in\n example 1.\n\n \"\"\"\n\n out = node.outputs[0]\n num, denum = local_mul_canonizer.get_num_denum(out)\n if len(num) == 1 and not denum:\n return False\n\n new_num, new_denum = [], []\n\n change = False\n\n out_type = out.type\n for candidate in list(num):\n if candidate not in num:\n continue\n num.remove(candidate)\n _change, candidate, num, denum = attempt_distribution(\n candidate,\n num,\n denum,\n out_type,\n )\n\n change |= _change\n new_num.append(candidate)\n\n for candidate in list(denum):\n if candidate not in denum:\n continue\n denum.remove(candidate)\n _change, candidate, denum, num = attempt_distribution(\n candidate, denum, num, out_type\n )\n change |= _change\n new_denum.append(candidate)\n if not change:\n return False\n\n new_num += num\n new_denum += denum\n\n rval = local_mul_canonizer.merge_num_denum(new_num, new_denum)\n\n if not (rval.type == out.type):\n # WHY DOES THIS HAPPEN?\n return False\n\n return [rval]\n\n\n@local_optimizer(None)\ndef constant_folding(node):\n for input in node.inputs:\n if not isinstance(input, Constant):\n return False\n # condition: all inputs are constant\n if not node.op.do_constant_folding(node):\n # The op asks not to be constant folded.\n return False\n\n storage_map = {i: [i.data] for i in node.inputs}\n compute_map = {i: [True] for i in node.inputs}\n for o in node.outputs:\n storage_map[o] = [None]\n compute_map[o] = [False]\n impl = None\n if hasattr(node.op, \"python_constant_folding\") and node.op.python_constant_folding(\n node\n ):\n impl = \"py\"\n thunk = node.op.make_thunk(\n node, storage_map, compute_map, no_recycling=[], impl=impl\n )\n\n required = thunk()\n assert not required # a node whose inputs are all provided should always\n # return successfully\n rval = []\n for output in node.outputs:\n assert compute_map[output][0], (output, storage_map[output][0])\n try:\n constant = output.type.Constant\n except AttributeError:\n constant = Constant\n\n v = constant(output.type, storage_map[output][0])\n copy_stack_trace(output, v)\n\n rval.append(v)\n return rval\n\n\ntopo_constant_folding = in2out(\n constant_folding, ignore_newtrees=True, name=\"topo_constant_folding\"\n)\nregister_canonicalize(topo_constant_folding, \"fast_compile\", final_opt=True)\nregister_uncanonicalize(topo_constant_folding, \"fast_compile\", final_opt=True)\nregister_stabilize(topo_constant_folding, \"fast_compile\", final_opt=True)\nregister_specialize(topo_constant_folding, \"fast_compile\", final_opt=True)\n\n\ndef get_clients(node):\n \"\"\"\n Used by erf/erfc opt to track less frequent op.\n\n \"\"\"\n return [c for c, i in node.outputs[0].clients if c != \"output\"]\n\n\ndef get_clients2(node):\n \"\"\"\n Used by erf/erfc opt to track less frequent op.\n\n \"\"\"\n l = []\n for c, i in node.outputs[0].clients:\n if c != \"output\":\n for var in c.outputs:\n l.extend([cc for cc, ii in var.clients if cc != \"output\"])\n return l\n\n\n# 1+erf(x)=>erfc(-x)\nlocal_one_plus_erf = PatternSub(\n (add, 1, (erf, \"x\")),\n (erfc, (neg, \"x\")),\n allow_multiple_clients=True,\n name=\"local_one_plus_erf\",\n tracks=[erf],\n get_nodes=get_clients,\n)\nregister_canonicalize(local_one_plus_erf)\nregister_stabilize(local_one_plus_erf)\nregister_specialize(local_one_plus_erf)\n\n# 1-erf(x)=>erfc(x)\nlocal_one_minus_erf = PatternSub(\n (sub, 1, (erf, \"x\")),\n (erfc, \"x\"),\n allow_multiple_clients=True,\n 
name=\"local_one_minus_erf\",\n)\nregister_canonicalize(local_one_minus_erf)\nregister_stabilize(local_one_minus_erf)\nregister_specialize(local_one_minus_erf)\n\nlocal_one_minus_erf2 = PatternSub(\n (add, 1, (mul, -1, (erf, \"x\"))),\n (erfc, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erf2\",\n)\nregister_canonicalize(local_one_minus_erf2)\nregister_stabilize(local_one_minus_erf2)\nregister_specialize(local_one_minus_erf2)\n\n# 1+(-erf(x))=>erfc(x) This is a different graph then the previous as\n# the canonicalize don't work completly\nlocal_one_plus_neg_erf = PatternSub(\n (add, 1, (neg, (erf, \"x\"))),\n (erfc, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_plus_neg_erf\",\n tracks=[erf],\n get_nodes=get_clients2,\n)\nregister_canonicalize(local_one_plus_neg_erf)\nregister_stabilize(local_one_plus_neg_erf)\nregister_specialize(local_one_plus_neg_erf)\n\n# (-1)+erf(x) => -erfc(x) don't need erf(x)+(-1) as the canonicalize\n# will put the -1 as the first argument.\nlocal_erf_minus_one = PatternSub(\n (add, -1, (erf, \"x\")),\n (neg, (erfc, \"x\")),\n allow_multiple_clients=True,\n name=\"local_erf_minus_one\",\n tracks=[erf],\n get_nodes=get_clients,\n)\nregister_canonicalize(local_erf_minus_one)\nregister_stabilize(local_erf_minus_one)\nregister_specialize(local_erf_minus_one)\n\n# 1-erfc(x) => erf(x)\nlocal_one_minus_erfc = PatternSub(\n (sub, 1, (erfc, \"x\")),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erfc\",\n tracks=[erfc],\n get_nodes=get_clients,\n)\nregister_canonicalize(local_one_minus_erfc)\nregister_stabilize(local_one_minus_erfc)\nregister_specialize(local_one_minus_erfc)\n\nlocal_one_minus_erfc2 = PatternSub(\n (add, 1, (neg, (erfc, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erfc2\",\n tracks=[erfc],\n get_nodes=get_clients2,\n)\nregister_canonicalize(local_one_minus_erfc2)\nregister_stabilize(local_one_minus_erfc2)\nregister_specialize(local_one_minus_erfc2)\n\nlocal_one_minus_erfc3 = PatternSub(\n (add, 1, (mul, -1, (erfc, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erfc3\",\n tracks=[erfc],\n get_nodes=get_clients2,\n)\nregister_canonicalize(local_one_minus_erfc3)\nregister_stabilize(local_one_minus_erfc3)\nregister_specialize(local_one_minus_erfc3)\n\n# 1+(-erfc(x)) => erf(x) This is a different graph then the previous as\n# the canonicalize don't work completly\nlocal_one_add_neg_erfc = PatternSub(\n (add, 1, (neg, (erfc, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_add_neg_erfc\",\n tracks=[erfc],\n get_nodes=get_clients2,\n)\n\nregister_canonicalize(local_one_add_neg_erfc)\nregister_stabilize(local_one_add_neg_erfc)\nregister_specialize(local_one_add_neg_erfc)\n\n# (-1)+erfc(-x)=>erf(x)\nlocal_erf_neg_minus_one = PatternSub(\n (add, -1, (erfc, (neg, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_erf_neg_minus_one\",\n tracks=[erfc],\n get_nodes=get_clients,\n)\nregister_canonicalize(local_erf_neg_minus_one)\nregister_stabilize(local_erf_neg_minus_one)\nregister_specialize(local_erf_neg_minus_one)\n\n# (-1)+erfc(-1*x)=>erf(x)\nlocal_erf_neg_minus_one2 = PatternSub(\n (add, -1, (erfc, (mul, -1, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_erf_neg_minus_one2\",\n tracks=[erfc],\n 
get_nodes=get_clients,\n)\nregister_canonicalize(local_erf_neg_minus_one2)\nregister_stabilize(local_erf_neg_minus_one2)\nregister_specialize(local_erf_neg_minus_one2)\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log_erfc(node):\n \"\"\"Stability optimization for `log(erfc(x))`.\n\n log(erfc(x)) => when x>threshold,\n -x**2-log(x)-.5*log(pi)+log(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))\n for float64: threshold=26.641747557 was choosed with:\n [(i,numpy.log(scipy.special.erfc(numpy.asarray([i],dtype='float64'))))\n for i in numpy.arange(26.641747557,26.6417475571,.00000000001)]\n for float32: threshold=10.0541949, [(i,numpy.log(scipy.special.erfc(\n numpy.asarray([i],dtype='float32')))) for i in numpy.arange(\n 10.0541948,10.0541951,.0000001)]\n \"\"\"\n if node.op != log:\n return False\n if not node.inputs[0].owner or node.inputs[0].owner.op != erfc:\n return False\n\n if hasattr(node.tag, \"local_log_erfc_applied\"):\n # We use that flag to don't apply the optimization recursively\n return False\n node.tag.local_log_erfc_applied = True\n\n x = node.inputs[0].owner.inputs[0]\n stab_value = (\n -(x ** 2)\n - log(x)\n - 0.5 * log(np.pi)\n + log(1 - 1 / (2 * x ** 2) + 3 / (4 * x ** 4) - 15 / (8 * x ** 6))\n )\n\n if node.outputs[0].dtype == \"float32\" or node.outputs[0].dtype == \"float16\":\n threshold = 10.0541949\n elif node.outputs[0].dtype == \"float64\":\n threshold = 26.641747557\n\n ret = tt.switch(x < threshold, node.outputs[0], stab_value)\n ret.tag.values_eq_approx = values_eq_approx_remove_inf\n return [ret]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([true_div])\ndef local_grad_log_erfc_neg(node):\n \"\"\"Stability optimization for the grad of `log(erfc(x))`.\n\n ([y*]exp(-(x**2)))/erfc(x) # The y* is optional\n ([y*]exp(x**2))/erfc(-x) => [y*](when x>threashold,\n sqrt(pi)*-x/(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6)))\n\n for float64: threshold=26.63 see at the end of the fct for the explanation\n for float32: threshold=9.3 see at the end of the fct for the explanation\n\n TODO: remove the contraint that there are only 2 inputs to exp(x**2)\n is the second.\n TODO: at the test point 10 in float32, there is instability in the original\n value. 
The original gives -30.0, the stab -20.1 and in float64 -18.1.\n Make it so that the test does not generate an error in that case!\n\n \"\"\"\n if node.op != true_div:\n return False\n if not node.inputs[1].owner or node.inputs[1].owner.op != erfc:\n return False\n erfc_in = node.inputs[1]\n erfc_x = erfc_in.owner.inputs[0]\n if not node.inputs[0].owner:\n return False\n\n # The mul is optional.\n if node.inputs[0].owner.op != mul:\n mul_in = None\n y = []\n if not node.inputs[0].owner or node.inputs[0].owner.op != tt.exp:\n return False\n exp_in = node.inputs[0]\n else:\n mul_in = node.inputs[0]\n exp_in = None\n for idx, inp in enumerate(mul_in.owner.inputs):\n if inp.owner and inp.owner.op == tt.exp:\n exp_in = inp\n break\n if len(mul_in.owner.inputs) == 2:\n y = [mul_in.owner.inputs[1 - idx]]\n else:\n y = mul_in.owner.inputs[:]\n del y[idx]\n del mul_in\n if not exp_in.owner.inputs[0].owner:\n return False\n\n if exp_in.owner.inputs[0].owner.op == neg:\n neg_in = exp_in.owner.inputs[0]\n if (\n not neg_in.owner.inputs[0].owner\n or neg_in.owner.inputs[0].owner.op != tt.sqr\n ):\n return False\n sqr_in = neg_in.owner.inputs[0]\n x = sqr_in.owner.inputs[0]\n elif exp_in.owner.inputs[0].owner.op == mul:\n # We should compare that -(erfc_x**2) is equivalent to mul_neg.\n # There is currently no easy way to do this in the general case,\n # so we implement some common case for now.\n\n # In many cases the neg are replaced by mul in the graph.\n # This also allows to stabilize log(erfc(cst*x)).\n mul_neg = exp_in.owner.inputs[0]\n\n # In case that multiple mul are not fused together, we do it here.\n def check_input(inputs):\n new_inputs = []\n for i in inputs:\n if i.owner and i.owner.op == mul:\n new_inputs.extend(check_input(i.owner.inputs))\n else:\n new_inputs.append(i)\n return new_inputs\n\n mul_inputs = check_input(mul_neg.owner.inputs)\n\n # Put the constant first.\n for i in range(len(mul_inputs)):\n if isinstance(i, Constant):\n if i == 0:\n break\n else:\n tmp = mul_inputs[0]\n mul_inputs[0] = mul_inputs[i]\n mul_inputs[i] = tmp\n break\n mul_neg = mul(*mul_inputs)\n\n try:\n cst2 = get_scalar_constant_value(\n mul_neg.owner.inputs[0], only_process_constants=True\n )\n except NotScalarConstantError:\n return False\n\n if len(mul_neg.owner.inputs) == 2:\n if (\n not mul_neg.owner.inputs[1].owner\n or mul_neg.owner.inputs[1].owner.op != tt.sqr\n ):\n return False\n sqr_in = mul_neg.owner.inputs[1]\n x = sqr_in.owner.inputs[0]\n elif len(mul_neg.owner.inputs) == 3:\n if mul_neg.owner.inputs[1] is not mul_neg.owner.inputs[2]:\n return False\n x = mul_neg.owner.inputs[1]\n else:\n return False\n\n if cst2 != -1:\n if (\n not erfc_x.owner\n or erfc_x.owner.op != mul\n or len(erfc_x.owner.inputs) != 2\n ):\n # todo implement that case\n return False\n if erfc_x.owner.inputs[1] is not mul_neg.owner.inputs[1]:\n return False\n\n x = erfc_x\n try:\n cst = get_scalar_constant_value(\n erfc_x.owner.inputs[0], only_process_constants=True\n )\n except NotScalarConstantError:\n return False\n if cst2 != -cst * 2:\n return False\n\n # The constant is valid. 
Must check that the\n elif erfc_x is not x:\n return False\n\n else:\n return False\n\n if hasattr(node.tag, \"local_grad_log_erfc_neg\"):\n # We use that flag to don't apply the optimization recursively\n return False\n\n # we move the y outside the div.\n true_div_no_mul = true_div(exp_in, erfc_in)\n true_div_no_mul.owner.tag.local_grad_log_erfc_neg = True\n\n # aaron value\n stab_value = (\n x\n * pow(1 - 1 / (2 * (x ** 2)) + 3 / (4 * (x ** 4)) - 15 / (8 * (x ** 6)), -1)\n * tt.cast(tt.sqrt(np.pi), dtype=x.dtype)\n )\n\n if x.dtype == \"float32\" or x.dtype == \"float16\":\n threshold = 9.3\n # threshold = 10.1\n elif x.dtype == \"float64\":\n threshold = 26.641747557\n ret = tt.switch(x < threshold, true_div_no_mul, stab_value)\n if y:\n ret = mul(ret, *y)\n ret.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n return [ret]\n\n\ndef local_elemwise_fusion_op(op_class, max_input_fct=lambda node: 32, maker=None):\n \"\"\"Create a recursive function that fuses `Elemwise` `Op`s.\n\n The basic idea is that we loop through an `Elemwise` node's inputs, find\n other `Elemwise` nodes, determine the scalars input types for all of the\n `Elemwise` `Op`s, construct a new scalar `Op` using the scalar input types\n and each `Elemwise`'s scalar `Op`, and use the composite scalar `Op` in a\n new \"fused\" `Elemwise`.\n\n It's parameterized in order to work for `Elemwise` and `GpuElemwise` `Op`s.\n\n Parameters\n ----------\n op_class : type\n `GpuElemwise` or `Elemwise` class (the one that we want to fuse)\n max_input_fct : callable\n A function that returns the maximum number of inputs that this `Elemwise`\n can take (useful for `GpuElemwise`). The GPU kernel currently has a\n limit of 256 bytes for the size of all parameters passed to it. As\n currently we pass a lot of information only by parameter, we must limit how\n many `Op`s we fuse together to avoid busting that 256 limit.\n\n On the CPU we limit to 32 input variables since that is the maximum\n NumPy support.\n\n maker: callable\n A function with the signature `(node, *args)` that constructs an\n `op_class` instance (e.g. `op_class(*args)`).\n\n \"\"\"\n if maker is None:\n\n def maker(node, scalar_op):\n return op_class(scalar_op)\n\n def local_fuse(node):\n \"\"\"Fuse `Elemwise` `Op`s in a node.\n\n\n As part of specialization, we fuse two consecutive elemwise `Op`s of the\n same shape.\n\n For mixed dtype, we let the `Composite` `Op` do the cast. It lets the C\n compiler do the cast.\n\n The number of dimensions is validated at call time by Aesara itself.\n\n \"\"\"\n # META TODO: PUT THESE THINGS IN TRAC, NOT TODO NOTES!!\n # TODO: use broadcast flag?\n\n # TODO: don't do this optimization as a localOptimizer.\n # Analyze the graph in terms of elemwise subgraphs, and then\n # replace each subgraph with a Composite version.\n\n # TODO: use malloc and copy to transfer arguments that don't\n # fit within the parameter space of 256 bytes\n #\n # TODO: Merge with multiple output to merge when an inputs\n # have multiple clients. This can't be done with a local\n # optimiser.\n\n # TODO: Related: Support composites with multiple outputs\n\n # TODO: Use Composite to combine Elemwise and Reduce\n # operations. We have to loop over the data anyway... might\n # as well sum it up while we're at it (this can be trickier\n # than i'm making it seound here. 
The data-traversal should be\n # done contiguously, and the summing-up might not be easy or\n # worthwhile if the summation axis doesn't line up with a\n # contiguous dimension)\n\n if type(node.op) is not op_class:\n return False\n\n if len(node.outputs) > 1:\n # We don't support fusion for nodes with multiple outputs.\n return\n\n inputs = [] # inputs of the new Elemwise op.\n s_inputs = [] # inputs of the new scalar op used by the Composite.\n # Inputs of the new scalar op that represents the current node.\n s_g = []\n\n # There is a hard limit of 256 bytes for the formal argument list to a\n # GPU kernel function.\n max_nb_input = max_input_fct(node)\n # The number of inputs to the new fused op if we do not fuse more\n # inputs.\n new_nb_input = len(node.inputs)\n # Did we fuse something?\n # Needed as we can fuse unary op that don't change the number of\n # inputs.\n # And there is a case where the inputs are the same as the current\n # node. That won't change the number of inputs of the new op.\n fused = False\n\n for i in node.inputs:\n do_fusion = False\n # Will store inputs of the fused node that are not currently inputs\n # of the node we want to create (to avoid duplicating inputs).\n tmp_input = []\n # Same as tmp_input, but for scalars.\n tmp_scalar = []\n\n # We should not check the number of inputs here\n # As fusing op don't always change the number of input.\n # If a variable is used as multiple into to the same node,\n # we still want to fusion. So we take the set.\n if (\n i.owner\n and isinstance(i.owner.op, op_class)\n and len({n for n, idx in i.clients}) == 1\n and\n # Do not merge elemwise that don't have the same\n # broadcastable pattern to don't redo duplicate\n # computation due to broadcast.\n i.owner.outputs[0].broadcastable == node.outputs[0].broadcastable\n ):\n try:\n tmp_s_input = []\n # we should not put duplicate input into s_inputs and inputs\n for ii in i.owner.inputs:\n if ii in inputs:\n tmp_s_input.append(s_inputs[inputs.index(ii)])\n elif ii in tmp_input:\n tmp_s_input.append(tmp_scalar[tmp_input.index(ii)])\n else:\n tmp = ts.get_scalar_type(ii.dtype).make_variable()\n try:\n tv = gof.op.get_test_value(ii)\n if tv.size > 0:\n tmp.tag.test_value = tv.flatten()[0]\n else:\n _logger.warning(\n \"Cannot construct a scalar test value\"\n \" from a test value with no size: {}\".format(ii)\n )\n except TestValueError:\n pass\n\n tmp_s_input.append(tmp)\n tmp_input.append(ii)\n tmp_scalar.append(tmp_s_input[-1])\n\n s_op = i.owner.op.scalar_op(*tmp_s_input, return_list=True)\n\n # if the scalar_op don't have a c implementation,\n # we skip its fusion to allow the fusion of the\n # other ops.\n i.owner.op.scalar_op.c_code(\n s_op[0].owner,\n \"test_presence_of_c_code\",\n [\"x\" for x in i.owner.inputs],\n [\"z\" for z in i.owner.outputs],\n {\"fail\": \"%(fail)s\"},\n )\n\n do_fusion = True\n\n except (NotImplementedError, MethodNotDefined):\n _logger.warning(\n (\n \"%s does not implement the c_code function.\"\n \" As well as being potentially slow, this\"\n \" disables loop fusion of this op.\"\n )\n % str(i.owner.op.scalar_op)\n )\n do_fusion = False\n\n # Compute the number of inputs in case we fuse this input.\n # We subtract 1 because we replace the existing input with the new\n # inputs from `tmp_input`.\n new_nb_input_ = new_nb_input + len(tmp_input) - 1\n\n # If the new input is already an input of the current node, it was\n # already counted when `new_nb_input` was initialized to\n # len(node.inputs).\n # This can happen when a variable is used 
both by the Elemwise to\n # fuse and the current node.\n for x in tmp_input:\n if x in node.inputs:\n new_nb_input_ -= 1\n\n if do_fusion and (new_nb_input_ <= max_nb_input):\n fused = True\n new_nb_input = new_nb_input_\n inputs.extend(tmp_input)\n s_inputs.extend(tmp_scalar)\n s_g.extend(s_op)\n else:\n # We must support the case where the same variable appears many\n # times within the inputs\n if inputs.count(i) == node.inputs.count(i):\n s = s_inputs[inputs.index(i)]\n else:\n s = ts.get_scalar_type(i.dtype).make_variable()\n try:\n if aesara.config.compute_test_value != \"off\":\n v = gof.op.get_test_value(i)\n if v.size > 0:\n s.tag.test_value = v.flatten()[0]\n except TestValueError:\n pass\n\n inputs.append(i)\n s_inputs.append(s)\n s_g.append(s)\n\n if not fused:\n return False\n\n if new_nb_input != len(inputs) or len(s_inputs) != len(inputs):\n raise Exception(\n \"\"\"Something has gone wrong with the elemwise\nfusion optimization. We skip this optimization. You can ignore this message,\nyour code will run correctly, but may be slower.\"\"\"\n )\n\n s_new_out = node.op.scalar_op(*s_g, return_list=True)\n try:\n s_new_out[0].owner.op.c_code(\n s_new_out[0].owner,\n \"test_presence_of_c_code\",\n [\"x\" for x in s_g],\n [\"z\" for x in s_new_out],\n {\"fail\": \"%(fail)s\"},\n )\n except (NotImplementedError, MethodNotDefined):\n _logger.warning(\n (\n \"%s does not implement the c_code function.\"\n \" As well as being potentially slow, this disables \"\n \"loop fusion of this op.\"\n )\n % str(s_new_out[0].owner.op)\n )\n\n # create the composite op.\n composite_op = ts.Composite(s_inputs, s_new_out)\n\n # create the new node.\n # Do not call make_node to have test_value\n new_node = maker(node, composite_op)(*inputs).owner\n\n assert len(new_node.outputs) == 1\n assert node.outputs[0].dtype == new_node.outputs[0].dtype\n\n if len(new_node.inputs) > max_nb_input:\n _logger.warning(\n \"loop fusion failed because Op would exceed\" \" kernel argument limit.\"\n )\n return False\n\n # we fuse as many that we can at the same time to make debug mode faster\n # debug mode will be faster as it won't test all intermediate step.\n while True:\n ret = local_fuse(new_node)\n if ret is not False and ret is not None:\n assert len(ret) == len(new_node.outputs)\n assert len(ret) == 1\n new_node = ret[0].owner\n else:\n break\n\n return new_node.outputs\n\n return local_fuse\n\n\ndef elemwise_max_input_fct(node):\n # The Elemwise.perform use numpy ufunc and they are limited to 31\n # inputs.\n if not aesara.config.cxx:\n return 31\n return 1024\n\n\nlocal_elemwise_fusion = local_elemwise_fusion_op(Elemwise, elemwise_max_input_fct)\n\n\nclass FusionOptimizer(Optimizer):\n \"\"\"Graph optimizer for Fusion of elemwise operations.\"\"\"\n\n def __init__(self, local_optimizer):\n Optimizer.__init__(self)\n self.optimizer = local_optimizer\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(toolbox.ReplaceValidate())\n\n def apply(self, fgraph):\n did_something = True\n nb_iter = 0\n nb_replacement = 0\n nb_inconsistency_replace = 0\n time_toposort = 0\n if fgraph.profile:\n validate_before = fgraph.profile.validate_time\n callbacks_before = fgraph.execute_callbacks_times.copy()\n callback_before = fgraph.execute_callbacks_time\n while did_something:\n t0 = time.time()\n nodelist = list(fgraph.toposort())\n time_toposort += time.time() - t0\n nodelist.reverse()\n did_something = False\n for node in nodelist:\n # Don't try to fuse node that have already been fused.\n if node in 
fgraph.apply_nodes:\n new_outputs = self.optimizer(node)\n if new_outputs:\n assert len(new_outputs) == len(node.outputs)\n try:\n fgraph.replace_all_validate(\n list(zip(node.outputs, new_outputs)),\n reason=self.__class__.__name__,\n )\n did_something = True\n nb_replacement += 1\n except InconsistencyError:\n nb_inconsistency_replace += 1\n nb_iter += 1\n\n if fgraph.profile:\n validate_time = fgraph.profile.validate_time - validate_before\n callback_time = fgraph.execute_callbacks_time - callback_before\n callbacks_time = {}\n for k, v in fgraph.execute_callbacks_times.items():\n if k in callbacks_before:\n callbacks_time[k] = v - callbacks_before[k]\n else:\n callbacks_time[k] = v\n else:\n validate_time = None\n callback_time = None\n callbacks_time = {}\n return (\n self,\n nb_iter,\n nb_replacement,\n nb_inconsistency_replace,\n validate_time,\n callback_time,\n callbacks_time,\n time_toposort,\n )\n\n @staticmethod\n def print_profile(stream, prof, level=0):\n blanc = \" \" * level\n print(blanc, \"FusionOptimizer\", file=stream)\n print(blanc, \" nb_iter\", prof[1], file=stream)\n print(blanc, \" nb_replacement\", prof[2], file=stream)\n print(blanc, \" nb_inconsistency_replace\", prof[3], file=stream)\n print(blanc, \" validate_time\", prof[4], file=stream)\n print(blanc, \" callback_time\", prof[5], file=stream)\n if prof[5] > 1:\n print(blanc, \" callbacks_time\", file=stream)\n for i in sorted(prof[6].items(), key=lambda a: a[1])[::-1]:\n if i[1] > 0:\n print(blanc, \" \", i)\n print(blanc, \" time_toposort\", prof[7], file=stream)\n\n\ndef local_add_mul_fusion(node):\n \"\"\"Fuse consecutive add or mul in one such node with more inputs.\n\n It is better to fuse add/mul that way then in a Composite node as\n this make the inner graph of the Composite smaller. 
This allow to\n put more computation in a Composite before hitting the max\n recusion limit when pickling Composite.\n\n \"\"\"\n if not isinstance(node.op, Elemwise) or not isinstance(\n node.op.scalar_op, (ts.Add, ts.Mul)\n ):\n return False\n\n s_op = node.op.scalar_op.__class__\n new_inp = []\n fused = False\n nb_inputs = len(node.inputs)\n max_inputs = float(\"inf\")\n if hasattr(node.op, \"max_inputs\"):\n max_inputs = node.op.max_inputs(node)\n for inp in node.inputs:\n if (\n inp.owner\n and isinstance(inp.owner.op, Elemwise)\n and isinstance(inp.owner.op.scalar_op, s_op)\n and\n # Do not duplicate the operation.\n len(inp.clients) == 1\n and (nb_inputs + len(inp.owner.inputs) - 1) <= max_inputs\n ):\n new_inp.extend(inp.owner.inputs)\n fused = True\n else:\n new_inp.append(inp)\n\n # We can not compare the number of inputs as Mul and Add could have\n # 0 or 1 inputs in some corner cases.\n if fused:\n output = node.op(*new_inp)\n copy_stack_trace(node.outputs[0], output)\n\n # Do the recursion here to help lower the number of\n # FusionOptimizer iteration.\n if output.owner:\n output2 = local_add_mul_fusion(output.owner)\n if output2:\n return output2\n return [output]\n\n\nif config.tensor.local_elemwise_fusion:\n _logger.debug(\"enabling optimization fusion elemwise in fast_run\")\n # Must be after gpu(48.5) and before AddDestroyHandler(49.5)\n fuse_seqopt = gof.SequenceDB()\n fuse_seqopt.register(\n \"local_add_mul_fusion\",\n FusionOptimizer(local_add_mul_fusion),\n 0,\n \"fast_run\",\n \"fusion\",\n )\n fuse_seqopt.register(\n \"composite_elemwise_fusion\",\n FusionOptimizer(local_elemwise_fusion),\n 1,\n \"fast_run\",\n \"fusion\",\n )\n compile.optdb.register(\n \"elemwise_fusion\",\n fuse_seqopt,\n 49,\n \"fast_run\",\n \"fusion\",\n \"local_elemwise_fusion\",\n \"FusionOptimizer\",\n )\nelse:\n _logger.debug(\"not enabling optimization fusion elemwise in fast_run\")\n compile.optdb.register(\n \"elemwise_fusion\",\n FusionOptimizer(local_elemwise_fusion),\n 49,\n \"fusion\",\n \"local_elemwise_fusion\",\n \"FusionOptimizer\",\n )\n\n\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_useless_composite(node):\n \"\"\"For elemwise Composite that have multiple outputs, remove the\n outputs that are not used.\n\n \"\"\"\n if not isinstance(node.op, Elemwise) or not isinstance(\n node.op.scalar_op, ts.Composite\n ):\n return\n comp = node.op.scalar_op\n idx = [i for i, o_extern in enumerate(node.outputs) if o_extern.clients]\n if len(idx) < len(node.outputs):\n new_outputs = [comp.outputs[i] for i in idx]\n c = ts.Composite(inputs=comp.inputs, outputs=new_outputs)\n e = Elemwise(scalar_op=c)(*node.inputs, return_list=True)\n return dict(zip([node.outputs[i] for i in idx], e))\n\n\n# ############################\n# # Remove consider_constant #\n# ############################\n\n\n# Although the ops ConsiderConstant, ZeroGrad and DisconnectedGrad\n# just returns the input, it should be removed from the graph to\n@register_canonicalize(\"fast_compile\")\n@register_useless(\"fast_compile\")\n@local_optimizer(None)\ndef local_view_op(node):\n if isinstance(node.op, aesara.compile.ops.ViewOp):\n return node.inputs\n\n\n@register_useless\n@register_canonicalize\n@register_stabilize\n@register_specialize\n@local_optimizer([Alloc])\ndef local_merge_alloc(node):\n # This opt takes care of several cases:\n # Alloc(Alloc(m, x, 1, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n # Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n # Alloc(Alloc(m, y1, 1, 1), x, y2, z, 
w) -> Alloc(m, x, assert(y1, y1==y2), z, w)\n if not isinstance(node.op, Alloc):\n return False\n if not node.inputs[0].owner or not isinstance(node.inputs[0].owner.op, Alloc):\n return False\n inputs_outer = node.inputs\n inputs_inner = node.inputs[0].owner.inputs\n dims_outer = inputs_outer[1:]\n dims_inner = inputs_inner[1:]\n dims_outer_rev = dims_outer[::-1]\n dims_inner_rev = dims_inner[::-1]\n # check if the pattern of broadcasting is matched, in the reversed ordering.\n # The reverse ordering is needed when an Alloc add an implicit new\n # broadcasted dimensions to its inputs[0]. Eg:\n # Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n i = 0\n for dim_inner, dim_outer in zip(dims_inner_rev, dims_outer_rev):\n if dim_inner != dim_outer:\n if isinstance(dim_inner, Constant) and dim_inner.data == 1:\n pass\n else:\n dims_outer[-1 - i] = Assert(\n \"You have a shape error in your graph. To see a better\"\n \" error message and a stack trace of where in your code\"\n \" the error is created, use the Aesara flags\"\n \" optimizer=None or optimizer=fast_compile.\"\n )(dim_outer, tt.eq(dim_outer, dim_inner))\n i += 1\n return [alloc(inputs_inner[0], *dims_outer)]\n\n\n@register_useless(\"fast_compile\")\n@local_optimizer([TopKOp])\ndef local_useless_topk(node):\n \"\"\"\n TopKOp generates two outputs by default\n This opt removes the useless ones\n\n \"\"\"\n op = node.op\n if not isinstance(op, TopKOp):\n return\n if not (op.return_values and op.return_indices):\n return False\n\n x, k = node.inputs\n ret_val = bool(node.outputs[0].clients)\n ret_idx = bool(node.outputs[1].clients)\n\n if not (ret_val ^ ret_idx):\n # both true -> nothing to remove\n # both false -> let pruner handle\n return False\n\n old_output = node.outputs[ret_idx]\n new_output = TopKOp(\n axis=op.axis,\n sorted=op.sorted,\n idx_dtype=op.idx_dtype,\n return_values=ret_val,\n return_indices=ret_idx,\n )(x, k)\n copy_stack_trace(node.outputs[0], new_output)\n return {old_output: new_output}\n",
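The row above ends with Aesara's fusion optimizations, whose local_add_mul_fusion step collapses nested applications of the same associative scalar op before Composite fusion runs. The framework-free sketch below only illustrates that flattening idea on a hypothetical Node class; it is not Aesara's Apply/Variable machinery and the max_inputs cap is an assumed stand-in for node.op.max_inputs.

# Minimal sketch of the flattening behind local_add_mul_fusion:
# nested uses of the same associative op are merged into one n-ary node.
class Node:
    def __init__(self, op, inputs):
        self.op = op          # "add", "mul", or "leaf" (hypothetical tags)
        self.inputs = inputs  # child Nodes (empty for leaves)

def flatten_same_op(node, max_inputs=32):
    """Lift children that use the same associative op up into `node`."""
    if node.op not in ("add", "mul"):
        return node
    new_inputs = []
    for child in node.inputs:
        child = flatten_same_op(child, max_inputs)
        if child.op == node.op and len(new_inputs) + len(child.inputs) <= max_inputs:
            new_inputs.extend(child.inputs)   # merge grandchildren one level up
        else:
            new_inputs.append(child)
    return Node(node.op, new_inputs)

# add(add(a, b), add(c, d)) -> add(a, b, c, d)
a, b, c, d = (Node("leaf", []) for _ in range(4))
flat = flatten_same_op(Node("add", [Node("add", [a, b]), Node("add", [c, d])]))
assert flat.op == "add" and len(flat.inputs) == 4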
"import numpy as np\nimport pytest\n\nfrom aesara import _asarray, config\nfrom aesara.scalar.basic import round_half_away_from_zero_vec, upcast\nfrom aesara.tensor import vector\nfrom aesara.tensor.inplace import (\n abs__inplace,\n add_inplace,\n arccos_inplace,\n arccosh_inplace,\n arcsin_inplace,\n arcsinh_inplace,\n arctan2_inplace,\n arctan_inplace,\n arctanh_inplace,\n ceil_inplace,\n conj_inplace,\n cos_inplace,\n cosh_inplace,\n deg2rad_inplace,\n exp2_inplace,\n exp_inplace,\n expm1_inplace,\n floor_inplace,\n int_div_inplace,\n inv_inplace,\n log1p_inplace,\n log2_inplace,\n log10_inplace,\n log_inplace,\n maximum_inplace,\n minimum_inplace,\n mod_inplace,\n mul_inplace,\n neg_inplace,\n pow_inplace,\n rad2deg_inplace,\n round_half_away_from_zero_inplace,\n round_half_to_even_inplace,\n sgn_inplace,\n sin_inplace,\n sinh_inplace,\n sqr_inplace,\n sqrt_inplace,\n sub_inplace,\n tan_inplace,\n tanh_inplace,\n true_div_inplace,\n trunc_inplace,\n xor_inplace,\n)\nfrom tests import unittest_tools as utt\nfrom tests.tensor.utils import (\n _bad_build_broadcast_binary_normal,\n _bad_runtime_broadcast_binary_normal,\n _bad_runtime_inv,\n _good_broadcast_binary_arctan2,\n _good_broadcast_binary_normal,\n _good_broadcast_div_mod_normal_float_inplace,\n _good_broadcast_pow_normal_float_pow,\n _good_broadcast_unary_arccosh,\n _good_broadcast_unary_arcsin_float,\n _good_broadcast_unary_arctanh,\n _good_broadcast_unary_normal,\n _good_broadcast_unary_normal_abs,\n _good_broadcast_unary_normal_float,\n _good_broadcast_unary_normal_float_no_complex,\n _good_broadcast_unary_normal_float_no_empty_no_complex,\n _good_broadcast_unary_normal_no_complex,\n _good_broadcast_unary_positive_float,\n _good_broadcast_unary_tan,\n _good_broadcast_unary_wide_float,\n _good_inv_inplace,\n _numpy_true_div,\n angle_eps,\n check_floatX,\n copymod,\n div_grad_rtol,\n ignore_isfinite_mode,\n inplace_func,\n makeBroadcastTester,\n upcast_float16_ufunc,\n)\n\n\nTestAddInplaceBroadcast = makeBroadcastTester(\n op=add_inplace,\n expected=lambda x, y: x + y,\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n inplace=True,\n)\n\nTestSubInplaceBroadcast = makeBroadcastTester(\n op=sub_inplace,\n expected=lambda x, y: x - y,\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n inplace=True,\n)\n\nTestMaximumInplaceBroadcast = makeBroadcastTester(\n op=maximum_inplace,\n expected=np.maximum,\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n inplace=True,\n)\n\nTestMinimumInplaceBroadcast = makeBroadcastTester(\n op=minimum_inplace,\n expected=np.minimum,\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n inplace=True,\n)\n\nTestMulInplaceBroadcast = makeBroadcastTester(\n op=mul_inplace,\n expected=lambda x, y: x * y,\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n inplace=True,\n)\n\nTestTrueDivInplaceBroadcast = makeBroadcastTester(\n op=true_div_inplace,\n expected=_numpy_true_div,\n good=copymod(\n _good_broadcast_div_mod_normal_float_inplace,\n # The output is now in float, we cannot work inplace on an int.\n without=[\"integer\", \"uint8\", \"uint16\", \"int8\"],\n ),\n 
grad_rtol=div_grad_rtol,\n inplace=True,\n)\n\nTestInvInplaceBroadcast = makeBroadcastTester(\n op=inv_inplace,\n expected=lambda x: _numpy_true_div(np.int8(1), x),\n good=_good_inv_inplace,\n bad_runtime=_bad_runtime_inv,\n grad_rtol=div_grad_rtol,\n inplace=True,\n)\n\nTestModInplaceBroadcast = makeBroadcastTester(\n op=mod_inplace,\n expected=lambda x, y: np.asarray(x % y, dtype=upcast(x.dtype, y.dtype)),\n good=copymod(\n _good_broadcast_div_mod_normal_float_inplace, [\"complex1\", \"complex2\"]\n ),\n grad_eps=1e-5,\n inplace=True,\n)\n\nTestPowInplaceBroadcast = makeBroadcastTester(\n op=pow_inplace,\n expected=lambda x, y: x ** y,\n good=_good_broadcast_pow_normal_float_pow,\n inplace=True,\n mode=ignore_isfinite_mode,\n)\n\nTestNegInplaceBroadcast = makeBroadcastTester(\n op=neg_inplace,\n expected=lambda x: -x,\n good=_good_broadcast_unary_normal,\n inplace=True,\n)\n\nTestSgnInplaceBroadcast = makeBroadcastTester(\n op=sgn_inplace,\n expected=np.sign,\n good=_good_broadcast_unary_normal_no_complex,\n inplace=True,\n)\n\nTestAbsInplaceBroadcast = makeBroadcastTester(\n op=abs__inplace,\n expected=lambda x: np.abs(x),\n good=_good_broadcast_unary_normal_abs,\n inplace=True,\n)\n\nTestIntDivInplaceBroadcast = makeBroadcastTester(\n op=int_div_inplace,\n expected=lambda x, y: check_floatX((x, y), x // y),\n good=_good_broadcast_div_mod_normal_float_inplace,\n # I don't test the grad as the output is always an integer\n # (this is not a continuous output).\n # grad=_grad_broadcast_div_mod_normal,\n inplace=True,\n)\n\nTestCeilInplaceBroadcast = makeBroadcastTester(\n op=ceil_inplace,\n expected=upcast_float16_ufunc(np.ceil),\n good=copymod(\n _good_broadcast_unary_normal_no_complex,\n without=[\"integers\", \"int8\", \"uint8\", \"uint16\"],\n ),\n # corner cases includes a lot of integers: points where Ceil is not\n # continuous (not differentiable)\n inplace=True,\n)\n\nTestFloorInplaceBroadcast = makeBroadcastTester(\n op=floor_inplace,\n expected=upcast_float16_ufunc(np.floor),\n good=copymod(\n _good_broadcast_unary_normal_no_complex,\n without=[\"integers\", \"int8\", \"uint8\", \"uint16\"],\n ),\n inplace=True,\n)\n\nTestTruncInplaceBroadcast = makeBroadcastTester(\n op=trunc_inplace,\n expected=upcast_float16_ufunc(np.trunc),\n good=_good_broadcast_unary_normal_no_complex,\n inplace=True,\n)\n\nTestRoundHalfToEvenInplaceBroadcast = makeBroadcastTester(\n op=round_half_to_even_inplace,\n expected=np.round,\n good=_good_broadcast_unary_normal_float_no_complex,\n inplace=True,\n)\n\nTestRoundHalfAwayFromZeroInplaceBroadcast = makeBroadcastTester(\n op=round_half_away_from_zero_inplace,\n expected=lambda a: round_half_away_from_zero_vec(a),\n good=_good_broadcast_unary_normal_float_no_empty_no_complex,\n inplace=True,\n)\n\nTestSqrInplaceBroadcast = makeBroadcastTester(\n op=sqr_inplace,\n expected=np.square,\n good=_good_broadcast_unary_normal,\n inplace=True,\n)\n\nTestExpInplaceBroadcast = makeBroadcastTester(\n op=exp_inplace,\n expected=np.exp,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestExp2InplaceBroadcast = makeBroadcastTester(\n op=exp2_inplace,\n expected=np.exp2,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestExpm1InplaceBroadcast = makeBroadcastTester(\n op=expm1_inplace,\n expected=np.expm1,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestLogInplaceBroadcast = makeBroadcastTester(\n op=log_inplace,\n expected=np.log,\n good=_good_broadcast_unary_positive_float,\n 
inplace=True,\n)\n\nTestLog2InplaceBroadcast = makeBroadcastTester(\n op=log2_inplace,\n expected=np.log2,\n good=_good_broadcast_unary_positive_float,\n inplace=True,\n)\n\nTestLog10InplaceBroadcast = makeBroadcastTester(\n op=log10_inplace,\n expected=np.log10,\n good=_good_broadcast_unary_positive_float,\n inplace=True,\n)\n\nTestLog1pInplaceBroadcast = makeBroadcastTester(\n op=log1p_inplace,\n expected=np.log1p,\n good=_good_broadcast_unary_positive_float,\n inplace=True,\n)\n\nTestSqrtInplaceBroadcast = makeBroadcastTester(\n op=sqrt_inplace,\n expected=np.sqrt,\n good=_good_broadcast_unary_positive_float,\n inplace=True,\n)\n\nTestDeg2radInplaceBroadcast = makeBroadcastTester(\n op=deg2rad_inplace,\n expected=np.deg2rad,\n good=_good_broadcast_unary_normal_float_no_complex,\n inplace=True,\n eps=angle_eps,\n)\n\nTestRad2degInplaceBroadcast = makeBroadcastTester(\n op=rad2deg_inplace,\n expected=np.rad2deg,\n good=_good_broadcast_unary_normal_float_no_complex,\n inplace=True,\n eps=angle_eps,\n)\n\nTestSinInplaceBroadcast = makeBroadcastTester(\n op=sin_inplace,\n expected=np.sin,\n good=_good_broadcast_unary_wide_float,\n inplace=True,\n)\n\nTestArcsinInplaceBroadcast = makeBroadcastTester(\n op=arcsin_inplace,\n expected=np.arcsin,\n good=_good_broadcast_unary_arcsin_float,\n inplace=True,\n)\n\nTestCosInplaceBroadcast = makeBroadcastTester(\n op=cos_inplace,\n expected=np.cos,\n good=_good_broadcast_unary_wide_float,\n inplace=True,\n)\n\nTestArccosInplaceBroadcast = makeBroadcastTester(\n op=arccos_inplace,\n expected=np.arccos,\n good=_good_broadcast_unary_arcsin_float,\n inplace=True,\n)\n\nTestTanInplaceBroadcast = makeBroadcastTester(\n op=tan_inplace,\n expected=np.tan,\n good=copymod(\n _good_broadcast_unary_tan, without=[\"integers\", \"int8\", \"uint8\", \"uint16\"]\n ),\n inplace=True,\n)\n\nTestArctanInplaceBroadcast = makeBroadcastTester(\n op=arctan_inplace,\n expected=np.arctan,\n good=_good_broadcast_unary_wide_float,\n inplace=True,\n)\n\nTestArctan2InplaceBroadcast = makeBroadcastTester(\n op=arctan2_inplace,\n expected=np.arctan2,\n good=copymod(\n _good_broadcast_binary_arctan2,\n without=[\"integers\", \"int8\", \"uint8\", \"uint16\", \"dtype_mixup_2\"],\n ),\n inplace=True,\n)\n\nTestCoshInplaceBroadcast = makeBroadcastTester(\n op=cosh_inplace,\n expected=np.cosh,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestArccoshInplaceBroadcast = makeBroadcastTester(\n op=arccosh_inplace,\n expected=np.arccosh,\n good=copymod(_good_broadcast_unary_arccosh, without=[\"integers\", \"uint8\"]),\n inplace=True,\n)\n\nTestSinhInplaceBroadcast = makeBroadcastTester(\n op=sinh_inplace,\n expected=np.sinh,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestArcsinhInplaceBroadcast = makeBroadcastTester(\n op=arcsinh_inplace,\n expected=np.arcsinh,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestTanhInplaceBroadcast = makeBroadcastTester(\n op=tanh_inplace,\n expected=np.tanh,\n good=_good_broadcast_unary_normal_float,\n inplace=True,\n)\n\nTestArctanhInplaceBroadcast = makeBroadcastTester(\n op=arctanh_inplace,\n expected=np.arctanh,\n good=copymod(\n _good_broadcast_unary_arctanh, without=[\"integers\", \"int8\", \"uint8\", \"uint16\"]\n ),\n inplace=True,\n)\n\nTestConjInplaceBroadcast = makeBroadcastTester(\n op=conj_inplace,\n expected=np.conj,\n good=_good_broadcast_unary_normal,\n inplace=True,\n)\n\n\[email protected](\n config.cycle_detection == \"fast\" and config.mode != \"FAST_COMPILE\",\n reason=\"Cycle 
detection is fast and mode is FAST_COMPILE\",\n)\ndef test_exp_inplace_grad_1():\n utt.verify_grad(\n exp_inplace,\n [\n np.asarray(\n [\n [1.5089518, 1.48439076, -4.7820262],\n [2.04832468, 0.50791564, -1.58892269],\n ]\n )\n ],\n )\n\n\ndef test_XOR_inplace():\n dtype = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n ]\n\n for dtype in dtype:\n x, y = vector(dtype=dtype), vector(dtype=dtype)\n l = _asarray([0, 0, 1, 1], dtype=dtype)\n r = _asarray([0, 1, 0, 1], dtype=dtype)\n ix = x\n ix = xor_inplace(ix, y)\n gn = inplace_func([x, y], ix)\n _ = gn(l, r)\n # test the in-place stuff\n assert np.all(l == np.asarray([0, 1, 1, 0])), l\n",
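The testers in the row above all check one property: the *_inplace op writes its result into the first argument's buffer rather than allocating a new array, as test_XOR_inplace verifies for xor_inplace. The NumPy-only sketch below shows the same property with np.bitwise_xor's out= argument standing in for the Aesara op; it is an analogy, not the op under test.

# In-place update: the result overwrites l's buffer, no new array is made.
import numpy as np

l = np.asarray([0, 0, 1, 1], dtype="int8")
r = np.asarray([0, 1, 0, 1], dtype="int8")
np.bitwise_xor(l, r, out=l)
assert np.array_equal(l, np.asarray([0, 1, 1, 0], dtype="int8"))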
"import warnings\n\nimport numpy as np\nimport pytest\n\nimport aesara\nimport aesara.tensor as tt\nfrom aesara import config, scalar\nfrom aesara.gof import Apply, Op, Type, utils\nfrom aesara.tensor.basic import _allclose\n\n\[email protected](scope=\"module\", autouse=True)\ndef set_aesara_flags():\n with aesara.change_flags(compute_test_value=\"raise\"):\n yield\n\n\n# Used in TestComputeTestValue.test_no_perform\nclass IncOneC(Op):\n \"\"\"\n An Op with only a C (c_code) implementation\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, input):\n input = scalar.as_scalar(input)\n output = input.type()\n return Apply(self, [input], [output])\n\n def c_code_cache_version(self):\n return (1,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (z,) = outputs\n return \"%(z)s = %(x)s + 1;\" % locals()\n\n\nclass TestComputeTestValue:\n def test_destroy_map(self):\n class SomeType(Type):\n def filter(self, data, strict=False, allow_downcast=None):\n return data\n\n class InplaceOp(Op):\n __props__ = ()\n\n def __init__(self, inplace):\n if inplace:\n self.destroy_map = {0: [0]}\n\n super().__init__()\n\n def make_node(self, input):\n return Apply(self, [input], [input.type()])\n\n def perform(self, node, inputs, outputs):\n outputs[0][0] = inputs[0]\n\n test_input = SomeType()()\n orig_object = object()\n test_input.tag.test_value = orig_object\n\n res = InplaceOp(False)(test_input)\n assert res.tag.test_value is orig_object\n\n res = InplaceOp(True)(test_input)\n assert res.tag.test_value is not orig_object\n\n def test_variable_only(self):\n x = tt.matrix(\"x\")\n x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)\n y = tt.matrix(\"y\")\n y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)\n\n # should work\n z = tt.dot(x, y)\n assert hasattr(z.tag, \"test_value\")\n f = aesara.function([x, y], z)\n assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value)\n\n # this test should fail\n y.tag.test_value = np.random.rand(6, 5).astype(config.floatX)\n with pytest.raises(ValueError):\n tt.dot(x, y)\n\n def test_compute_flag(self):\n x = tt.matrix(\"x\")\n y = tt.matrix(\"y\")\n y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)\n\n # should skip computation of test value\n aesara.config.compute_test_value = \"off\"\n z = tt.dot(x, y)\n assert not hasattr(z.tag, \"test_value\")\n\n # should fail when asked by user\n aesara.config.compute_test_value = \"raise\"\n with pytest.raises(ValueError):\n tt.dot(x, y)\n\n # test that a warning is raised if required\n aesara.config.compute_test_value = \"warn\"\n warnings.simplefilter(\"error\", UserWarning)\n try:\n with pytest.raises(UserWarning):\n tt.dot(x, y)\n finally:\n # Restore the default behavior.\n # TODO There is a cleaner way to do this in Python 2.6, once\n # Aesara drops support of Python 2.4 and 2.5.\n warnings.simplefilter(\"default\", UserWarning)\n\n def test_string_var(self):\n x = tt.matrix(\"x\")\n x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)\n y = tt.matrix(\"y\")\n y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)\n\n z = aesara.shared(np.random.rand(5, 6).astype(config.floatX))\n\n # should work\n out = tt.dot(tt.dot(x, y), z)\n assert hasattr(out.tag, \"test_value\")\n tf = aesara.function([x, y], out)\n assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value)\n\n def f(x, y, z):\n return tt.dot(tt.dot(x, y), z)\n\n # this test should fail\n z.set_value(np.random.rand(7, 6).astype(config.floatX))\n with 
pytest.raises(ValueError):\n f(x, y, z)\n\n def test_shared(self):\n x = tt.matrix(\"x\")\n x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)\n y = aesara.shared(np.random.rand(4, 6).astype(config.floatX), \"y\")\n\n # should work\n z = tt.dot(x, y)\n assert hasattr(z.tag, \"test_value\")\n f = aesara.function([x], z)\n assert _allclose(f(x.tag.test_value), z.tag.test_value)\n\n # this test should fail\n y.set_value(np.random.rand(5, 6).astype(config.floatX))\n with pytest.raises(ValueError):\n tt.dot(x, y)\n\n def test_ndarray(self):\n x = np.random.rand(2, 3).astype(config.floatX)\n y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), \"y\")\n\n # should work\n z = tt.dot(x, y)\n assert hasattr(z.tag, \"test_value\")\n f = aesara.function([], z)\n assert _allclose(f(), z.tag.test_value)\n\n # this test should fail\n x = np.random.rand(2, 4).astype(config.floatX)\n with pytest.raises(ValueError):\n tt.dot(x, y)\n\n def test_empty_elemwise(self):\n x = aesara.shared(np.random.rand(0, 6).astype(config.floatX), \"x\")\n\n # should work\n z = (x + 2) * 3\n assert hasattr(z.tag, \"test_value\")\n f = aesara.function([], z)\n assert _allclose(f(), z.tag.test_value)\n\n def test_constant(self):\n x = tt.constant(np.random.rand(2, 3), dtype=config.floatX)\n y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), \"y\")\n\n # should work\n z = tt.dot(x, y)\n assert hasattr(z.tag, \"test_value\")\n f = aesara.function([], z)\n assert _allclose(f(), z.tag.test_value)\n\n # this test should fail\n x = tt.constant(np.random.rand(2, 4), dtype=config.floatX)\n with pytest.raises(ValueError):\n tt.dot(x, y)\n\n def test_incorrect_type(self):\n\n x = tt.vector(\"x\")\n with pytest.raises(TypeError):\n # Incorrect shape for test value\n x.tag.test_value = np.empty((2, 2))\n\n x = tt.fmatrix(\"x\")\n with pytest.raises(TypeError):\n # Incorrect dtype (float64) for test value\n x.tag.test_value = np.random.rand(3, 4)\n\n def test_overided_function(self):\n # We need to test those as they mess with Exception\n # And we don't want the exception to be changed.\n x = tt.matrix()\n x.tag.test_value = np.zeros((2, 3), dtype=config.floatX)\n y = tt.matrix()\n y.tag.test_value = np.zeros((2, 2), dtype=config.floatX)\n with pytest.raises(ValueError):\n x.__mul__(y)\n\n def test_scan(self):\n # Test the compute_test_value mechanism Scan.\n k = tt.iscalar(\"k\")\n A = tt.vector(\"A\")\n k.tag.test_value = 3\n A.tag.test_value = np.random.rand(5).astype(config.floatX)\n\n def fx(prior_result, A):\n return prior_result * A\n\n # Symbolic description of the result\n result, updates = aesara.scan(\n fn=fx, outputs_info=tt.ones_like(A), non_sequences=A, n_steps=k\n )\n\n # We only care about A**k, but scan has provided us with A**1 through A**k.\n # Discard the values that we don't care about. 
Scan is smart enough to\n # notice this and not waste memory saving them.\n final_result = result[-1]\n assert hasattr(final_result.tag, \"test_value\")\n\n def test_scan_err1(self):\n # This test should fail when building fx for the first time\n k = tt.iscalar(\"k\")\n A = tt.matrix(\"A\")\n k.tag.test_value = 3\n A.tag.test_value = np.random.rand(5, 3).astype(config.floatX)\n\n def fx(prior_result, A):\n return tt.dot(prior_result, A)\n\n with pytest.raises(ValueError) as e:\n aesara.scan(fn=fx, outputs_info=tt.ones_like(A), non_sequences=A, n_steps=k)\n\n assert str(e.traceback[0].path).endswith(\"test_compute_test_value.py\")\n # We should be in the \"fx\" function defined above\n assert e.traceback[2].name == \"fx\"\n\n def test_scan_err2(self):\n # This test should not fail when building fx for the first time,\n # but when calling the scan's perform()\n k = tt.iscalar(\"k\")\n A = tt.matrix(\"A\")\n k.tag.test_value = 3\n A.tag.test_value = np.random.rand(5, 3).astype(config.floatX)\n\n def fx(prior_result, A):\n return tt.dot(prior_result, A)\n\n with pytest.raises(ValueError):\n aesara.scan(\n fn=fx, outputs_info=tt.ones_like(A.T), non_sequences=A, n_steps=k\n )\n\n with pytest.raises(ValueError, match=\"^could not broadcast input\"):\n aesara.scan(\n fn=fx, outputs_info=tt.ones_like(A.T), non_sequences=A, n_steps=k\n )\n\n def test_no_c_code(self):\n class IncOnePython(Op):\n \"\"\"\n An Op with only a Python (perform) implementation\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, input):\n input = scalar.as_scalar(input)\n output = input.type()\n return Apply(self, [input], [output])\n\n def perform(self, node, inputs, outputs):\n (input,) = inputs\n (output,) = outputs\n output[0] = input + 1\n\n i = scalar.int32(\"i\")\n i.tag.test_value = 3\n\n o = IncOnePython()(i)\n\n # Check that the c_code function is not implemented\n with pytest.raises((NotImplementedError, utils.MethodNotDefined)):\n o.owner.op.c_code(o.owner, \"o\", [\"x\"], \"z\", {\"fail\": \"\"})\n\n assert hasattr(o.tag, \"test_value\")\n assert o.tag.test_value == 4\n\n @pytest.mark.skipif(\n not aesara.config.cxx, reason=\"G++ not available, so we need to skip this test.\"\n )\n def test_no_perform(self):\n i = scalar.int32(\"i\")\n i.tag.test_value = 3\n\n # Class IncOneC is defined outside of the TestComputeTestValue\n # so it can be pickled and unpickled\n o = IncOneC()(i)\n\n # Check that the perform function is not implemented\n with pytest.raises((NotImplementedError, utils.MethodNotDefined)):\n o.owner.op.perform(o.owner, 0, [None])\n\n assert hasattr(o.tag, \"test_value\")\n assert o.tag.test_value == 4\n\n def test_disabled_during_compilation(self):\n # We test that it is disabled when we include deep copy in the code\n # This don't test that it is disabled during optimization, but the code do it.\n init_Mu1 = aesara.shared(np.zeros((5,), dtype=config.floatX)).dimshuffle(\"x\", 0)\n\n aesara.function([], outputs=[init_Mu1])\n",
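The test file above exercises the compute_test_value mechanism: with the flag enabled, every tagged variable's test value is propagated eagerly, so shape errors appear at graph-construction time rather than when the compiled function runs. The condensed, standalone snippet below mirrors the pattern from test_variable_only using only APIs that appear in that file.

import numpy as np
import aesara
import aesara.tensor as tt
from aesara import config

with aesara.change_flags(compute_test_value="raise"):
    x = tt.matrix("x")
    x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
    y = tt.matrix("y")
    y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)

    z = tt.dot(x, y)                       # test value computed eagerly
    assert z.tag.test_value.shape == (3, 5)

    y.tag.test_value = np.random.rand(6, 5).astype(config.floatX)
    try:
        tt.dot(x, y)                       # incompatible shapes fail immediately
    except ValueError:
        pass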
"\"\"\"This script trigger convolution operation. We think it cause more\nGPU power consumption then gemm call.\n\n\"\"\"\n\n\nimport numpy as np\n\nimport aesara\nimport aesara.tensor as tt\nfrom aesara.gpuarray import dnn\nfrom aesara.tensor.nnet.abstract_conv import get_conv_output_shape\n\n\ndef burn():\n sz = 128\n img_shp = [sz, sz, sz, sz]\n kern_shp = [sz // 2, sz, 3, 3]\n out_shp = get_conv_output_shape(img_shp, kern_shp, \"valid\", (1, 1))\n img = tt.tensor4(\"img\")\n kern = tt.tensor4(\"kern\")\n out = tt.tensor4(\"out\")\n\n def rand(shp):\n return np.random.rand(*shp).astype(aesara.config.floatX)\n\n img = aesara.shared(rand(img_shp))\n kern = aesara.shared(rand(kern_shp))\n out = aesara.shared(rand(out_shp))\n # beta 1 is needed to force the reuse of out, otherwise, it is\n # replaced by a GpuAllocEmpty\n o1 = dnn._dnn_conv(img, kern, conv_mode=\"conv\", out=out, beta=1.0)\n mode = aesara.compile.get_default_mode().including(\"local_remove_all_assert\")\n f = aesara.function([], [o1], mode=mode)\n aesara.printing.debugprint(f)\n print(\"Start computation\")\n for i in range(10000):\n f.fn()\n print(\"Computation stopped\")\n\n\nif __name__ == \"__main__\":\n burn()\n",
"import numpy as np\nimport pytest\n\nimport aesara\nfrom aesara import tensor\nfrom aesara.compile import In, config, pfunc, shared\nfrom aesara.tensor import dmatrices, dmatrix, iscalar, lscalar\n\n\ndef data_of(s):\n # Return the raw value of a shared variable\n return s.container.storage[0]\n\n\nclass TestPfunc:\n def test_doc(self):\n # Ensure the code given in pfunc.txt works as expected\n\n # Example #1.\n a = lscalar()\n b = shared(1)\n f1 = pfunc([a], (a + b))\n f2 = pfunc([In(a, value=44)], a + b, updates={b: b + 1})\n assert b.get_value() == 1\n assert f1(3) == 4\n assert f2(3) == 4\n assert b.get_value() == 2\n assert f1(3) == 5\n b.set_value(0)\n assert f1(3) == 3\n\n # Example #2.\n a = tensor.lscalar()\n b = shared(7)\n f1 = pfunc([a], a + b)\n f2 = pfunc([a], a * b)\n assert f1(5) == 12\n b.set_value(8)\n assert f1(5) == 13\n assert f2(4) == 32\n\n def test_shared(self):\n\n # CHECK: two functions (f1 and f2) can share w\n w = shared(np.random.rand(2, 2), \"w\")\n wval = w.get_value(borrow=False)\n\n x = dmatrix()\n out1 = w + x\n out2 = w * x\n f1 = pfunc([x], [out1])\n f2 = pfunc([x], [out2])\n xval = np.random.rand(2, 2)\n assert np.all(f1(xval) == xval + wval)\n assert np.all(f2(xval) == xval * wval)\n\n # CHECK: updating a shared value\n f3 = pfunc([x], out1, updates=[(w, (w - 1))])\n # f3 changes the value of w\n assert np.all(f3(xval) == xval + wval)\n # this same value is read by f1\n assert np.all(f1(xval) == xval + (wval - 1))\n\n w.set_value(w.get_value(borrow=True) * 10, borrow=True)\n # this same value is read by f1\n assert np.all(f1(xval) == xval + w.get_value(borrow=True))\n\n def test_no_shared_as_input(self):\n # Test that shared variables cannot be used as function inputs.\n w_init = np.random.rand(2, 2)\n w = shared(w_init.copy(), \"w\")\n with pytest.raises(\n TypeError, match=r\"^Cannot use a shared variable \\(w\\) as explicit input\"\n ):\n pfunc([w], aesara.tensor.sum(w * w))\n\n def test_default_container(self):\n # Ensure it is possible to (implicitly) use a shared variable in a\n # function, as a 'state' that can be updated at will.\n\n rng = np.random.RandomState(1827)\n w_init = rng.rand(5)\n w = shared(w_init.copy(), \"w\")\n reg = aesara.tensor.sum(w * w)\n f = pfunc([], reg)\n\n assert f() == np.sum(w_init * w_init)\n # Change the value of w and ensure the output changes accordingly.\n w.set_value(w.get_value(borrow=True) + 1.0, borrow=True)\n assert f() == np.sum((w_init + 1) ** 2)\n\n def test_default_scalar_container(self):\n # Similar in spirit to test_default_container, but updating a scalar\n # variable. 
This is a sanity check for non mutable types.\n x = shared(0.0, \"x\")\n f = pfunc([], x)\n assert f() == 0\n x.set_value(x.get_value(borrow=True) + 1, borrow=True)\n assert f() == 1\n\n def test_param_strict(self):\n\n a = tensor.dvector()\n b = shared(7)\n out = a + b\n\n f = pfunc([In(a, strict=False)], [out])\n # works, rand generates float64 by default\n f(np.random.rand(8))\n # works, casting is allowed\n f(np.array([1, 2, 3, 4], dtype=\"int32\"))\n\n f = pfunc([In(a, strict=True)], [out])\n try:\n # fails, f expects float64\n f(np.array([1, 2, 3, 4], dtype=\"int32\"))\n except TypeError:\n pass\n\n def test_param_mutable(self):\n a = tensor.dvector()\n a_out = a * 2 # assuming the op which makes this \"in place\" triggers\n\n # using mutable=True will let fip change the value in aval\n fip = pfunc([In(a, mutable=True)], [a_out], mode=\"FAST_RUN\")\n aval = np.random.rand(10)\n aval2 = aval.copy()\n assert np.all(fip(aval) == (aval2 * 2))\n assert not np.all(aval == aval2)\n\n # using mutable=False should leave the input untouched\n f = pfunc([In(a, mutable=False)], [a_out], mode=\"FAST_RUN\")\n aval = np.random.rand(10)\n aval2 = aval.copy()\n assert np.all(f(aval) == (aval2 * 2))\n assert np.all(aval == aval2)\n\n def test_shared_mutable(self):\n bval = np.arange(5)\n b = shared(bval)\n b_out = b * 2\n\n # shared vars copy args.\n assert b.get_value(borrow=True) is not bval\n # so we do this to get at the underlying data\n bval = data_of(b)\n\n # by default, shared are not mutable unless doing an explicit update\n f = pfunc([], [b_out], mode=\"FAST_RUN\")\n assert (f() == np.arange(5) * 2).all()\n assert np.all(b.get_value(borrow=True) == np.arange(5))\n\n # using updates, b is now a mutable parameter\n f = pfunc([], [b_out], updates=[(b, b_out)], mode=\"FAST_RUN\")\n assert (f() == (np.arange(5) * 2)).all()\n # because of the update\n assert (b.get_value(borrow=True) == (np.arange(5) * 2)).all()\n assert (bval == (np.arange(5) * 2)).all() # because of mutable=True\n\n # do not depend on updates being in-place though!\n bval = np.arange(5)\n b.set_value(bval, borrow=True)\n bval = data_of(b)\n f = pfunc([], [b_out], updates=[(b, (b_out + 3))], mode=\"FAST_RUN\")\n assert (f() == (np.arange(5) * 2)).all()\n # because of the update\n assert (b.get_value(borrow=True) == ((np.arange(5) * 2) + 3)).all()\n # bval got modified to something...\n assert not (bval == np.arange(5)).all()\n # ... but not to b.value !\n assert not (bval == b.get_value(borrow=True)).all()\n\n def test_param_allow_downcast_int(self):\n a = tensor.wvector(\"a\") # int16\n b = tensor.bvector(\"b\") # int8\n c = tensor.bscalar(\"c\") # int8\n f = pfunc(\n [\n In(a, allow_downcast=True),\n In(b, allow_downcast=False),\n In(c, allow_downcast=None),\n ],\n (a + b + c),\n )\n\n # Both values are in range. 
Since they're not ndarrays (but lists),\n # they will be converted, and their value checked.\n assert np.all(f([3], [6], 1) == 10)\n\n # Values are in range, but a dtype too large has explicitly been given\n # For performance reasons, no check of the data is explicitly performed\n # (It might be OK to change this in the future.)\n with pytest.raises(TypeError):\n f([3], np.array([6], dtype=\"int16\"), 1)\n\n # Value too big for a, silently ignored\n assert np.all(f([2 ** 20], np.ones(1, dtype=\"int8\"), 1) == 2)\n\n # Value too big for b, raises TypeError\n with pytest.raises(TypeError):\n f([3], [312], 1)\n\n # Value too big for c, raises TypeError\n with pytest.raises(TypeError):\n f([3], [6], 806)\n\n def test_param_allow_downcast_floatX(self):\n a = tensor.fscalar(\"a\")\n b = tensor.fscalar(\"b\")\n c = tensor.fscalar(\"c\")\n\n f = pfunc(\n [\n In(a, allow_downcast=True),\n In(b, allow_downcast=False),\n In(c, allow_downcast=None),\n ],\n (a + b + c),\n )\n\n # If the values can be accurately represented, everything is OK\n assert np.all(f(0, 0, 0) == 0)\n\n # If allow_downcast is True, idem\n assert np.allclose(f(0.1, 0, 0), 0.1)\n\n # If allow_downcast is False, nope\n with pytest.raises(TypeError):\n f(0, 0.1, 0)\n\n # If allow_downcast is None, it should work iff floatX=float32\n if config.floatX == \"float32\":\n assert np.allclose(f(0, 0, 0.1), 0.1)\n else:\n with pytest.raises(TypeError):\n f(0, 0, 0.1)\n\n def test_param_allow_downcast_vector_floatX(self):\n a = tensor.fvector(\"a\")\n b = tensor.fvector(\"b\")\n c = tensor.fvector(\"c\")\n\n f = pfunc(\n [\n In(a, allow_downcast=True),\n In(b, allow_downcast=False),\n In(c, allow_downcast=None),\n ],\n (a + b + c),\n )\n\n # If the values can be accurately represented, everything is OK\n z = [0]\n assert np.all(f(z, z, z) == 0)\n\n # If allow_downcast is True, idem\n assert np.allclose(f([0.1], z, z), 0.1)\n\n # If allow_downcast is False, nope\n with pytest.raises(TypeError):\n f(z, [0.1], z)\n\n # If allow_downcast is None, like False\n with pytest.raises(TypeError):\n f(z, z, [0.1])\n\n def test_allow_input_downcast_int(self):\n a = tensor.wvector(\"a\") # int16\n b = tensor.bvector(\"b\") # int8\n c = tensor.bscalar(\"c\") # int8\n\n f = pfunc([a, b, c], (a + b + c), allow_input_downcast=True)\n # Value too big for a, b, or c, silently ignored\n assert f([2 ** 20], [1], 0) == 1\n assert f([3], [312], 0) == 59\n assert f([3], [1], 806) == 42\n\n g = pfunc([a, b, c], (a + b + c), allow_input_downcast=False)\n # All values are in range. 
Since they're not ndarrays (but lists\n # or scalars), they will be converted, and their value checked.\n assert np.all(g([3], [6], 0) == 9)\n\n # Values are in range, but a dtype too large has explicitly been given\n # For performance reasons, no check of the data is explicitly performed\n # (It might be OK to change this in the future.)\n with pytest.raises(TypeError):\n g([3], np.array([6], dtype=\"int16\"), 0)\n\n # Value too big for b, raises TypeError\n with pytest.raises(TypeError):\n g([3], [312], 0)\n\n h = pfunc([a, b, c], (a + b + c)) # Default: allow_input_downcast=None\n # Everything here should behave like with False\n assert np.all(h([3], [6], 0) == 9)\n with pytest.raises(TypeError):\n h([3], np.array([6], dtype=\"int16\"), 0)\n with pytest.raises(TypeError):\n h([3], [312], 0)\n\n def test_allow_downcast_floatX(self):\n a = tensor.fscalar(\"a\")\n b = tensor.fvector(\"b\")\n\n f = pfunc([a, b], (a + b), allow_input_downcast=True)\n g = pfunc([a, b], (a + b), allow_input_downcast=False)\n h = pfunc([a, b], (a + b), allow_input_downcast=None)\n\n # If the values can be accurately represented, OK\n assert np.all(f(0, [0]) == 0)\n assert np.all(g(0, [0]) == 0)\n assert np.all(h(0, [0]) == 0)\n\n # For the vector: OK iff allow_input_downcast is True\n assert np.allclose(f(0, [0.1]), 0.1)\n with pytest.raises(TypeError):\n g(0, [0.1])\n with pytest.raises(TypeError):\n h(0, [0.1])\n\n # For the scalar: OK if allow_input_downcast is True,\n # or None and floatX==float32\n assert np.allclose(f(0.1, [0]), 0.1)\n with pytest.raises(TypeError):\n g(0.1, [0])\n if config.floatX == \"float32\":\n assert np.allclose(h(0.1, [0]), 0.1)\n else:\n with pytest.raises(TypeError):\n h(0.1, [0])\n\n def test_update(self):\n # Test update mechanism in different settings.\n\n # Simple value assignment.\n x = shared(0)\n assign = pfunc([], [], updates={x: 3})\n assign()\n assert x.get_value() == 3\n\n # Basic increment function.\n x.set_value(0)\n inc = pfunc([], [], updates={x: x + 1})\n inc()\n assert x.get_value() == 1\n\n # Increment by a constant value.\n x.set_value(-1)\n y = shared(2)\n inc_by_y = pfunc([], [], updates={x: x + y})\n inc_by_y()\n assert x.get_value() == 1\n\n def test_update_err_broadcast(self):\n # Test that broadcastable dimensions raise error\n data = np.random.rand(10, 10).astype(\"float32\")\n output_var = shared(name=\"output\", value=data)\n\n # the update_var has type matrix, and the update expression\n # is a broadcasted scalar, and that should be allowed.\n with pytest.raises(TypeError):\n aesara.function(\n inputs=[],\n outputs=[],\n updates={output_var: output_var.sum().dimshuffle(\"x\", \"x\")},\n )\n\n def test_duplicate_updates(self):\n x, y = dmatrices(\"x\", \"y\")\n z = shared(np.ones((2, 3)))\n with pytest.raises(ValueError):\n aesara.function([x, y], [z], updates=[(z, (z + x + y)), (z, (z - x))])\n\n def test_givens(self):\n x = shared(0)\n assign = pfunc([], x, givens={x: 3})\n assert assign() == 3\n assert x.get_value(borrow=True) == 0\n\n y = tensor.ivector()\n f = pfunc([y], (y * x), givens={x: 6})\n assert np.all(f([1, 1, 1]) == [6, 6, 6])\n assert x.get_value() == 0\n\n z = tensor.ivector()\n c = z * y\n f = pfunc([y], (c + 7), givens={z: aesara._asarray([4, 4, 4], dtype=\"int32\")})\n assert np.all(f([1, 1, 1]) == [11, 11, 11])\n assert x.get_value() == 0\n\n def test_clone0(self):\n x = shared(np.asarray([4, 4, 4]))\n y = shared(np.asarray([4, 4, 4]))\n z = shared(np.asarray([2, 2, 2]))\n up = pfunc(\n [], [], updates={x: (x * 5), y: ((x * 5) + 
y), z: (((x * 5) + y) ** z)}\n )\n\n up()\n assert np.all(x.get_value() == 20)\n assert np.all(y.get_value() == 24)\n assert np.all(z.get_value() == (24 ** 2))\n\n def test_default_updates(self):\n x = shared(0)\n x.default_update = x + 1\n\n f = pfunc([], [x])\n f()\n assert x.get_value() == 1\n\n del x.default_update\n f()\n assert x.get_value() == 2\n\n g = pfunc([], [x])\n g()\n assert x.get_value() == 2\n\n def test_no_default_updates(self):\n x = shared(0)\n y = shared(1)\n x.default_update = x + 2\n\n # Test that the default update is taken into account in the right cases\n f1 = pfunc([], [x], no_default_updates=True)\n f1()\n assert x.get_value() == 0\n\n f2 = pfunc([], [x], no_default_updates=[x])\n f2()\n assert x.get_value() == 0\n\n f3 = pfunc([], [x], no_default_updates=[x, y])\n f3()\n assert x.get_value() == 0\n\n f4 = pfunc([], [x], no_default_updates=[y])\n f4()\n assert x.get_value() == 2\n\n f5 = pfunc([], [x], no_default_updates=[])\n f5()\n assert x.get_value() == 4\n\n f5 = pfunc([], [x], no_default_updates=False)\n f5()\n assert x.get_value() == 6\n\n with pytest.raises(TypeError):\n pfunc([], [x], no_default_updates=(x))\n with pytest.raises(TypeError):\n pfunc([], [x], no_default_updates=x)\n with pytest.raises(TypeError):\n pfunc([], [x], no_default_updates=\"canard\")\n\n # Mix explicit updates and no_default_updates\n g1 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=True)\n g1()\n assert x.get_value() == 5\n\n g2 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[x])\n g2()\n assert x.get_value() == 4\n\n g3 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[x, y])\n g3()\n assert x.get_value() == 3\n\n g4 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[y])\n g4()\n assert x.get_value() == 2\n\n g5 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=[])\n g5()\n assert x.get_value() == 1\n\n g5 = pfunc([], [x], updates=[(x, (x - 1))], no_default_updates=False)\n g5()\n assert x.get_value() == 0\n\n def test_default_updates_expressions(self):\n x = shared(0)\n y = shared(1)\n a = lscalar(\"a\")\n\n z = a * x\n x.default_update = x + y\n\n f1 = pfunc([a], z)\n f1(12)\n assert x.get_value() == 1\n\n f2 = pfunc([a], z, no_default_updates=True)\n assert f2(7) == 7\n assert x.get_value() == 1\n\n f3 = pfunc([a], z, no_default_updates=[x])\n assert f3(9) == 9\n assert x.get_value() == 1\n\n def test_default_updates_multiple(self):\n x = shared(0)\n y = shared(1)\n\n x.default_update = x - 1\n y.default_update = y + 1\n\n f1 = pfunc([], [x, y])\n f1()\n assert x.get_value() == -1\n assert y.get_value() == 2\n\n f2 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=[y])\n f2()\n assert x.get_value() == -3\n assert y.get_value() == 2\n\n f3 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=True)\n f3()\n assert x.get_value() == -5\n assert y.get_value() == 2\n\n f4 = pfunc([], [x, y], updates=[(y, (y - 2))])\n f4()\n assert x.get_value() == -6\n assert y.get_value() == 0\n\n def test_default_updates_chained(self):\n x = shared(2)\n y = shared(1)\n z = shared(-1)\n\n x.default_update = x - y\n y.default_update = z\n z.default_update = z - 1\n\n f1 = pfunc([], [x])\n f1()\n assert x.get_value() == 1\n assert y.get_value() == -1\n assert z.get_value() == -2\n\n f2 = pfunc([], [x, y])\n f2()\n assert x.get_value() == 2\n assert y.get_value() == -2\n assert z.get_value() == -3\n\n f3 = pfunc([], [y])\n f3()\n assert x.get_value() == 2\n assert y.get_value() == -3\n assert z.get_value() == -4\n\n f4 
= pfunc([], [x, y], no_default_updates=[x])\n f4()\n assert x.get_value() == 2\n assert y.get_value() == -4\n assert z.get_value() == -5\n\n f5 = pfunc([], [x, y, z], no_default_updates=[z])\n f5()\n assert x.get_value() == 6\n assert y.get_value() == -5\n assert z.get_value() == -5\n\n def test_default_updates_input(self):\n x = shared(0)\n y = shared(1)\n if aesara.configdefaults.python_int_bitwidth() == 32:\n a = iscalar(\"a\")\n else:\n a = lscalar(\"a\")\n\n x.default_update = y\n y.default_update = y + a\n\n f1 = pfunc([], x, no_default_updates=True)\n f1()\n assert x.get_value() == 0\n assert y.get_value() == 1\n\n f2 = pfunc([], x, no_default_updates=[x])\n f2()\n assert x.get_value() == 0\n assert y.get_value() == 1\n\n f3 = pfunc([], x, no_default_updates=[y])\n f3()\n assert x.get_value() == 1\n assert y.get_value() == 1\n\n f4 = pfunc([a], x)\n f4(2)\n assert x.get_value() == 1\n assert y.get_value() == 3\n\n f5 = pfunc([], x, updates={y: (y - 1)})\n f5()\n assert x.get_value() == 3\n assert y.get_value() == 2\n\n # a is needed as input if y.default_update is used\n with pytest.raises(aesara.gof.MissingInputError):\n pfunc([], x)\n\n def test_default_updates_partial_graph(self):\n a = shared(0)\n a.default_update = a + 1 # Increment a each time it is used\n b = 2 * a\n # Use only the tip of the graph, a is not used\n f = pfunc([b], b)\n assert a.get_value() == 0\n f(21)\n assert a.get_value() == 0\n\n def test_givens_replaces_shared_variable(self):\n a = shared(1.0, \"a\")\n a.default_update = a + 3.0\n b = tensor.dscalar(\"b\")\n c = a + 10\n f = pfunc([b], c, givens={a: b})\n\n assert len(f.maker.fgraph.inputs) == 1\n assert len(f.maker.fgraph.outputs) == 1\n\n def test_givens_replaces_shared_variable2(self):\n a = shared(1.0, \"a\")\n a.default_update = a + 3\n c = a + 10\n f = pfunc([], c, givens={a: (a + 10)})\n\n assert f() == 21\n assert f() == 34\n\n def test_duplicate_inputs(self):\n x = aesara.tensor.lscalar(\"x\")\n with pytest.raises(aesara.compile.UnusedInputError):\n aesara.function([x, x, x], x)\n\n def test_update_same(self):\n # There was a bug in CVM, triggered when a shared variable\n # was its own update expression.\n a = shared(1.0, \"a\")\n b = shared(np.ones((2, 3)), \"b\")\n\n # The order of the variables is not determined, so we try\n # both shared variables.\n # TODO: explain the above comment. By \"not determined\" does\n # this mean \"not deterministic\"?\n # This test originally wrote the updates using dictionaries,\n # and iterating over the dictionary was not deterministic.\n # Is that all the comment above meant, or is the CVM intended\n # to add extra non-determinism? 
Or is the CVM meant to\n # deterministically but arbitrarily pick an order for the updates?\n f = aesara.function([], [], updates=[(a, a), (b, (2 * b))])\n g = aesara.function([], [], updates=[(a, (a * 2)), (b, b)])\n\n f()\n assert a.get_value(borrow=True).shape == (), a.get_value()\n assert b.get_value(borrow=True).shape == (2, 3), b.get_value()\n g()\n assert a.get_value(borrow=True).shape == (), a.get_value()\n assert b.get_value(borrow=True).shape == (2, 3), b.get_value()\n\n def test_update_equiv(self):\n # Like test_update_same, but the update expression is simplified until\n # it is found to be equal to the original variable\n a = shared(1.0, \"a\")\n b = shared(np.ones((2, 3)), \"b\")\n\n # See comment in test_update_same about why we try both\n # shared variables.\n f = aesara.function([], [], updates=[(a, a), (b, (2 * b - b))])\n g = aesara.function([], [], updates=[(a, (a * 2 - a)), (b, b)])\n\n f()\n assert a.get_value(borrow=True).shape == (), a.get_value()\n assert b.get_value(borrow=True).shape == (2, 3), b.get_value()\n g()\n assert a.get_value(borrow=True).shape == (), a.get_value()\n assert b.get_value(borrow=True).shape == (2, 3), b.get_value()\n\n\nclass TestAliasingRules:\n # 1. Aesara manages its own memory space, which typically does not overlap\n # with the memory of normal python variables that the user uses.\n #\n # 2. shared variables are allocated in this memory space, as are the\n # temporaries used for Function evalution.\n #\n # 3. Physically, this managed memory space may be spread across the host,\n # on a GPU device(s), or even on a remote machine.\n #\n # 4. Aesara assumes that shared variables are never aliased to one another,\n # and tries to make it impossible to accidentally alias them.\n #\n # 5. Aesara's managed data is constant while Aesara Functions are not running\n # and aesara library code is not running.\n #\n # 6. The default behaviour of Function is to return user-space values for\n # outputs, but this can be overridden (borrow=True) for better performance,\n # in which case the returned value may be aliased to managed memory, and\n # potentially invalidated by the next Aesara Function call or call to aesara\n # library code.\n\n def shared(self, x):\n return tensor._shared(x)\n\n def test_shared_constructor_copies(self):\n # shared constructor makes copy\n # (rule #2)\n orig_a = np.zeros((2, 2))\n A = self.shared(orig_a)\n assert not np.may_share_memory(orig_a, data_of(A))\n\n # rule #2 reading back from aesara-managed memory\n assert not np.may_share_memory(A.get_value(borrow=False), data_of(A))\n\n def test_sparse_input_aliasing_affecting_inplace_operations(self):\n sp = pytest.importorskip(\"scipy\", minversion=\"0.7.0\")\n\n from aesara import sparse\n\n # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b,\n # you need to make in inputs mutable (so that inplace\n # operations are used) and to break the elemwise composition\n # with some non-elemwise op (here dot)\n\n x = sparse.SparseType(\"csc\", dtype=\"float64\")()\n y = sparse.SparseType(\"csc\", dtype=\"float64\")()\n f = aesara.function(\n [aesara.In(x, mutable=True), aesara.In(y, mutable=True)], (x + y) + (x + y)\n )\n # Test 1. 
If the same variable is given twice\n\n # Compute bogus values\n m = sp.sparse.csc_matrix(\n np.asarray(\n [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ],\n dtype=\"float64\",\n )\n )\n bogus_vals = f(m, m)\n # Since we used inplace operation v and m may be corrupted\n # so we need to recreate them\n\n m = sp.sparse.csc_matrix(\n np.asarray(\n [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ],\n dtype=\"float64\",\n )\n )\n m_copy = m.copy()\n vals = f(m, m_copy)\n\n assert np.allclose(vals.todense(), bogus_vals.todense())\n\n def test_input_aliasing_affecting_inplace_operations(self):\n\n # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b,\n # you need to make in inputs mutable (so that inplace\n # operations are used) and to break the elemwise composition\n # with some non-elemwise op (here dot)\n x = aesara.tensor.dvector()\n y = aesara.tensor.dvector()\n m1 = aesara.tensor.dmatrix()\n m2 = aesara.tensor.dmatrix()\n f = aesara.function(\n [\n aesara.In(x, mutable=True),\n aesara.In(y, mutable=True),\n aesara.In(m1, mutable=True),\n aesara.In(m2, mutable=True),\n ],\n aesara.dot((x * 2), m1) + aesara.dot((y * 3), m2),\n )\n # Test 1. If the same variable is given twice\n\n # Compute bogus values\n v = np.asarray([1, 2, 3, 4, 5], dtype=\"float64\")\n m = np.asarray(\n [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ],\n dtype=\"float64\",\n )\n bogus_vals = f(v, v, m, m)\n # Since we used inplace operation v and m may be corrupted\n # so we need to recreate them\n\n v = np.asarray([1, 2, 3, 4, 5], dtype=\"float64\")\n m = np.asarray(\n [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ],\n dtype=\"float64\",\n )\n m_copy = m.copy()\n v_copy = v.copy()\n vals = f(v, v_copy, m, m_copy)\n\n assert np.allclose(vals, bogus_vals)\n\n def test_partial_input_aliasing_affecting_inplace_operations(self):\n\n # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b,\n # you need to make in inputs mutable ( so that inplace\n # operations are used) and to break the elemwise composition\n # with some non-elemwise op ( here dot )\n x = aesara.tensor.dvector()\n y = aesara.tensor.dvector()\n z = aesara.tensor.dvector()\n m1 = aesara.tensor.dmatrix()\n m2 = aesara.tensor.dmatrix()\n m3 = aesara.tensor.dmatrix()\n\n # Test 2. 
If variables only partial overlap\n # more exactly we care about the case when we have a,b,c\n # and a shares memory with b, b shares memory with c, but\n # c does not share memory with a\n\n f = aesara.function(\n [\n aesara.In(x, mutable=True),\n aesara.In(y, mutable=True),\n aesara.In(z, mutable=True),\n aesara.In(m1, mutable=True),\n aesara.In(m2, mutable=True),\n aesara.In(m3, mutable=True),\n ],\n (\n aesara.dot((x * 2), m1)\n + aesara.dot((y * 3), m2)\n + aesara.dot((z * 4), m3)\n ),\n )\n\n # Compute bogus values\n v = np.asarray([1, 2, 3, 4, 5], dtype=\"float64\")\n m = np.asarray([[1, 0], [0, 1]], dtype=\"float64\")\n bogus_vals = f(v[:2], v[1:3], v[2:4], m, m, m)\n # Since we used inplace operation v and m may be corrupted\n # so we need to recreate them\n\n v = np.asarray([1, 2, 3, 4, 5], dtype=\"float64\")\n m = np.asarray([[1, 0], [0, 1]], dtype=\"float64\")\n m_copy1 = m.copy()\n v_copy1 = v.copy()\n m_copy2 = m.copy()\n v_copy2 = v.copy()\n vals = f(v[:2], v_copy1[1:3], v_copy2[2:4], m, m_copy1, m_copy2)\n\n assert np.allclose(vals, bogus_vals)\n\n def test_potential_output_aliasing_induced_by_updates(self):\n\n A = self.shared(np.zeros((2, 2)))\n B = self.shared(np.zeros((2, 2)))\n C = np.zeros((2, 2))\n D = tensor.dmatrix()\n DD = D + 5\n\n f = pfunc([D], [], updates=[(A, D), (B, D)])\n f(C)\n\n assert not np.may_share_memory(data_of(A), data_of(B))\n f = pfunc([D], [], updates=[(A, D[:]), (B, D)])\n f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n f = pfunc([D], [], updates=[(A, (D + 5)), (B, D[:])])\n f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n\n f = pfunc([D], [], updates=[(A, (D + 5)), (B, D)])\n f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n\n f = pfunc([D], DD, updates=[(A, DD[:1]), (B, DD)])\n R = f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n assert not np.may_share_memory(R, data_of(B))\n assert not np.may_share_memory(R, data_of(A))\n\n f = pfunc([D], DD, updates=[(A, DD[:1]), (B, (DD[:1] * 2))])\n R = f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n assert not np.may_share_memory(R, data_of(B))\n assert not np.may_share_memory(R, data_of(A))\n\n f = pfunc([D], (DD * 4), updates=[(A, (DD[:1] * 3)), (B, (DD[:1] * 2))])\n R = f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n assert not np.may_share_memory(R, data_of(B))\n assert not np.may_share_memory(R, data_of(A))\n\n f = pfunc([D], (DD * 4), updates=[(A, (DD[:1] * 3)), (B, (DD[:1] * 3))])\n R = f(C)\n assert not np.may_share_memory(data_of(A), data_of(B))\n assert not np.may_share_memory(R, data_of(B))\n assert not np.may_share_memory(R, data_of(A))\n\n def test_no_aliasing_0(self):\n # B is a shared variable, A is updated with B's contents\n # we need A to be copied to avoid aliasing\n A = self.shared(np.zeros((2, 2)) + 0.5)\n B = self.shared(np.zeros((2, 2)) - 0.5)\n f = pfunc([], [], updates=[(A, B)])\n f()\n assert not np.may_share_memory(data_of(A), data_of(B))\n\n def test_no_aliasing_1(self):\n # B is a shared variable, A is updated with B's contents\n # since B is being updated as well, we don't need to copy anything\n # to avoid aliasing shared variables.\n A = self.shared(np.zeros((2, 2)) + 0.5)\n B = self.shared(np.zeros((2, 2)) - 0.5)\n C = tensor.dmatrix()\n f = pfunc([C], [], updates=[(A, B), (B, C)])\n z = np.zeros((2, 2))\n f(z)\n assert not np.may_share_memory(data_of(A), data_of(B))\n # Aesara tries to maintain its own memory space.\n assert not np.may_share_memory(z, data_of(B))\n assert 
np.all(data_of(B) == z)\n\n def test_no_aliasing_2(self):\n # B and A take one another's values\n # no copying is necessary since each one is updated.\n orig_a = np.zeros((2, 2)) + 0.5\n orig_b = np.zeros((2, 2)) - 0.5\n A = self.shared(orig_a)\n B = self.shared(orig_b)\n\n data_of_a = data_of(A)\n data_of_b = data_of(B)\n\n f = pfunc([], [], updates=[(A, B), (B, A)])\n f()\n # correctness\n assert np.all(data_of(A) == -0.5)\n assert np.all(data_of(B) == +0.5)\n\n # shared vars may not be aliased\n assert not np.may_share_memory(data_of(A), data_of(B))\n\n # aesara should have been smart enough to not make copies\n assert np.may_share_memory(data_of(A), data_of_b)\n assert np.may_share_memory(data_of(B), data_of_a)\n\n def test_no_aliasing_2b(self):\n # B and A take one another's values\n # no copying is necessary since each one is updated.\n # The twist one `test_no_aliasing_2` is that each shared var is updated\n # with a view of the other one.\n\n orig_a = np.zeros((2, 2)) + 0.5\n orig_b = np.zeros((2, 2)) - 0.5\n A = self.shared(orig_a)\n B = self.shared(orig_b)\n\n data_of_a = data_of(A)\n data_of_b = data_of(B)\n\n f = pfunc([], [], updates=[(A, B[:, ::-1]), (B, A.T)])\n # aesara.printing.debugprint(f)\n f()\n # correctness (doesn't actually test the view...)\n assert np.all(data_of(A) == -0.5)\n assert np.all(data_of(B) == +0.5)\n\n # shared vars may not be aliased\n assert not np.may_share_memory(data_of(A), data_of(B))\n\n # aesara should have been smart enough to not make copies\n if aesara.config.mode not in [\"DebugMode\", \"DEBUG_MODE\", \"FAST_COMPILE\"]:\n # We don't ask DebugMode and FAST_COMPILE not to make copy.\n # We have the right to do so.\n assert np.all(data_of(A) < 5)\n data_of_b += 10\n assert np.all(data_of(A) > 5)\n data_of_b -= 10\n\n assert np.all(data_of(B) < 5)\n data_of_a += 10\n assert np.all(data_of(B) > 5)\n data_of_a -= 10\n\n # N.B. may_share_memory is what we mean, but does it work?\n assert np.may_share_memory(data_of(A), data_of_b)\n assert np.may_share_memory(data_of(B), data_of_a)\n\n # N.B. This pattern could form a memory leak - each shared\n # variable always points to a view, and that view gets\n # further and further from the (e.g. data_of_a) with each\n # call. The memory leak is in the increasing number of view\n # objects forming a chain to the underlying data.\n\n\nclass TestRebuildStrict:\n def test_rebuild_strict(self):\n # Test fix for error reported at\n # https://groups.google.com/d/topic/aesara-users/BRK0UEB72XA/discussion\n w = tensor.imatrix()\n x, y = tensor.ivectors(\"x\", \"y\")\n z = x * y\n f = aesara.function([w, y], z, givens=[(x, w)], rebuild_strict=False)\n z_val = f(np.ones((3, 5), dtype=\"int32\"), np.arange(5, dtype=\"int32\"))\n assert z_val.ndim == 2\n assert np.all(z_val == np.ones((3, 5)) * np.arange(5))\n",
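The pfunc tests above revolve around shared variables as persistent state mutated through the updates argument. The short accumulator below strings those same pieces together into a usage example; note that the function returns the value the shared variable held before the update is applied, the behaviour the tests rely on.

import aesara
import aesara.tensor as tt

state = aesara.shared(0, name="state")
step = tt.iscalar("step")
accumulate = aesara.function([step], state, updates=[(state, state + step)])

assert accumulate(2) == 0          # returns the pre-update value
assert accumulate(3) == 2
assert state.get_value() == 5      # updates persisted across calls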
"import itertools\n\nimport numpy as np\n\nimport aesara\nfrom aesara import config, tensor\nfrom aesara.gpuarray import gpuarray_shared_constructor\nfrom aesara.gpuarray.blas import (\n GpuGemm,\n GpuGer,\n gpu_dot22,\n gpugemm_inplace,\n gpugemm_no_inplace,\n gpugemmbatch_inplace,\n gpugemv_inplace,\n gpugemv_no_inplace,\n gpuger_inplace,\n gpuger_no_inplace,\n)\nfrom aesara.tensor.blas import _dot22, batched_dot, gemm_inplace, gemv, gemv_inplace\nfrom tests import unittest_tools as utt\nfrom tests.gpuarray.config import mode_with_gpu, test_ctx_name\nfrom tests.gpuarray.test_basic_ops import makeTester, rand\nfrom tests.tensor.test_blas import BaseGemv, TestGer\n\n\nTestGpuGemv = makeTester(\n \"GpuGemvTester\",\n op=gemv_inplace,\n gpu_op=gpugemv_inplace,\n # It doesn't support float16\n cases=dict(\n dot_vv=[rand(1), 1.0, rand(1, 2), rand(2), 0.0],\n dot_vm=[rand(3), 1.0, rand(3, 2), rand(2), 0.0],\n float32=[\n rand(3).astype(\"float32\"),\n np.float32(1),\n rand(3, 2).astype(\"float32\"),\n rand(2).astype(\"float32\"),\n np.float32(0),\n ],\n float64=[\n rand(3).astype(\"float64\"),\n np.float64(1),\n rand(3, 2).astype(\"float64\"),\n rand(2).astype(\"float64\"),\n np.float64(0),\n ],\n # test_02=[rand(0), 1, rand(0, 2), rand(2), 0],\n # test_30=[rand(3), 1, rand(3, 0), rand(0), 0],\n # test_00=[rand(0), 1, rand(0, 0), rand(0), 0],\n test_stride=[rand(3)[::-1], 1.0, rand(3, 2)[::-1], rand(2)[::-1], 0.0],\n ),\n)\n\n\ndef test_float16():\n # gemv (gemm called)\n float16_data = [\n rand(3).astype(\"float16\"),\n np.asarray(1, dtype=np.float32),\n rand(3, 3).astype(\"float16\"),\n rand(3).astype(\"float16\"),\n np.asarray(0.5, dtype=np.float32),\n ]\n float16_shared = [\n gpuarray_shared_constructor(val, target=test_ctx_name) for val in float16_data\n ]\n o = gemv(*float16_shared)\n f = aesara.function([], o, mode=mode_with_gpu)\n y, alpha, A, x, beta = float16_data\n out = f()\n utt.assert_allclose(np.asarray(out), alpha * np.dot(A, x) + beta * y)\n topo = f.maker.fgraph.toposort()\n assert any([isinstance(n.op, GpuGemm) for n in topo])\n\n # gemm\n float16_data = [\n rand(3, 3).astype(\"float16\"),\n np.asarray(1, dtype=np.float32),\n rand(3, 3).astype(\"float16\"),\n rand(3, 3).astype(\"float16\"),\n np.asarray(0.5, dtype=np.float32),\n ]\n float16_shared = [\n gpuarray_shared_constructor(val, target=test_ctx_name) for val in float16_data\n ]\n o = gpugemm_no_inplace(*float16_shared)\n f = aesara.function([], o)\n y, alpha, A, x, beta = float16_data\n out = f()\n utt.assert_allclose(np.asarray(out), alpha * np.dot(A, x) + beta * y)\n\n # dot22\n float16_data = [rand(3, 3).astype(\"float16\"), rand(3, 3).astype(\"float16\")]\n\n float16_shared = [gpuarray_shared_constructor(val) for val in float16_data]\n o = gpu_dot22(*float16_shared)\n f = aesara.function([], o)\n x, y = float16_data\n out = f()\n utt.assert_allclose(np.asarray(out), np.dot(x, y))\n\n\nclass TestGpuSgemv(BaseGemv, utt.OptimizationTestMixin):\n mode = mode_with_gpu\n dtype = \"float32\"\n\n gemv = gpugemv_no_inplace\n gemv_inplace = gpugemv_inplace\n\n @staticmethod\n def shared(val):\n try:\n return gpuarray_shared_constructor(val)\n except TypeError:\n return aesara.shared(val)\n\n\nTestGpuGemm = makeTester(\n \"GpuGemmTester\",\n op=gemm_inplace,\n gpu_op=gpugemm_inplace,\n # float16 tested in test_float16\n cases=dict(\n test1=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), 0.0],\n test2=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), 1.0],\n test3=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), -1.0],\n test4=[rand(3, 4), 0.0, 
rand(3, 5), rand(5, 4), 0.0],\n test5=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), 0.6],\n test6=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), -1.0],\n test7=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 0.0],\n test8=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 1.1],\n float32=[\n rand(3, 4).astype(\"float32\"),\n np.float32(-1.0),\n rand(3, 5).astype(\"float32\"),\n rand(5, 4).astype(\"float32\"),\n np.float32(-1.1),\n ],\n float64=[\n rand(3, 4).astype(\"float64\"),\n np.float64(-1.0),\n rand(3, 5).astype(\"float64\"),\n rand(5, 4).astype(\"float64\"),\n np.float64(-1.1),\n ],\n # test10=[rand(0, 4), -1.0, rand(0, 5), rand(5, 4), 0.0],\n # test11=[rand(3, 0), -1.0, rand(3, 5), rand(5, 0), 1.1],\n # test12=[rand(3, 4), -1.0, rand(3, 0), rand(0, 4), -1.1],\n # test13=[rand(0, 0), -1.0, rand(0, 0), rand(0, 0), -1.1],\n ),\n)\n\n\ngemm_batched_tests = {\n \"test_b%im%ik%in%i\"\n % (b, m, k, n): [rand(b, m, n), rand(), rand(b, m, k), rand(b, k, n), rand()]\n for b, m, k, n in itertools.combinations([2, 3, 5, 7, 11, 13], 4)\n}\n\ngemm_batched_tests[\"float16\"] = [\n rand(3, 4, 7).astype(\"float16\"),\n rand().astype(\"float16\"),\n rand(3, 4, 4).astype(\"float16\"),\n rand(3, 4, 7).astype(\"float16\"),\n rand().astype(\"float16\"),\n]\ngemm_batched_tests[\"float32\"] = [\n rand(3, 4, 7).astype(\"float32\"),\n rand().astype(\"float32\"),\n rand(3, 4, 4).astype(\"float32\"),\n rand(3, 4, 7).astype(\"float32\"),\n rand().astype(\"float32\"),\n]\ngemm_batched_tests[\"float64\"] = [\n rand(3, 4, 7).astype(\"float64\"),\n rand().astype(\"float64\"),\n rand(3, 4, 4).astype(\"float64\"),\n rand(3, 4, 7).astype(\"float64\"),\n rand().astype(\"float64\"),\n]\n\n\nTestGpuGemmBatch = makeTester(\n \"GpuGemmBatchTester\",\n op=lambda z, alpha, x, y, beta: alpha * batched_dot(x, y) + beta * z,\n gpu_op=gpugemmbatch_inplace,\n cases=gemm_batched_tests,\n)\n\n\nclass TestGpuGemmBatchStrided:\n def test_basic(self):\n # Reported in https://github.com/Aesara/Aesara/issues/5730\n x = tensor.tensor3()\n y = tensor.tensor3()\n z = tensor.batched_dot(x, y[:, 0, :, np.newaxis])\n f = aesara.function([x, y], z, mode=mode_with_gpu)\n x_num = np.arange(32 * 19 * 600, dtype=config.floatX).reshape((32, 19, 600))\n y_num = np.arange(7 * 32 * 600, dtype=config.floatX).reshape((32, 7, 600))\n f(x_num, y_num)\n assert f.maker.fgraph.toposort()[-2].op.inplace\n\n\nclass TestGpuSger(TestGer):\n def setup_method(self):\n self.mode = mode_with_gpu\n dtype = self.dtype = \"float32\" # optimization isn't dtype-dependent\n self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))\n self.a = tensor.tensor(dtype=dtype, broadcastable=())\n self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))\n self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))\n self.ger_destructive = gpuger_inplace\n\n # data on the gpu make the op always inplace\n self.ger = gpuger_inplace\n self.gemm = gpugemm_inplace\n super().setup_method()\n\n\nclass TestGpuSgerNoTransfer(TestGpuSger):\n shared = staticmethod(gpuarray_shared_constructor)\n\n\nclass TestGpuGer_OpContract(utt.OpContractTestMixin):\n def setup_method(self):\n self.ops = [gpuger_no_inplace, gpuger_inplace]\n\n def clone(self, op):\n return GpuGer(inplace=op.inplace)\n\n\nTestGpuDot22 = makeTester(\n \"GpuDot22Tester\",\n op=_dot22,\n gpu_op=gpu_dot22,\n cases=dict(\n test1=[rand(3, 4), rand(4, 5)],\n test2=[rand(1, 4), rand(4, 5)],\n test3=[rand(3, 1), rand(1, 5)],\n test4=[rand(3, 4), rand(4, 1)],\n # test5=[rand(0, 4), rand(4, 5)],\n # test6=[rand(3, 0), rand(0, 5)],\n # 
test7=[rand(3, 4), rand(4, 0)],\n # test8=[rand(0, 4), rand(4, 0)],\n # test9=[rand(0, 0), rand(0, 0)],\n ),\n)\n\n\ndef test_gemv_zeros():\n W = tensor.matrix()\n v = tensor.vector()\n f = aesara.function([W, v], W.dot(v), mode=mode_with_gpu)\n\n # Apply to an empty matrix shape (5,0) and an empty vector shape (0,)\n dim = 1000\n A = np.zeros((dim, 0), dtype=aesara.config.floatX)\n b = np.zeros((0,), dtype=aesara.config.floatX)\n tmp = f(A, b)\n assert np.allclose(tmp, np.zeros((dim,)))\n\n\ndef test_gemv_dot_strides():\n # Reported in https://github.com/Aesara/Aesara/issues/6142\n xv = rand(5)\n yv = rand(5, 1)\n x = gpuarray_shared_constructor(xv)\n y = gpuarray_shared_constructor(yv, broadcastable=(False, True))\n f = aesara.function([], tensor.dot(x, y[::-1]), mode=mode_with_gpu)\n out = f()\n utt.assert_allclose(out, np.dot(xv, yv[::-1]))\n",
"import numpy as np\nimport pytest\n\nimport aesara\nimport aesara.gpuarray\nimport aesara.tensor.slinalg as slinalg\nfrom aesara import tensor\nfrom aesara.breakpoint import PdbBreakpoint\nfrom aesara.gof.opt import check_stack_trace\nfrom aesara.gpuarray import basic_ops, blas, dnn, opt\nfrom aesara.gpuarray.basic_ops import (\n GpuAlloc,\n GpuAllocEmpty,\n GpuFromHost,\n GpuReshape,\n HostFromGpu,\n host_from_gpu,\n)\nfrom aesara.gpuarray.blas import GpuGemm\nfrom aesara.gpuarray.dnn import GpuDnnReduction\nfrom aesara.gpuarray.elemwise import (\n Elemwise,\n GpuCAReduceCPY,\n GpuCAReduceCuda,\n GpuElemwise,\n max_inputs_to_GpuElemwise,\n)\nfrom aesara.gpuarray.linalg import GpuCholesky, GpuCusolverSolve, cusolver_available\nfrom aesara.gpuarray.subtensor import GpuSubtensor\nfrom aesara.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\nfrom aesara.tensor.nnet import abstract_conv\nfrom tests import unittest_tools as utt\nfrom tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\nfrom tests.tensor.test_basic import TestSpecifyShape\nfrom tests.test_ifelse import TestIfelse\n\n\ndef _check_stack_trace(thing):\n def _ops_to_check(op):\n if not isinstance(op, aesara.gof.Op):\n op = op.op # assume it is an apply node\n return not isinstance(\n op,\n (\n aesara.compile.ops.Shape_i,\n aesara.compile.ops.Shape,\n aesara.compile.ops.DeepCopyOp,\n aesara.tensor.opt.MakeVector,\n aesara.tensor.subtensor.Subtensor,\n aesara.tensor.elemwise.Elemwise,\n aesara.ifelse.IfElse,\n GpuFromHost,\n HostFromGpu,\n ),\n )\n\n return check_stack_trace(thing, ops_to_check=_ops_to_check, bug_print=\"ignore\")\n\n\ndef test_local_assert():\n x = aesara.tensor.fmatrix()\n a = aesara.tensor.opt.assert_op(x, aesara.tensor.eq(x, 0).any())\n f = aesara.function([x], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n a_op = [n for n in topo if isinstance(n.op, aesara.tensor.opt.Assert)]\n assert len(a_op) == 1\n assert isinstance(a_op[0].inputs[0].type, GpuArrayType)\n\n\ndef test_local_remove_all_assert():\n x = aesara.tensor.fmatrix()\n a = aesara.tensor.opt.assert_op(x, aesara.tensor.eq(x, 0).any())\n\n # By default `unsafe` should not be there\n f = aesara.function([x], a, mode=mode_with_gpu.excluding(\"unsafe\"))\n topo = f.maker.fgraph.toposort()\n a_op = [n for n in topo if isinstance(n.op, aesara.tensor.opt.Assert)]\n assert len(a_op) == 1\n\n # Put `unsafe`\n f = aesara.function([x], a, mode=mode_with_gpu.including(\"unsafe\"))\n topo = f.maker.fgraph.toposort()\n a_op = [n for n in topo if isinstance(n.op, aesara.tensor.opt.Assert)]\n assert len(a_op) == 0\n\n # Remove `unsafe`\n f = aesara.function([x], a, mode=mode_with_gpu.excluding(\"unsafe\"))\n topo = f.maker.fgraph.toposort()\n a_op = [n for n in topo if isinstance(n.op, aesara.tensor.opt.Assert)]\n assert len(a_op) == 1\n\n\ndef test_local_gpu_contiguous_gpu_contiguous():\n a = tensor.fmatrix()\n o1 = basic_ops.gpu_contiguous(a)\n o2 = basic_ops.gpu_contiguous(o1)\n f1 = aesara.function([a], o1, mode=mode_with_gpu)\n f2 = aesara.function([a], o2, mode=mode_with_gpu)\n assert 1 == len(\n [\n node\n for node in f1.maker.fgraph.toposort()\n if isinstance(node.op, basic_ops.GpuContiguous)\n ]\n )\n assert 1 == len(\n [\n node\n for node in f2.maker.fgraph.toposort()\n if isinstance(node.op, basic_ops.GpuContiguous)\n ]\n )\n assert _check_stack_trace(f1)\n assert _check_stack_trace(f2)\n\n\ndef test_local_gpu_contiguous():\n a = tensor.fmatrix()\n o = tensor.extra_ops.cpu_contiguous(a)\n f = 
aesara.function([a], o, mode=mode_with_gpu)\n assert 1 == len(\n [\n node\n for node in f.maker.fgraph.toposort()\n if isinstance(node.op, basic_ops.GpuContiguous)\n ]\n )\n f([[2.0]])\n assert _check_stack_trace(f)\n\n\ndef test_flatten():\n m = aesara.tensor.fmatrix()\n f = aesara.function([m], m.flatten(), mode=mode_with_gpu)\n val = np.random.rand(10, 11).astype(\"float32\")\n res = f(val)\n utt.assert_allclose(res, val.flatten())\n assert res.shape == val.flatten().shape\n assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]\n val = np.random.rand(10, 11).astype(\"float32\")\n res = f(val)\n utt.assert_allclose(res, val.flatten())\n assert res.shape == val.flatten().shape\n assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]\n assert _check_stack_trace(f)\n\n f = aesara.function(\n [m], m.flatten(ndim=2), mode=mode_with_gpu.excluding(\"local_useless_reshape\")\n )\n val = np.random.rand(10, 11).astype(\"float32\")\n res = f(val)\n utt.assert_allclose(res, val)\n assert res.shape == val.shape\n assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]\n assert _check_stack_trace(f)\n\n m = aesara.tensor.tensor3()\n f = aesara.function([m], m.flatten(ndim=2), mode=mode_with_gpu)\n val = np.random.rand(10, 11, 12).astype(\"float32\")\n res = f(val)\n utt.assert_allclose(res, val.reshape(10, -1))\n assert res.shape == val.reshape(10, -1).shape\n assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]\n assert _check_stack_trace(f)\n\n\ndef test_reduce():\n kind = get_context(test_ctx_name).kind\n\n for method, param in [\n (\"sum\", dict(acc_dtype=\"float32\")),\n (\"prod\", dict(acc_dtype=\"float32\")),\n (\"max\", {}),\n (\"min\", {}),\n ]:\n m = aesara.tensor.fmatrix()\n f = aesara.function(\n [m], getattr(m, method)(axis=0, **param), mode=mode_with_gpu\n )\n # assert _check_stack_trace(f) this op is ok but since\n # it is using GpuCAReduceCuda that has an empty stack\n # trace, this assertion gives error.\n val = np.random.rand(10, 11).astype(\"float32\")\n res = f(val)\n utt.assert_allclose(res, getattr(val, method)(axis=0))\n assert res.shape == (11,)\n topo = f.maker.fgraph.toposort()\n ops = [type(node.op) for node in topo]\n\n if kind == b\"opencl\" and method in [\"max\", \"min\"]:\n assert not (\n GpuCAReduceCuda in ops\n or GpuCAReduceCPY in ops\n or GpuDnnReduction in ops\n )\n else:\n assert (\n GpuCAReduceCuda in ops\n or GpuCAReduceCPY in ops\n or GpuDnnReduction in ops\n )\n\n\ndef test_local_gpualloc_memset_0():\n i = aesara.tensor.iscalar()\n z = np.zeros((1,), dtype=\"float32\")\n o = np.ones((1,), dtype=\"float32\")\n ones = np.ones((2,), dtype=\"float32\")\n\n # Test with 0 from CPU op.\n # Should not be transferred as the only client is the output\n a = tensor.alloc(z, i)\n f = aesara.function([i], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, aesara.tensor.Alloc)\n assert (np.asarray(f(6)) == 0).all()\n assert _check_stack_trace(f)\n\n # Test with 0 from CPU op.\n # Should be transferred as it is used by another op.\n a = tensor.alloc(z, i)\n f = aesara.function([i], a.cumsum(), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 3\n assert isinstance(topo[0].op, GpuAlloc)\n assert (np.asarray(f(6)) == 0).all()\n assert _check_stack_trace(f)\n\n # Test with 0\n a = GpuAlloc(test_ctx_name)(z, i)\n f = aesara.function([i], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert 
len(topo) == 1\n assert isinstance(topo[0].op, GpuAlloc) and topo[0].op.memset_0\n assert (np.asarray(f(6)) == 0).all()\n assert _check_stack_trace(f)\n\n # Test with 1\n a = GpuAlloc(test_ctx_name)(o, i)\n f = aesara.function([i], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, GpuAlloc)\n assert not topo[0].op.memset_0\n assert (np.asarray(f(6)) == 1).all()\n assert _check_stack_trace(f)\n\n # Test with 1, 1\n a = GpuAlloc(test_ctx_name)(ones, i)\n f = aesara.function([i], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, GpuAlloc)\n assert not topo[0].op.memset_0\n assert (np.asarray(f(2)) == 1).all()\n assert _check_stack_trace(f)\n\n\ndef test_local_gpualloc_empty():\n i = aesara.tensor.iscalar()\n ii = aesara.tensor.iscalar()\n\n # Test with vector\n # Should not be moved as the only client is the output\n a = tensor.AllocEmpty(\"float32\")(i)\n f = aesara.function([i], a, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, aesara.tensor.AllocEmpty)\n # This return not initilized data, so we can only check the shape\n assert f(3).shape == (3,)\n assert _check_stack_trace(f)\n\n # Test with vector\n # Should be moved\n a = tensor.AllocEmpty(\"float32\")(i)\n f = aesara.function([i], a.cumsum(), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 3\n assert isinstance(topo[0].op, GpuAllocEmpty)\n # This return not initilized data, so we can only check the shape\n assert f(3).shape == (3,)\n assert _check_stack_trace(f)\n\n # Test with matrix\n a = tensor.AllocEmpty(\"float32\")(i, ii)\n f = aesara.function([i, ii], a.cumsum(axis=0), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 3\n assert isinstance(topo[0].op, GpuAllocEmpty)\n # This return not initilized data, so we can only check the shape\n assert f(3, 4).shape == (3, 4)\n assert _check_stack_trace(f)\n\n\ndef test_rebroadcast():\n d = np.random.rand(10, 10).astype(\"float32\")\n v = aesara.tensor.fmatrix()\n up = tensor.unbroadcast(v.sum().dimshuffle(\"x\", \"x\"), 0, 1)\n f = aesara.function([v], [up], mode=mode_with_gpu)\n\n f(d)\n\n topo = f.maker.fgraph.toposort()\n rebrs = [node for node in topo if isinstance(node.op, tensor.Rebroadcast)]\n assert len(rebrs) == 1\n rebr = rebrs[0]\n\n assert isinstance(rebr.inputs[0].type, GpuArrayType)\n assert isinstance(rebr.outputs[0].type, GpuArrayType)\n assert _check_stack_trace(f)\n\n\nclass TestSpecifyShape(TestSpecifyShape):\n mode = mode_with_gpu\n input_type = GpuArrayType\n\n\nclass TestGpuIfelse(TestIfelse):\n mode = mode_with_gpu\n\n @staticmethod\n def cast_output(v):\n return basic_ops.as_gpuarray_variable(v, test_ctx_name)\n\n shared = staticmethod(gpuarray_shared_constructor)\n\n def get_ifelse(self, n):\n return aesara.ifelse.IfElse(n, gpu=True, as_view=True)\n\n def test_lifter_with_inputs_of_graph(self):\n x = tensor.vector()\n cond = tensor.iscalar()\n f = aesara.function(\n [x, cond], aesara.ifelse.ifelse(cond, x.mean(), x.sum()), mode=mode_with_gpu\n )\n assert f(np.float32([1, 2, 3]), 0) == 6\n assert _check_stack_trace(f)\n\n x = tensor.vector()\n cond = tensor.scalar()\n f = aesara.function(\n [x, cond], aesara.ifelse.ifelse(cond, x.mean(), x.sum()), mode=mode_with_gpu\n )\n assert f(np.float32([1, 2, 3]), 0) == 6\n assert _check_stack_trace(f)\n\n def test_lifter_with_shared_var(self):\n x = tensor.lscalar(\"x\")\n y = 
gpuarray_shared_constructor(\n np.asarray(1, dtype=\"float32\"), target=test_ctx_name\n )\n z = tensor.constant(2.0)\n\n a = aesara.ifelse.ifelse(x, y, z)\n with aesara.change_flags(on_opt_error=\"raise\"):\n aesara.function([x], [a], mode=mode_with_gpu)\n\n\ndef test_print_op():\n # Test that print ops don't block gpu optimization\n b = tensor.fmatrix()\n f = aesara.function([b], aesara.printing.Print()(b) * 2, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[0].op, GpuFromHost)\n assert isinstance(topo[1].op, aesara.printing.Print)\n assert isinstance(topo[2].op, GpuElemwise)\n assert topo[3].op == host_from_gpu\n assert _check_stack_trace(f)\n f(np.random.random((5, 5)).astype(\"float32\"))\n\n\ndef test_pdbbreakpoint_op():\n # Test that PdbBreakpoint ops don't block gpu optimization\n b = tensor.fmatrix()\n\n # Create a function composed of a breakpoint followed by\n # some computation\n condition = tensor.gt(b.sum(), 0)\n b_monitored = PdbBreakpoint(name=\"TestBreakpoint\")(condition, b)\n output = b_monitored ** 2\n\n f = aesara.function([b], output, mode=mode_with_gpu)\n\n # Ensure that, in the compiled function, the computation following the\n # breakpoint has been moved to the gpu.\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[-2].op, GpuElemwise)\n assert topo[-1].op == host_from_gpu\n assert _check_stack_trace(f)\n\n\ndef test_local_gpu_elemwise_careduce():\n mode_with_gpu_no_cudnn = mode_with_gpu.excluding(\"cudnn\")\n x = aesara.tensor.matrix()\n\n def fn_sum_square(x, axis):\n return (x * x).sum(axis=axis)\n\n def fn_sum_abs(x, axis):\n return abs(x).sum(axis=axis)\n\n def fn_max_abs(x, axis):\n return abs(x).max(axis=axis)\n\n for fn, pre_scalar_op in (\n (fn_sum_square, aesara.scalar.sqr),\n (fn_sum_abs, aesara.scalar.abs_),\n (fn_max_abs, aesara.scalar.abs_),\n ):\n for axis in (None, 0, 1):\n o = fn(x, axis)\n f = aesara.function([x], o, mode=mode_with_gpu_no_cudnn)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 3\n assert isinstance(topo[1].op, GpuCAReduceCuda)\n assert topo[1].op.pre_scalar_op == pre_scalar_op\n assert _check_stack_trace(f)\n data = np.random.rand(3, 4).astype(aesara.config.floatX)\n utt.assert_allclose(fn(data, axis), f(data))\n\n\ndef test_local_lift_dot22scalar():\n x = tensor.matrix()\n y = tensor.matrix()\n a = tensor.scalar()\n o = tensor.blas.Dot22Scalar()(x, y, a)\n f_cpu = aesara.function([x, y, a], o)\n f_gpu = aesara.function([x, y, a], o, mode=mode_with_gpu)\n assert not any(\n isinstance(n.op, tensor.blas.Dot22Scalar)\n for n in f_gpu.maker.fgraph.apply_nodes\n )\n assert any(isinstance(n.op, GpuGemm) for n in f_gpu.maker.fgraph.apply_nodes)\n x_val = np.random.random((2, 3)).astype(aesara.config.floatX)\n y_val = np.random.random((3, 4)).astype(aesara.config.floatX)\n a_val = 0.5\n utt.assert_allclose(f_cpu(x_val, y_val, a_val), f_gpu(x_val, y_val, a_val))\n assert _check_stack_trace(f_gpu)\n\n\ndef test_local_gpu_subtensor():\n # Test shared forced on CPU.\n t = tensor._shared(np.zeros(20, \"float32\"))\n f = aesara.function([], t[3:4], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert any([type(node.op) is tensor.Subtensor for node in topo])\n assert not any([isinstance(node.op, GpuSubtensor) for node in topo])\n assert _check_stack_trace(f)\n\n # Test graph input.\n t = tensor.fmatrix()\n f = aesara.function([t], t[3:4], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert any([type(node.op) is tensor.Subtensor for node in topo])\n assert not 
any([isinstance(node.op, GpuSubtensor) for node in topo])\n assert _check_stack_trace(f)\n\n # Test multiple use of the input\n # We want the subtensor to be on the GPU to prevent multiple transfer.\n t = tensor.fmatrix()\n f = aesara.function([t], [t[3:4], t + 1], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert not any([type(node.op) is tensor.Subtensor for node in topo])\n assert any([isinstance(node.op, GpuSubtensor) for node in topo])\n assert _check_stack_trace(f)\n\n # Test multiple use of the input + input as output\n # We want the subtensor to be on the GPU to prevent multiple transfer.\n t = tensor.fmatrix()\n f = aesara.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert not any([type(node.op) is tensor.Subtensor for node in topo])\n assert any([isinstance(node.op, GpuSubtensor) for node in topo])\n assert _check_stack_trace(f)\n\n # Test shared forced on CPU end we do computation on the output of\n # the subtensor.\n t = tensor._shared(np.zeros(20, \"float32\"))\n f = aesara.function([], t[3:4] + 1, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert any([type(node.op) is tensor.Subtensor for node in topo])\n assert not any([isinstance(node.op, GpuSubtensor) for node in topo])\n # Our optimizer isn't smart enough to move to the GPU Elemwise.\n # If it where just a little bit smarter, it could wrongly move it to the GPU.\n # If it where super smart, it would know it should not move it to the GPU.\n assert any([isinstance(node.op, tensor.Elemwise) for node in topo])\n assert _check_stack_trace(f)\n\n\ndef test_local_gpu_elemwise():\n # Test local_gpu_elemwise when there is a dtype upcastable to float32\n\n a = tensor.bmatrix()\n b = tensor.fmatrix()\n c = tensor.fmatrix()\n\n a_v = (np.random.rand(4, 5) * 10).astype(\"int8\")\n b_v = (np.random.rand(4, 5) * 10).astype(\"float32\")\n c_v = (np.random.rand(4, 5) * 10).astype(\"float32\")\n\n # Due to optimization order, this composite is created when all\n # the op are on the gpu.\n f = aesara.function([a, b, c], a + b + c, mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1\n assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0\n utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)\n assert _check_stack_trace(f)\n\n # Now test with the composite already on the cpu before we move it\n # to the gpu\n a_s = aesara.scalar.int8()\n b_s = aesara.scalar.float32()\n c_s = aesara.scalar.float32()\n out_s = aesara.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])\n out_op = tensor.Elemwise(out_s)\n f = aesara.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1\n assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0\n utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)\n assert _check_stack_trace(f)\n\n return # Not yet implemeted\n # Test multiple output\n a_s = aesara.scalar.float32()\n a = tensor.fmatrix()\n from aesara.scalar.basic import identity\n\n out_s = aesara.scalar.Composite(\n [a_s, b_s, c_s], [identity(a_s), identity(c_s), identity(b_s)]\n )\n outs_op = tensor.Elemwise(out_s)\n f = aesara.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1\n assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0\n out = f(a_v, b_v, c_v)\n 
utt.assert_allclose(out[0], a_v)\n utt.assert_allclose(out[1], c_v)\n utt.assert_allclose(out[2], b_v)\n assert _check_stack_trace(f)\n\n # Test multiple output\n out_s = aesara.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * b_s])\n outs_op = tensor.Elemwise(out_s)\n f = aesara.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1\n assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0\n out = f(a_v, b_v, c_v)\n utt.assert_allclose(out[0], a_v + b_v)\n utt.assert_allclose(out[1], a_v * c_v)\n assert _check_stack_trace(f)\n\n # Test non-contiguous input\n c = gpuarray_shared_constructor(np.asarray(c_v, dtype=\"float32\"))\n f = aesara.function([a, b], outs_op(a[::2], b[::2], c[::2]), mode=mode_with_gpu)\n out = f(a_v, b_v)\n utt.assert_allclose(out[0], a_v[::2] + b_v[::2])\n utt.assert_allclose(out[1], a_v[::2] * c_v[::2])\n assert _check_stack_trace(f)\n\n\ndef test_many_arg_elemwise():\n # This test checks whether the + and * elemwise ops can handle\n # extremely large numbers of arguments on gpu.\n\n rng = np.random.RandomState([1, 2, 3])\n nb_of_inputs_overflows = []\n for num_args in [64]:\n for op_to_test in [aesara.tensor.add, aesara.tensor.mul]:\n for nb_dim in [2, 8]:\n shapes = [rng.randint(1, 5) for i in range(nb_dim)]\n args = [\n np.cast[\"float32\"](rng.randn(*shapes)) for arg in range(0, num_args)\n ]\n\n symb_args = [\n aesara.tensor.TensorType(\"float32\", (False,) * nb_dim)()\n for arg in range(0, num_args)\n ]\n\n outputs = []\n for mode in [mode_with_gpu, mode_without_gpu]:\n # test the optimization local_gpua_elemwise\n output = op_to_test(*symb_args)\n f = aesara.function(symb_args, output, mode=mode)\n outputs.append(f(*args))\n\n # assert that the test was done on the gpu.\n if mode is mode_with_gpu:\n nb_of_inputs_overflows.append(\n max_inputs_to_GpuElemwise(output.owner) - num_args\n )\n nodelst = [node for node in f.maker.fgraph.apply_nodes]\n assert any(isinstance(node.op, GpuElemwise) for node in nodelst)\n assert not any(\n isinstance(node.op, Elemwise)\n for node in nodelst\n if not isinstance(node.op, GpuElemwise)\n )\n results_gpu, results_cpu = outputs\n utt.assert_allclose(results_gpu, results_cpu)\n\n # Make sure we test at least one case with no number of inputs overflow\n assert any(overflow >= 0 for overflow in nb_of_inputs_overflows)\n\n # Make sure we test at least one case with number of inputs overflow\n assert any(overflow < 0 for overflow in nb_of_inputs_overflows)\n\n\ndef test_not_useless_scalar_gpuelemwise():\n # We don't want to move elemwise on scalar on the GPU when the\n # result will not be used on the GPU!\n\n with aesara.change_flags(warn_float64=\"ignore\"):\n X = tensor.fmatrix()\n x = np.random.randn(32, 32).astype(np.float32)\n m1 = aesara.shared(np.random.randn(32, 32).astype(np.float32))\n loss = (X - tensor.dot(X, m1)).norm(L=2)\n lr = aesara.shared(np.asarray(0.001, dtype=np.float32))\n grad = tensor.grad(loss, m1)\n\n train = aesara.function(\n inputs=[X], updates=[(m1, m1 - lr * grad)], mode=mode_with_gpu\n )\n train(x)\n topo = train.maker.fgraph.toposort()\n gemms = [app for app in topo if isinstance(app.op, GpuGemm)]\n assert len(gemms) == 2\n assert isinstance(gemms[1].inputs[1].owner.op, tensor.Elemwise)\n\n\ndef test_local_lift_abstractconv_gpu_shape():\n prev = aesara.config.on_opt_error\n try:\n aesara.config.on_opt_error = \"raise\"\n s = tensor.ivector()\n a = tensor.ftensor4()\n b = 
tensor.ftensor4()\n c = tensor.nnet.abstract_conv.AbstractConv2d_gradWeights()(a, b, s)\n f = aesara.function([s, a, b], c, mode=mode_with_gpu)\n assert _check_stack_trace(f)\n finally:\n aesara.config.on_opt_error = prev\n\n\ndef test_local_assert_no_cpu_op():\n rng = np.random.RandomState(utt.fetch_seed())\n m = rng.uniform(-1, 1, (10, 10)).astype(\"float32\")\n ms = gpuarray_shared_constructor(m, name=\"m_shared\")\n out = aesara.tensor.tanh(ms).dot(ms.T)\n\n mode_local_assert = mode_with_gpu.including(\"assert_no_cpu_op\")\n mode_local_assert = mode_local_assert.excluding(\"local_gpua_elemwise\")\n\n old = aesara.config.assert_no_cpu_op\n old2 = aesara.config.on_opt_error\n # If the flag is raise\n try:\n aesara.config.assert_no_cpu_op = \"raise\"\n aesara.config.on_opt_error = \"ignore\"\n\n with pytest.raises(AssertionError):\n aesara.function([], out, mode=mode_local_assert)\n finally:\n aesara.config.assert_no_cpu_op = old\n aesara.config.on_opt_error = old2\n\n # If the flag is ignore\n try:\n aesara.config.assert_no_cpu_op = \"ignore\"\n f = aesara.function([], out, mode=mode_local_assert)\n assert _check_stack_trace(f)\n finally:\n aesara.config.assert_no_cpu_op = old\n\n\ndef test_no_complex():\n width_var = tensor.cscalar()\n freq_var = tensor.fscalar()\n signal_var = tensor.fscalar()\n stft_out = tensor.exp(width_var * freq_var) * signal_var\n f = aesara.function([width_var, freq_var, signal_var], stft_out, mode=mode_with_gpu)\n assert _check_stack_trace(f)\n\n\[email protected]_fast\[email protected](\n not cusolver_available or not slinalg.imported_scipy, reason=\"No cuSolver or SciPy\"\n)\ndef test_local_lift_solve():\n A = tensor.fmatrix()\n b = tensor.fmatrix()\n o = slinalg.solve(A, b)\n f_cpu = aesara.function([A, b], o, mode_without_gpu)\n f_gpu = aesara.function([A, b], o, mode=mode_with_gpu)\n assert not any(\n isinstance(n.op, slinalg.Solve) for n in f_gpu.maker.fgraph.apply_nodes\n )\n assert any(\n isinstance(n.op, GpuCusolverSolve) and n.op.inplace\n for n in f_gpu.maker.fgraph.apply_nodes\n )\n A_val = np.random.uniform(-0.4, 0.4, (5, 5)).astype(\"float32\")\n b_val = np.random.uniform(-0.4, 0.4, (5, 3)).astype(\"float32\")\n utt.assert_allclose(f_cpu(A_val, b_val), f_gpu(A_val, b_val))\n assert _check_stack_trace(f_gpu)\n\n\[email protected](\n not cusolver_available or not slinalg.imported_scipy, reason=\"No cuSolver or SciPy\"\n)\ndef test_gpu_solve_not_inplace():\n A = tensor.fmatrix()\n b = tensor.fmatrix()\n s = slinalg.solve(A, b)\n o = tensor.dot(A, s)\n f_cpu = aesara.function([A, b], o, mode_without_gpu)\n f_gpu = aesara.function([A, b], o, mode=mode_with_gpu)\n count_not_inplace = len(\n [\n n.op\n for n in f_gpu.maker.fgraph.apply_nodes\n if isinstance(n.op, GpuCusolverSolve) and not n.op.inplace\n ]\n )\n assert count_not_inplace == 1, count_not_inplace\n A_val = np.random.uniform(-0.4, 0.4, (5, 5)).astype(\"float32\")\n b_val = np.random.uniform(-0.4, 0.4, (5, 3)).astype(\"float32\")\n utt.assert_allclose(f_cpu(A_val, b_val), f_gpu(A_val, b_val))\n\n\[email protected]_fast\[email protected](\n not cusolver_available or not slinalg.imported_scipy, reason=\"No cuSolver or SciPy\"\n)\ndef test_local_lift_cholesky():\n A = tensor.fmatrix()\n o = slinalg.cholesky(A)\n f_cpu = aesara.function([A], o, mode=mode_without_gpu)\n f_gpu = aesara.function([A], o, mode=mode_with_gpu)\n assert not any(\n isinstance(n.op, slinalg.Cholesky) for n in f_gpu.maker.fgraph.apply_nodes\n )\n # GpuCholesky op in this graph should be inplace (as his input is not reused by 
other op).\n assert any(\n isinstance(n.op, GpuCholesky) and n.op.inplace\n for n in f_gpu.maker.fgraph.apply_nodes\n )\n M_val = np.random.normal(size=(3, 3)).astype(\"float32\")\n # A = M.dot(M) will be positive definite for all non-singular M\n A_val = M_val.dot(M_val.T)\n utt.assert_allclose(f_cpu(A_val), f_gpu(A_val))\n\n\[email protected](\n not cusolver_available or not slinalg.imported_scipy, reason=\"No cuSolver or SciPy\"\n)\ndef test_gpu_cholesky_not_inplace():\n A = tensor.fmatrix()\n A_squared = A ** 2\n B = slinalg.cholesky(A_squared)\n D = B + A_squared\n f_cpu = aesara.function([A], D, mode=mode_without_gpu)\n f_gpu = aesara.function([A], D, mode=mode_with_gpu)\n # GpuCholesky op in this graph should NOT be inplace (as his input is reused in another op)\n count_cholesky_not_inplace = len(\n [\n n.op\n for n in f_gpu.maker.fgraph.apply_nodes\n if isinstance(n.op, GpuCholesky) and not n.op.inplace\n ]\n )\n assert count_cholesky_not_inplace == 1, count_cholesky_not_inplace\n M_val = np.random.normal(size=(3, 3)).astype(\"float32\")\n # A = M.dot(M) will be positive definite for all non-singular M\n A_val = M_val.dot(M_val.T)\n utt.assert_allclose(f_cpu(A_val), f_gpu(A_val))\n\n\ndef test_local_gpua_advanced_incsubtensor():\n # test a corner case reported at gh-5589\n target = tensor.ftensor4()\n y = target.dimshuffle(1, 0, 2, 3).flatten(ndim=1)\n w = tensor.ones_like(y)\n w = tensor.set_subtensor(w[tensor.eq(y, 1.0).nonzero()], 100)\n w = tensor.set_subtensor(w[tensor.eq(y, -1.0).nonzero()], 0)\n f = aesara.function([target], w)\n assert _check_stack_trace(f)\n\n\ndef test_batched_dot_lifter():\n # The CPU Op accepts 2D and 3D inputs, as well as mixed dtypes.\n # Make sure the lifter adds the appropriate dimshuffles and casts\n rng = np.random.RandomState(utt.fetch_seed())\n\n def randX(*args):\n return rng.rand(*args).astype(aesara.config.floatX)\n\n cases = [\n (randX(3, 5, 7), randX(3, 7)),\n (randX(3, 5), randX(3, 5, 7)),\n (randX(3, 5), randX(3, 5)),\n (rng.rand(3, 5, 7).astype(\"float32\"), randX(3, 7, 9)),\n (rng.rand(3, 5, 7).astype(\"float64\"), randX(3, 7, 9)),\n ]\n for x_val, y_val in cases:\n x = tensor.TensorType(\n broadcastable=[s == 1 for s in x_val.shape], dtype=x_val.dtype\n )(\"x\")\n y = tensor.TensorType(\n broadcastable=[s == 1 for s in y_val.shape], dtype=y_val.dtype\n )(\"y\")\n z = tensor.batched_dot(x, y)\n f = aesara.function([x, y], z, mode=mode_with_gpu)\n f(x_val, y_val)\n assert check_stack_trace(f, ops_to_check=\"all\")\n\n\ndef test_crossentropycategorical1hot_lifter():\n rng = np.random.RandomState(utt.fetch_seed())\n x = tensor.matrix()\n y = tensor.lvector()\n z = tensor.nnet.crossentropy_categorical_1hot(x, y)\n gx = aesara.grad(z.mean(), x)\n f = aesara.function([x, y], [z, gx], mode=mode_with_gpu)\n assert not any(\n isinstance(\n n.op,\n (\n tensor.nnet.CrossentropyCategorical1Hot,\n tensor.nnet.CrossentropyCategorical1HotGrad,\n ),\n )\n for n in f.maker.fgraph.apply_nodes\n )\n f(\n rng.uniform(0.1, 0.9, (13, 5)).astype(aesara.config.floatX),\n rng.randint(5, size=(13,)),\n )\n\n\nclass TestConv_opt:\n def optimizer_2d(\n self,\n input_shapes,\n direction,\n include_tags,\n exclude_tags,\n op,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_dilation=(1, 1),\n num_groups=1,\n unshared=False,\n optimiser=None,\n ):\n\n inp1 = aesara.shared(\n np.random.random(input_shapes[0]).astype(aesara.config.floatX)\n )\n inp2 = aesara.shared(\n np.random.random(input_shapes[1]).astype(aesara.config.floatX)\n )\n if op is None:\n inp1 = 
basic_ops.as_gpuarray_variable(inp1, test_ctx_name)\n inp2 = basic_ops.as_gpuarray_variable(inp2, test_ctx_name)\n if direction == 0:\n conv_op = abstract_conv.AbstractConv2d(\n input_shapes[0],\n input_shapes[1],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n unshared=unshared,\n )(inp1, inp2)\n\n if direction == 1:\n conv_op = abstract_conv.AbstractConv2d_gradWeights(\n imshp=input_shapes[0],\n kshp=input_shapes[2],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n unshared=unshared,\n )(inp1, inp2, input_shapes[2][-2:])\n\n if direction == 2:\n conv_op = abstract_conv.AbstractConv2d_gradInputs(\n imshp=input_shapes[2],\n kshp=input_shapes[1],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n unshared=unshared,\n )(inp2, inp1, input_shapes[2][-2:])\n\n aesara.config.metaopt.optimizer_including = include_tags\n aesara.config.metaopt.optimizer_excluding = exclude_tags\n mode = (\n mode_with_gpu.including(\"conv_meta\")\n .excluding(\"conv_dnn\")\n .excluding(\"conv_gemm\")\n )\n\n # All meta optimizer compile a new function. This need to know\n # the current linker, but this information is not available,\n # so it use the default mode.\n if op is None:\n # No convolutions optimization takes place\n assert optimiser.transform(conv_op.owner) is None\n else:\n ref_func = aesara.function([], conv_op, mode=mode_with_gpu)\n with aesara.change_flags(mode=mode):\n conv_func = aesara.function([], conv_op, mode=mode)\n assert any(\n [isinstance(node.op, op) for node in conv_func.maker.fgraph.toposort()]\n )\n utt.assert_allclose(conv_func(), ref_func())\n\n def optimizer_3d(\n self,\n input_shapes,\n direction,\n include_tags,\n exclude_tags,\n op,\n border_mode=\"valid\",\n subsample=(1, 1, 1),\n filter_dilation=(1, 1, 1),\n num_groups=1,\n optimiser=None,\n ):\n inp1 = aesara.shared(\n np.random.random(input_shapes[0]).astype(aesara.config.floatX)\n )\n inp2 = aesara.shared(\n np.random.random(input_shapes[1]).astype(aesara.config.floatX)\n )\n\n if op is None:\n inp1 = basic_ops.as_gpuarray_variable(inp1, None)\n inp2 = basic_ops.as_gpuarray_variable(inp2, None)\n if direction == 0:\n conv_op = abstract_conv.AbstractConv3d(\n input_shapes[0],\n input_shapes[1],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n )(inp1, inp2)\n\n if direction == 1:\n conv_op = abstract_conv.AbstractConv3d_gradWeights(\n input_shapes[0],\n input_shapes[2],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n )(inp1, inp2, input_shapes[2][-3:])\n\n if direction == 2:\n conv_op = abstract_conv.AbstractConv3d_gradInputs(\n input_shapes[2],\n input_shapes[1],\n border_mode=border_mode,\n subsample=subsample,\n filter_dilation=filter_dilation,\n num_groups=num_groups,\n )(inp2, inp1, input_shapes[2][-3:])\n\n aesara.config.metaopt.optimizer_including = include_tags\n aesara.config.metaopt.optimizer_excluding = exclude_tags\n mode = (\n mode_with_gpu.including(\"conv_meta\")\n .excluding(\"conv_dnn\")\n .excluding(\"conv_gemm\")\n )\n\n # All meta optimizer compile a new function. 
This need to know\n # the current linker, but this information is not available,\n # so it use the default mode.\n if op is None:\n # No convolutions optimization takes place\n assert optimiser.transform(conv_op.owner) is None\n return\n elif op != \"conv3d2d\":\n with aesara.change_flags(mode=mode):\n conv_func = aesara.function([], conv_op, mode=mode)\n assert any(\n [isinstance(node.op, op) for node in conv_func.maker.fgraph.toposort()]\n )\n else:\n with aesara.change_flags(mode=mode):\n conv_func = aesara.function(\n [], conv_op, mode=mode_with_gpu.including(\"conv_meta\")\n )\n ref_func = aesara.function([], conv_op, mode=mode_with_gpu)\n utt.assert_allclose(conv_func(), ref_func())\n\n @pytest.mark.skipif(aesara.config.cxx == \"\", reason=\"Need a c compiler.\")\n def test_optimizers_2d(self):\n imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)]\n kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)]\n tshp2d = [(2, 4, 3, 3), (2, 3, 3, 3), (2, 4, 3, 3)]\n\n for imshp, kshp, tshp in zip(imshp2d, kshp2d, tshp2d):\n # forward passes\n self.optimizer_2d(\n [imshp, kshp, tshp], 0, \"\", \"conv_dnn:alternative\", blas.GpuCorrMM\n )\n self.optimizer_2d(\n [imshp, kshp, tshp],\n 0,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorrMM_gradWeights,\n )\n self.optimizer_2d(\n [imshp, kshp, tshp], 0, \"\", \"conv_gemm:alternative\", dnn.GpuDnnConv\n )\n self.optimizer_2d(\n [imshp, kshp, tshp],\n 0,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConvGradW,\n )\n # backwards wrt weights\n self.optimizer_2d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM_gradWeights,\n )\n self.optimizer_2d(\n [imshp, tshp, kshp],\n 1,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorrMM,\n )\n self.optimizer_2d(\n [imshp, tshp, kshp], 1, \"\", \"conv_gemm:alternative\", dnn.GpuDnnConvGradW\n )\n self.optimizer_2d(\n [imshp, tshp, kshp],\n 1,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConv,\n )\n # backwards wrt to inputs\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM_gradInputs,\n )\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorrMM,\n )\n self.optimizer_2d(\n [tshp, kshp, imshp], 2, \"\", \"conv_gemm:alternative\", dnn.GpuDnnConvGradI\n )\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConv,\n )\n\n @pytest.mark.skipif(aesara.config.cxx == \"\", reason=\"Need a c compiler.\")\n def test_optimizers_3d(self):\n imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)]\n kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)]\n tshp3d = [(2, 4, 3, 3, 3), (2, 3, 3, 3, 3), (2, 4, 3, 3, 3)]\n\n for imshp, kshp, tshp in zip(imshp3d, kshp3d, tshp3d):\n # forwards passes\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_dnn:alternative:conv3d2d\",\n blas.GpuCorr3dMM,\n )\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"alternative\",\n \"conv_dnn:default:conv3d2d\",\n blas.GpuCorr3dMM_gradWeights,\n )\n self.optimizer_3d([imshp, kshp, tshp], 0, \"conv3d2d\", \"default\", \"conv3d2d\")\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"alternative\",\n \"conv_gemm:default:conv3d2d\",\n dnn.GpuDnnConvGradW,\n )\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_gemm:alternative:conv3d2d\",\n dnn.GpuDnnConv,\n )\n # backward pass wrt weight\n self.optimizer_3d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_dnn:alternative\",\n 
blas.GpuCorr3dMM_gradWeights,\n )\n self.optimizer_3d(\n [imshp, tshp, kshp],\n 1,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorr3dMM,\n )\n self.optimizer_3d(\n [imshp, tshp, kshp],\n 1,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConv,\n )\n self.optimizer_3d(\n [imshp, tshp, kshp], 1, \"\", \"conv_gemm:alternative\", dnn.GpuDnnConvGradW\n )\n\n # backward pass wrt inputs\n self.optimizer_3d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorr3dMM_gradInputs,\n )\n self.optimizer_3d(\n [tshp, kshp, imshp],\n 2,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorr3dMM,\n )\n self.optimizer_3d(\n [tshp, kshp, imshp],\n 2,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConv,\n )\n self.optimizer_3d(\n [tshp, kshp, imshp], 2, \"\", \"conv_gemm:alternative\", dnn.GpuDnnConvGradI\n )\n\n @pytest.mark.skipif(aesara.config.cxx == \"\", reason=\"Need a c compiler.\")\n def test_optimizers_non_default(self):\n # conv2d forward pass with Non-default border_mode and filter_dilation\n imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)]\n kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)]\n filter_dilation = [(1, 1), (2, 2)]\n for imshp, kshp, fdil in zip(imshp2d, kshp2d, filter_dilation):\n self.optimizer_2d(\n [imshp, kshp],\n 0,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_2d(\n [imshp, kshp],\n 0,\n \"alternative\",\n \"conv_dnn:default\",\n blas.GpuCorrMM_gradInputs,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_2d(\n [imshp, kshp],\n 0,\n \"\",\n \"conv_gemm:alternative\",\n dnn.GpuDnnConv,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_2d(\n [imshp, kshp],\n 0,\n \"alternative\",\n \"conv_gemm:default\",\n dnn.GpuDnnConvGradI,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n # conv3d forward pass with Non-default border_mode and filter_dilation\n imshp3d = [(2, 3, 5, 5, 5), (4, 2, 5, 5, 5)]\n kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 3, 3)]\n filter_dilation = [(1, 1, 1), (2, 2, 2)]\n for imshp, kshp, fdil in zip(imshp3d, kshp3d, filter_dilation):\n self.optimizer_3d(\n [imshp, kshp],\n 0,\n \"\",\n \"conv_dnn:alternative:conv3d2d\",\n blas.GpuCorr3dMM,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_3d(\n [imshp, kshp],\n 0,\n \"alternative\",\n \"conv_dnn:default:conv3d2d\",\n blas.GpuCorr3dMM_gradInputs,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_3d(\n [imshp, kshp],\n 0,\n \"\",\n \"conv_gemm:alternative:conv3d2d\",\n dnn.GpuDnnConv,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n self.optimizer_3d(\n [imshp, kshp],\n 0,\n \"alternative\",\n \"conv_gemm:default:conv3d2d\",\n dnn.GpuDnnConvGradI,\n border_mode=\"full\",\n filter_dilation=fdil,\n )\n\n # test non default num_groups for default optimizers\n imshp2d = [(2, 6, 5, 5), (2, 4, 5, 5)]\n kshp2d = [(3, 2, 3, 3), (2, 2, 3, 3)]\n tshp2d = [(2, 3, 3, 3), (2, 2, 3, 3)]\n num_groups = [3, 2]\n for imshp, kshp, tshp, groups in zip(imshp2d, kshp2d, tshp2d, num_groups):\n # forward pass\n self.optimizer_2d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM,\n num_groups=groups,\n )\n self.optimizer_2d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_gemm:alternative\",\n dnn.GpuDnnConv,\n num_groups=groups,\n )\n # grad with respect to weights\n self.optimizer_2d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM_gradWeights,\n num_groups=groups,\n )\n self.optimizer_2d(\n 
[imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_gemm:alternative\",\n dnn.GpuDnnConvGradW,\n num_groups=groups,\n )\n # grad with respect to inputs\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_dnn:alternative\",\n blas.GpuCorrMM_gradInputs,\n num_groups=groups,\n )\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_gemm:alternative\",\n dnn.GpuDnnConvGradI,\n num_groups=groups,\n )\n\n # test unshared for default optimizers\n imshp2d = [(2, 2, 4, 4), (3, 2, 5, 3)]\n kshp2d = [(2, 2, 2, 2, 3, 3), (2, 3, 1, 2, 3, 3)]\n tshp2d = [(2, 2, 2, 2), (3, 2, 3, 1)]\n for imshp, kshp, tshp, groups in zip(imshp2d, kshp2d, tshp2d, num_groups):\n # forward pass\n self.optimizer_2d(\n [imshp, kshp, tshp], 0, \"\", \"alternative\", blas.GpuCorrMM, unshared=True\n )\n # grad with respect to weights\n self.optimizer_2d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"alternative\",\n blas.GpuCorrMM_gradWeights,\n unshared=True,\n )\n # grad with respect to inputs\n self.optimizer_2d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"alternative\",\n blas.GpuCorrMM_gradInputs,\n unshared=True,\n )\n\n imshp3d = [(2, 6, 5, 5, 5), (2, 4, 5, 5, 5)]\n kshp3d = [(3, 2, 3, 3, 3), (2, 2, 3, 3, 3)]\n tshp3d = [(2, 3, 3, 3, 3), (2, 2, 3, 3, 3)]\n num_groups = [3, 2]\n for imshp, kshp, tshp, groups in zip(imshp3d, kshp3d, tshp3d, num_groups):\n # forward pass\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_dnn:alternative:conv3d2d\",\n blas.GpuCorr3dMM,\n num_groups=groups,\n )\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 0,\n \"\",\n \"conv_gemm:alternative:conv3d2d\",\n dnn.GpuDnnConv,\n num_groups=groups,\n )\n # grad with respect to weights\n self.optimizer_3d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_dnn:alternative:conv3d2d\",\n blas.GpuCorr3dMM_gradWeights,\n num_groups=groups,\n )\n self.optimizer_3d(\n [imshp, tshp, kshp],\n 1,\n \"\",\n \"conv_gemm:alternative:conv3d2d\",\n dnn.GpuDnnConvGradW,\n num_groups=groups,\n )\n # grad with respect to inputs\n self.optimizer_3d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_dnn:alternative:conv3d2d\",\n blas.GpuCorr3dMM_gradInputs,\n num_groups=groups,\n )\n self.optimizer_3d(\n [tshp, kshp, imshp],\n 2,\n \"\",\n \"conv_gemm:alternative:conv3d2d\",\n dnn.GpuDnnConvGradI,\n num_groups=groups,\n )\n\n @pytest.mark.skipif(aesara.config.cxx == \"\", reason=\"Need a c compiler.\")\n def test_returns_none_2d(self):\n # values given don't matter since it returns None\n imshp = (2, 3, 5, 5)\n kshp = (4, 3, 3, 3)\n tshp = (2, 4, 3, 3)\n conv_direction = [0, 1, 2]\n optimisers = [\n [opt.local_abstractconv_gemm_alt, opt.local_abstractconv_cudnn_alt],\n [\n opt.local_abstractconv_gemm_gradweights_alt,\n opt.local_abstractconv_cudnn_alt,\n ],\n [\n opt.local_abstractconv_gradinputs_gemm_alt,\n opt.local_abstractconv_cudnn_alt,\n ],\n ]\n # test that non default subsample returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_2d(\n [imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n subsample=(2, 2),\n optimiser=optimiser,\n )\n # test that non default num_groups returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_2d(\n [imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n num_groups=3,\n optimiser=optimiser,\n )\n # test that border_mode=half returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_2d(\n 
[imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n border_mode=\"half\",\n optimiser=optimiser,\n )\n # test that Non-default filter dilation return None for\n # direction 1\n for optimiser in optimisers[1]:\n self.optimizer_2d(\n [imshp, kshp, tshp],\n 1,\n \"\",\n \"\",\n None,\n filter_dilation=(2, 2),\n optimiser=optimiser,\n )\n imshp = (2, 2, 4, 4)\n kshp = (2, 2, 2, 2, 3, 3)\n tshp = (2, 2, 2, 2)\n shape_perms = [[imshp, kshp, tshp], [imshp, tshp, kshp], [tshp, kshp, imshp]]\n # test unshared convolution returns None\n for opt_direction, direction, perms in zip(\n optimisers, conv_direction, shape_perms\n ):\n for optimiser in opt_direction:\n self.optimizer_2d(\n perms, direction, \"\", \"\", None, unshared=True, optimiser=optimiser\n )\n\n @pytest.mark.skipif(aesara.config.cxx == \"\", reason=\"Need a c compiler.\")\n def test_returns_none_3d(self):\n imshp = (2, 3, 5, 5, 5)\n kshp = (4, 3, 3, 3, 3)\n tshp = (2, 4, 3, 3, 3)\n conv_direction = [0, 1, 2]\n optimisers = [\n [opt.local_abstractconv3d_alt, opt.local_abstractconv3d_cudnn_alt],\n [\n opt.local_abstractconv3d_gemm_gradweights_alt,\n opt.local_abstractconv3d_cudnn_alt,\n ],\n [\n opt.local_abstractconv3d_gradinputs_gemm_alt,\n opt.local_abstractconv3d_cudnn_alt,\n ],\n ]\n # test that non default subsample returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_3d(\n [imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n subsample=(2, 2, 2),\n optimiser=optimiser,\n )\n # test that non default num_groups returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_3d(\n [imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n num_groups=3,\n optimiser=optimiser,\n )\n # test that border_mode=half returns None\n for opt_direction, direction in zip(optimisers, conv_direction):\n for optimiser in opt_direction:\n self.optimizer_3d(\n [imshp, kshp, tshp],\n direction,\n \"\",\n \"\",\n None,\n border_mode=\"half\",\n optimiser=optimiser,\n )\n # test that Non-default filter dilation return None for\n # direction 1\n for optimiser in optimisers[1]:\n self.optimizer_3d(\n [imshp, kshp, tshp],\n 1,\n \"\",\n \"\",\n None,\n filter_dilation=(2, 2, 2),\n optimiser=optimiser,\n )\n"
] |
[
[
"numpy.log2",
"numpy.asarray",
"numpy.arange",
"numpy.int8",
"numpy.dtype",
"numpy.all",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.asarray",
"numpy.int8",
"numpy.abs"
],
[
"numpy.zeros",
"numpy.random.rand",
"numpy.empty"
],
[
"numpy.random.rand"
],
[
"numpy.allclose",
"numpy.asarray",
"numpy.arange",
"numpy.ones",
"numpy.all",
"numpy.random.rand",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.arange",
"numpy.float64",
"numpy.float32",
"numpy.zeros"
],
[
"numpy.random.random",
"numpy.asarray",
"numpy.ones",
"numpy.random.normal",
"numpy.random.randn",
"numpy.random.rand",
"numpy.float32",
"numpy.random.uniform",
"numpy.random.RandomState",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dllahr/summer2020
|
[
"4c8204f72ecdd8c44d2abb1e3e2669d1e6a478e8"
] |
[
"summer2020py/make_genebody_coverage_graphs/test/test_make_genebody_coverage_graphs.py"
] |
[
"import unittest\nimport logging\nimport summer2020py.setup_logger as setup_logger\nimport summer2020py.make_genebody_coverage_graphs.make_genebody_coverage_graphs as mgcg\n\nimport pandas\nimport tempfile\nimport os\ntemp_wkdir_prefix = \"TestMakeGeneBodyCoverageGraphs\"\n\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\n# Some notes on testing conventions (more in cuppers convention doc):\n# (1) Use \"self.assert...\" over \"assert\"\n# - self.assert* methods: https://docs.python.org/2.7/library/unittest.html#assert-methods\n# - This will ensure that if one assertion fails inside a test method,\n# exectution won't halt and the rest of the test method will be executed\n# and other assertions are also verified in the same run.\n# (2) For testing exceptions use:\n# with self.assertRaises(some_exception) as context:\n# [call method that should raise some_exception]\n# self.assertEqual(str(context.exception), \"expected exception message\")\n#\n# self.assertAlmostEquals(...) for comparing floats\n\n\nclass TestMakeGeneBodyCoverageGraphs(unittest.TestCase):\n def test_main(self):\n logger.debug(\"\\n \\n \\n test_main \\n \\n \")\n\n input_dir = os.path.join(\"assets\", \"notebook_inputs\", \"output_gbdy_cov\")\n logger.debug(\"input_dir: {}\".format(input_dir))\n\n with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:\n logger.debug(\"wkdir: {}\".format(wkdir))\n\n args = mgcg.build_parser().parse_args([\n #\"-s\", source_dir, \n \"-i\", input_dir, \n \"-o\", wkdir,\n \"-of\", \"MYEXPERIMENTID\"\n ])\n\n mgcg.main(args)\n \n #check that html files were outputted \n self.assertTrue(os.path.exists(os.path.join(wkdir, \"MYEXPERIMENTID_genebody_histogram_coverage_diff.html\")))\n self.assertTrue(os.path.exists(os.path.join(wkdir, \"MYEXPERIMENTID_genebody_histogram_cov_diff_pct.html\")))\n self.assertTrue(os.path.exists(os.path.join(wkdir, \"MYEXPERIMENTID_genebody_coverage_percentile.html\")))\n self.assertTrue(os.path.exists(os.path.join(wkdir, \"MYEXPERIMENTID_genebody_coverage_counts.html\")))\n\n\n #check that the text files are the same as example outputss\n #doesn't work for html files\n outputted_files = [\n os.path.join(wkdir, \"MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt\"),\n os.path.join(wkdir, \"MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt\")\n ]\n\n expected_files = [\n os.path.join(\"assets\", \"example_notebook_outputs\", \"MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt\"),\n os.path.join(\"assets\", \"example_notebook_outputs\", \"MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt\")\n ]\n\n for i in range(0, len(outputted_files)):\n opened_output = open(outputted_files[i], \"r\")\n opened_expected = open(expected_files[i], \"r\")\n logger.debug(\"checking {} against expected\".format(outputted_files[i]))\n self.assertEqual(opened_output.read(), opened_expected.read())\n opened_output.close()\n opened_expected.close()\n\n\n\n def test_input_file_search(self):\n logger.debug(\"\\n \\n \\n test_input_file_search\\n \\n \")\n input_dir = os.path.join(\"assets\",\"notebook_inputs\", \"output_gbdy_cov\")\n logger.debug(\"input_dir: {}\".format(input_dir))\n input_files = mgcg.input_file_search(input_dir)\n\n self.assertEqual(len(input_files), 12)\n\n #check that the first 3 files are the correct ones\n self.assertEqual(\n os.path.join('assets','notebook_inputs','output_gbdy_cov','D121','D121.geneBodyCoverage.txt'), \n input_files[0]\n )\n self.assertEqual(\n os.path.join('assets','notebook_inputs','output_gbdy_cov','D122','D122.geneBodyCoverage.txt'), 
\n input_files[1]\n )\n self.assertEqual(\n os.path.join('assets','notebook_inputs','output_gbdy_cov','D123','D123.geneBodyCoverage.txt'), \n input_files[2]\n )\n\n def test_load_genebody_coverage_data(self):\n input_files = [\n os.path.join(\"assets\", \"notebook_inputs\", \"output_gbdy_cov\", \"D121\", \"D121.geneBodyCoverage.txt\"),\n os.path.join(\"assets\", \"notebook_inputs\", \"output_gbdy_cov\", \"D122\", \"D122.geneBodyCoverage.txt\")\n ]\n inp_df_list = mgcg.load_genebody_coverage_data(input_files)\n\n #check that there are two data frames\n self.assertEqual(len(inp_df_list), 2)\n\n #check that first df is the right shape\n self.assertEqual(inp_df_list[0].shape[0], 100)\n self.assertEqual(inp_df_list[0].shape[1], 2)\n\n #check that second df is the right shape\n self.assertEqual(inp_df_list[1].shape[0], 100)\n self.assertEqual(inp_df_list[1].shape[1], 2)\n\n #check that sample id are the right ones\n self.assertEqual(inp_df_list[0].sample_id[0], \"D121\")\n self.assertEqual(inp_df_list[1].sample_id[0], \"D122\")\n\n def test_merge_dfs_into_one(self):\n logger.debug(\"\\n \\n \\n test_merge_dfs_into_one\\n \\n \")\n\n #create first fake data frame\n df = pandas.DataFrame({\"coverage_counts\":range(100000,500000, 4000), \"sample_id\":\"FAKE\"})\n df.index.name = \"genebody_pct\"\n df.index += 1 \n\n #create second fake data frame\n df2 = pandas.DataFrame({\"coverage_counts\":range(120000,520000, 4000), \"sample_id\":\"FACE\"})\n df2.index.name = \"genebody_pct\"\n df2.index += 1 \n\n counts_df = mgcg.merge_dfs_into_one([df, df2])\n\n logger.debug(\"counts_df: {}\".format(counts_df))\n\n #check that df is the right shape\n self.assertEqual(counts_df.shape[0], 200)\n self.assertEqual(counts_df.shape[1], 3)\n\n #check that first sample id is fake and that 11th is face \n self.assertEqual(counts_df.sample_id[0], \"FAKE\")\n self.assertEqual(counts_df.sample_id[100], \"FACE\")\n\n def test_sum_counts(self):\n logger.debug(\"\\n \\n \\n test_sum_counts\\n \\n \")\n\n sample_ids =[]\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n else:\n sample_ids.append(\"FACE\")\n\n #create fake data frame \n counts_df = pandas.DataFrame({\"coverage_counts\":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), \"sample_id\":sample_ids})\n\n sum_counts_df = mgcg.sum_counts(counts_df)\n\n logger.debug(\"counts_df: {}\".format(counts_df))\n logger.debug(\"sum_counts_df: {}\".format(sum_counts_df))\n\n #check that df is the right shape\n self.assertEqual(sum_counts_df.shape[0], 2)\n self.assertEqual(sum_counts_df.shape[1], 1)\n\n #check that the sums are correct \n self.assertEqual(sum_counts_df.total_coverage_counts[0], 31800000)\n self.assertEqual(sum_counts_df.total_coverage_counts[1], 29800000)\n\n\n def test_calculate_percentile_df(self):\n logger.debug(\"\\n \\n \\n test_jcalculate_percentile_df\\n \\n \")\n\n sample_ids =[]\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n else:\n sample_ids.append(\"FACE\")\n\n counts_df = pandas.DataFrame({\"coverage_counts\":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), \"sample_id\":sample_ids})\n\n sum_counts_df = pandas.DataFrame(data = {\"total_coverage_counts\":[31800000, 29800000]}, index = [\"FACE\", \"FAKE\"])\n sum_counts_df.index.name = \"sample_id\"\n\n percentile_df = mgcg.calculate_percentile_df(counts_df, sum_counts_df)\n \n\n #check that df is the right shape\n self.assertEqual(percentile_df.shape[0], 200)\n self.assertEqual(percentile_df.shape[1], 4)\n\n #check 
that first sample id is fake and that 11th is face \n self.assertEqual(percentile_df.sample_id[0], \"FAKE\")\n self.assertEqual(percentile_df.sample_id[100], \"FACE\")\n\n #check that FAKE coveragecounts are 2.8 mil and FACE are 3 mil\n self.assertEqual(percentile_df.total_coverage_counts[0], 29800000)\n self.assertEqual(percentile_df.total_coverage_counts[100], 31800000)\n\n #check first twenty percentiles to make sure they are correct\n for i in range(0, 20):\n self.assertEqual(percentile_df.coverage_percentile[i], percentile_df.coverage_counts[i] / percentile_df.total_coverage_counts[i])\n\n\n\n\n def test_create_pct_df_list(self):\n logger.debug(\"\\n \\n \\n test_create_pct_df_list\\n \\n \")\n \n sample_ids =[]\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n else:\n sample_ids.append(\"FACE\")\n\n coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))\n for num in range(len(coverage_percentile)):\n coverage_percentile[num] = coverage_percentile[num] / 1000000\n\n percentile_df = pandas.DataFrame({\"coverage_percentile\":coverage_percentile, \"sample_id\":sample_ids, \"genebody_pct\":list(range(1,101))+ list(range(1,101))})\n\n pct_df_list = mgcg.create_pct_df_list(percentile_df)\n\n logger.debug(\"pct_df_list: {}\".format(pct_df_list))\n\n #checking 20th\n self.assertEqual(pct_df_list[0].coverage_20pct[0], 0.005902)\n self.assertEqual(pct_df_list[0].coverage_20pct[1], 0.005291)\n\n #checking 50th\n self.assertEqual(pct_df_list[1].coverage_50pct[0], 0.009922)\n self.assertEqual(pct_df_list[1].coverage_50pct[1], 0.009341)\n\n #checking 80th\n self.assertEqual(pct_df_list[2].coverage_80pct[0], 0.013942)\n self.assertEqual(pct_df_list[2].coverage_80pct[1], 0.013391)\n\n def test_create_pct_comp_df(self):\n logger.debug(\"\\n \\n \\n test_create_pct_comp_df\\n \\n \")\n \n df20 = pandas.DataFrame(data = {\"coverage_20pct\":[0.005902,0.005291]}, index = [\"FAKE\", \"FACE\"])\n df20.index.name = \"sample_id\"\n\n df50 = pandas.DataFrame(data = {\"coverage_50pct\":[0.009922,0.009341]}, index = [\"FAKE\", \"FACE\"])\n df50.index.name = \"sample_id\"\n\n df80 = pandas.DataFrame(data = {\"coverage_80pct\":[0.013942,0.013391]}, index = [\"FAKE\", \"FACE\"])\n df80.index.name = \"sample_id\"\n\n pct_comp_df = mgcg.create_pct_comp_df([df20, df50, df80])\n\n logger.debug(\"pct_comp_df: {}\".format(pct_comp_df))\n\n self.assertAlmostEqual(pct_comp_df.cov_diff_pct[0], 0.810320, places=5)\n self.assertAlmostEqual(pct_comp_df.cov_diff_pct[1], 0.867145, places=5)\n \n\n def test_add_label_col(self):\n logger.debug(\"\\n \\n \\n test_add_label_col\\n \\n \")\n\n pct_comp_df = pandas.DataFrame(data = {\"cov_diff_pct\":[0.810320,0.867145]}, index = [\"FAKE\", \"FACE\"])\n pct_comp_df.index.name = \"sample_id\"\n\n pct_comp_df = mgcg.add_label_col(pct_comp_df)\n\n logger.debug(\"pct_comp_df: {}\".format(pct_comp_df))\n\n self.assertEqual(pct_comp_df.label[0], \"FAKE 0.81\")\n self.assertEqual(pct_comp_df.label[1], \"FACE 0.87\")\n\n def test_add_labels_based_on_sample_id(self):\n logger.debug(\"\\n \\n \\n test_add_labels_based_on_sample_id\\n \\n \")\n\n sample_ids =[]\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n else:\n sample_ids.append(\"FACE\") \n\n pct_comp_df = pandas.DataFrame(data = {\"cov_diff_pct\":[0.810320,0.867145], \"label\":[\"FAKE 0.81\", \"FACE 0.87\"]}, index = [\"FAKE\", \"FACE\"])\n\n percentile_df = pandas.DataFrame({\"sample_id\":sample_ids})\n\n percentile_df = 
mgcg.add_labels_based_on_sample_id(percentile_df, pct_comp_df)\n\n self.assertEqual(percentile_df.label[0], \"FAKE 0.81\")\n self.assertEqual(percentile_df.label[100], \"FACE 0.87\")\n\n\n \n\n def test_save_to_tsv(self):\n with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:\n logger.debug(\"\\n \\n \\n test_save_to_tsv: {}\\n \\n \".format(wkdir))\n\n output_all_pct_template = \"{exp_id}_all_genebody_coverage_r{{}}x{{}}.txt\".format(exp_id=\"MYEXPERIMENTID\")\n logger.debug(\"output_all_pct_template: {}\".format(output_all_pct_template))\n\n output_compare_80_20_template = \"{exp_id}_asymmetry_compare_80_20_r{{}}x{{}}.txt\".format(exp_id=\"MYEXPERIMENTID\")\n logger.debug(\"output_compare_80_20_template: {}\".format(output_compare_80_20_template))\n\n pct_comp_df = pandas.DataFrame(data = {\"cov_diff_pct\":[0.810320,0.867145], \"label\":[\"FAKE 0.81\", \"FACE 0.87\"]}, index = [\"FAKE\", \"FACE\"])\n\n sample_ids =[]\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n else:\n sample_ids.append(\"FACE\")\n\n coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))\n for num in range(len(coverage_percentile)):\n coverage_percentile[num] = coverage_percentile[num] / 1000000\n\n percentile_df = pandas.DataFrame({\"coverage_percentile\":coverage_percentile, \"sample_id\":sample_ids, \"genebody_pct\":list(range(1,101))+ list(range(1,101))})\n\n out_f_pct = mgcg.save_to_tsv(wkdir, output_compare_80_20_template, pct_comp_df)\n out_f_percentile = mgcg.save_to_tsv(wkdir, output_all_pct_template, percentile_df)\n\n logger.debug(\"out_f_pct: {}\".format(out_f_pct))\n logger.debug(\"out_f_percentile: {}\".format(out_f_percentile))\n\n self.assertTrue(os.path.exists(out_f_pct))\n self.assertTrue(os.path.exists(out_f_percentile))\n\n\n def test_create_and_save_genebody_coverage_graph(self):\n with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:\n logger.debug(\"\\n \\n \\n test_create_and_save_genebody_coverage_graph: {}\\n \\n \".format(wkdir))\n\n output_line_html_template = \"{exp_id}_genebody_{{}}.html\".format(exp_id=\"MYEXPERIMENTID\")\n logger.debug(\"output_line_html_template: {}\".format(output_line_html_template))\n\n sample_ids =[]\n labels = []\n for i in range(0, 200):\n if i < 100:\n sample_ids.append(\"FAKE\")\n labels.append(\"FAKE 0.81\")\n else:\n sample_ids.append(\"FACE\")\n labels.append(\"FAcE 0.87\")\n\n coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))\n for num in range(len(coverage_percentile)):\n coverage_percentile[num] = coverage_percentile[num] / 1000000\n\n percentile_df = pandas.DataFrame({\"coverage_percentile\":coverage_percentile, \"sample_id\":sample_ids, \"genebody_pct\":list(range(1,101))+ list(range(1,101)), \"label\":labels})\n\n output_filepath = mgcg.create_and_save_genebody_coverage_graph(\"coverage_percentile\", wkdir, percentile_df, output_line_html_template)\n\n self.assertTrue(os.path.exists(output_filepath))\n\n\n\n\n\n def test_create_and_save_histograms(self):\n with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:\n logger.debug(\"\\n \\n \\n test_create_and_save_histograms: {}\\n \\n \".format(wkdir))\n\n output_histogram_html_template = \"{exp_id}_genebody_histogram_{{}}.html\".format(exp_id=\"MYEXPERIMENTID\")\n logger.debug(\"output_histogram_html_template: {}\".format(output_histogram_html_template))\n\n pct_comp_df = pandas.DataFrame(data = {\"cov_diff_pct\":[0.810320,0.867145], \"label\":[\"FAKE 0.81\", \"FACE 0.87\"]}, index 
= [\"FAKE\", \"FACE\"])\n\n output_filepath = mgcg.create_and_save_histograms(\"cov_diff_pct\", wkdir, pct_comp_df, output_histogram_html_template)\n\n self.assertTrue(os.path.exists(output_filepath))\n\n\n def test_build_output_template_dict(self):\n with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:\n logger.debug(\"\\n \\n \\n test_build_output_template_dict: {}\\n \\n \".format(wkdir))\n\n output_compare_80_20_template = \"{exp_id}_asymmetry_compare_80_20_r{{}}x{{}}.txt\".format(exp_id=\"MYEXPERIMENTID\")\n output_all_pct_template = \"{exp_id}_all_genebody_coverage_r{{}}x{{}}.txt\".format(exp_id=\"MYEXPERIMENTID\")\n output_line_html_template = \"{exp_id}_genebody_{{}}.html\".format(exp_id=\"MYEXPERIMENTID\")\n output_histogram_html_template = \"{exp_id}_genebody_histogram_{{}}.html\".format(exp_id=\"MYEXPERIMENTID\")\n\n output_template_dict = mgcg.build_output_template_dict(\"MYEXPERIMENTID\")\n\n #check that the values in the dict are the correct templates\n self.assertEqual( output_template_dict[\"compare_80_20\"], output_compare_80_20_template)\n self.assertEqual( output_template_dict[\"all_pct\"], output_all_pct_template)\n self.assertEqual( output_template_dict[\"line_html\"], output_line_html_template)\n self.assertEqual( output_template_dict[\"histogram_html\"], output_histogram_html_template)\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\nif __name__ == \"__main__\":\n setup_logger.setup(verbose=True)\n\n unittest.main()\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
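The record above lists pandas.DataFrame as the only detected API for the genebody-coverage test suite. The behaviour those tests pin down for merge_dfs_into_one, sum_counts and calculate_percentile_df can be sketched with plain pandas; the following is a hypothetical re-implementation for illustration only (function names and the fake data come from the test assertions, not from the actual summer2020py module):

import pandas as pd

def merge_dfs_into_one(df_list):
    # stack the per-sample frames, keeping genebody_pct (the index) as a column
    return pd.concat(df_list).reset_index()

def sum_counts(counts_df):
    # total coverage per sample_id, indexed by sample_id
    return counts_df.groupby("sample_id").agg(
        total_coverage_counts=("coverage_counts", "sum"))

def calculate_percentile_df(counts_df, sum_counts_df):
    # attach each sample's total and express its counts as a fraction of that total
    out = counts_df.merge(sum_counts_df, left_on="sample_id", right_index=True)
    out["coverage_percentile"] = out["coverage_counts"] / out["total_coverage_counts"]
    return out

# usage with the same fake data as test_merge_dfs_into_one
df1 = pd.DataFrame({"coverage_counts": range(100000, 500000, 4000), "sample_id": "FAKE"})
df1.index.name = "genebody_pct"
df1.index += 1
df2 = pd.DataFrame({"coverage_counts": range(120000, 520000, 4000), "sample_id": "FACE"})
df2.index.name = "genebody_pct"
df2.index += 1
counts_df = merge_dfs_into_one([df1, df2])
percentile_df = calculate_percentile_df(counts_df, sum_counts(counts_df))

The division step mirrors the loop in test_calculate_percentile_df, which checks row by row that coverage_percentile equals coverage_counts divided by total_coverage_counts.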
eslickj/idaes-pse
|
[
"328ed07ffb0b4d98c03e972675ea32c41dd2531a"
] |
[
"idaes/power_generation/carbon_capture/mea_solvent_system/unit_models/column.py"
] |
[
"#################################################################################\n# The Institute for the Design of Advanced Energy Systems Integrated Platform\n# Framework (IDAES IP) was produced under the DOE Institute for the\n# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021\n# by the software owners: The Regents of the University of California, through\n# Lawrence Berkeley National Laboratory, National Technology & Engineering\n# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University\n# Research Corporation, et al. All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and\n# license information.\n#################################################################################\n\"\"\"\nIDAES First Generation (GEN1) MEA Rate-Based Packed Column.\n\nDetailed model equations can be found in the supplimentary information\nof the paper :\nAkula, Paul; Eslick, John; Bhattacharyya, Debangsu; Miller, David\n\"Model Development, Validation, and Part-Load Optimization of a\nMEA-Based Post-Combustion CO2 Capture Process\nUnder Part-Load and Variable Capture Operation,\nIndustrial & Engineering Chemistry Research,2021. (submitted)\n\n\"\"\"\n\n# Import Python libraries and third-party\nimport numpy as np\nimport warnings\nimport matplotlib.pyplot as plt\nfrom enum import Enum\n\n# Import Pyomo libraries\nfrom pyomo.environ import (Constraint, Expression, Param, Reals, NonNegativeReals,\n value, Var, exp, SolverStatus,\n units as pyunits)\nfrom pyomo.common.config import ConfigBlock, ConfigValue, In, Bool\n\n# Import IDAES Libraries\nfrom idaes.core.util.constants import Constants as CONST\nfrom idaes.core import (ControlVolume1DBlock, UnitModelBlockData,\n declare_process_block_class,\n MaterialBalanceType,\n EnergyBalanceType,\n MomentumBalanceType,\n FlowDirection)\nfrom idaes.core.util import get_solver\nfrom idaes.core.util.config import is_physical_parameter_block\nfrom idaes.generic_models.unit_models.heat_exchanger_1D import \\\n HeatExchangerFlowPattern as FlowPattern\nfrom idaes.core.util.misc import add_object_reference\nfrom idaes.core.util.exceptions import ConfigurationError\nfrom idaes.core.control_volume1d import DistributedVars\nimport idaes.logger as idaeslog\n\n\n__author__ = \"Paul Akula, John Eslick\"\n\n\n# Set up logger\n_log = idaeslog.getLogger(__name__)\n\n\nclass ProcessType(Enum):\n absorber = 1\n stripper = 2\n\n\n@declare_process_block_class(\"PackedColumn\")\nclass PackedColumnData(UnitModelBlockData):\n \"\"\"\n Standard Continous Differential Contactor (CDC) Model Class.\n\n \"\"\"\n\n # Configuration template for unit level arguments applicable to both phases\n CONFIG = UnitModelBlockData.CONFIG()\n\n # Configuration template for phase specific arguments\n _PhaseCONFIG = ConfigBlock()\n\n CONFIG.declare(\"area_definition\", ConfigValue(\n default=DistributedVars.variant,\n domain=In(DistributedVars),\n description=\"Argument for defining form of area variable\",\n doc=\"\"\"Argument defining whether area variable should be spatially\nvariant or not.\n**default** - DistributedVars.uniform.\n**Valid values:** {\nDistributedVars.uniform - area does not vary across spatial domian,\nDistributedVars.variant - area can vary over the domain and is indexed\nby time and space.}\"\"\"))\n\n CONFIG.declare(\"finite_elements\", ConfigValue(\n default=10,\n domain=int,\n description=\"Number of finite elements length domain\",\n doc=\"\"\"Number of finite elements to use when 
discretizing length\ndomain (default=20)\"\"\"))\n CONFIG.declare(\"length_domain_set\", ConfigValue(\n default=[0.0, 1.0],\n domain=list,\n description=\"Number of finite elements length domain\",\n doc=\"\"\"length_domain_set - (optional) list of point to use to\ninitialize a new ContinuousSet if length_domain is not\nprovided (default = [0.0, 1.0])\"\"\"))\n CONFIG.declare(\"transformation_method\", ConfigValue(\n default=\"dae.finite_difference\",\n description=\"Method to use for DAE transformation\",\n doc=\"\"\"Method to use to transform domain. Must be a method recognised\nby the Pyomo TransformationFactory,\n**default** - \"dae.finite_difference\".\n**Valid values:** {\n**\"dae.finite_difference\"** - Use a finite difference transformation method,\n**\"dae.collocation\"** - use a collocation transformation method}\"\"\"))\n CONFIG.declare(\"collocation_points\", ConfigValue(\n default=3,\n domain=int,\n description=\"Number of collocation points per finite element\",\n doc=\"\"\"Number of collocation points to use per finite element when\ndiscretizing length domain (default=3)\"\"\"))\n CONFIG.declare(\"flow_type\", ConfigValue(\n default=FlowPattern.countercurrent,\n domain=In(FlowPattern),\n description=\"Flow configuration of PackedColumn\",\n doc=\"\"\"PackedColumn flow pattern,\n**default** - FlowPattern.countercurrent.\n**Valid values:** {\n**FlowPattern.countercurrent** - countercurrent flow,\n**FlowPattern.cocurrent** - cocurrent flow}\"\"\"))\n CONFIG.declare(\"process_type\", ConfigValue(\n default=ProcessType.absorber,\n domain=In(ProcessType),\n description=\"Flag indicating the type of process\",\n doc=\"\"\"Flag indicating either absorption or stripping process.\n**default** - ProcessType.absorber.\n**Valid values:** {\n**ProcessType.absorber** - absorption process,\n**ProcessType.stripper** - stripping process.}\"\"\"))\n CONFIG.declare(\"packing_specific_area\", ConfigValue(\n default=250,\n domain=float,\n description=\"Specific surface area of packing (m^2/m^3)\",\n doc=\"Surface area of packing per unit volume of column(default= 250 m2/m3)\"))\n CONFIG.declare(\"packing_void_fraction\", ConfigValue(\n default=0.97,\n domain=float,\n description=\"Void fraction of the packing\",\n doc=\"Packing porosity or void fraction (default= 0.97 )\"))\n CONFIG.declare(\"fix_column_pressure\", ConfigValue(\n default=True,\n domain=Bool,\n description=\"Indicates whether the column pressure should be fixed\",\n doc=\"\"\"Indicates whether the column pressure should be fixed or not.\nThe momentum balances are not added when this is True.\n**default** - True.\n**Valid values:** {\n**True** - fix the column pressure and do not add momentum balances,\n**False** -Do not fix the column pressure and add momentum balances}\"\"\"))\n CONFIG.declare(\"column_pressure\", ConfigValue(\n default=107650,\n domain=float,\n description=\"fixed column pressure in Pa\",\n doc=\"Fixed column operating pressure in Pa\"))\n # Populate the phase side template to default values\n _PhaseCONFIG.declare(\"has_pressure_change\", ConfigValue(\n default=False,\n domain=Bool,\n description=\"Pressure change term construction flag\",\n doc=\"\"\"Indicates whether terms for pressure change should be constructed,\n**default** - False.\n**Valid values:** {\n**True** - include pressure change terms,\n**False** - exclude pressure change terms.}\"\"\"))\n _PhaseCONFIG.declare(\"pressure_drop_type\", ConfigValue(\n default=None,\n domain=In([\"Billet_Schultes_correlation\",\n 
\"Stichlmair_Fair_Bravo_correlation\",\n \"GPDC-Kister\"]),\n description=\"Construction flag for type of pressure drop\",\n doc=\"\"\"Indicates what type of pressure drop correlation should be used,\n**default**- None.\n**Valid values:** {\n**None** - set pressure drop to zero,\n**\"Stichlmair_Fair_Bravo_correlation\"** - Use the Stichlmair_Fair_Bravo_correlation model\n**\"GPDC-Kister\"** - Use the Generalized Pressure Drop Correlation of Kister 2007\n**\"Billet_Schultes_correlation\"** - Use the Billet_Schultes_correlation model}\"\"\"))\n _PhaseCONFIG.declare(\"property_package\", ConfigValue(\n default=None,\n domain=is_physical_parameter_block,\n description=\"Property package to use for control volume\",\n doc=\"\"\"Property parameter object used to define property calculations\n(default = 'use_parent_value')\n- 'use_parent_value' - get package from parent (default = None)\n- a ParameterBlock object\"\"\"))\n _PhaseCONFIG.declare(\"property_package_args\", ConfigValue(\n default={},\n description=\"Arguments for constructing vapor property package\",\n doc=\"\"\"A dict of arguments to be passed to the PropertyBlockData\nand used when constructing these\n(default = 'use_parent_value')\n- 'use_parent_value' - get package from parent (default = None)\n- a dict (see property package for documentation)\n\n \"\"\"))\n _PhaseCONFIG.declare(\"transformation_scheme\", ConfigValue(\n default=\"BACKWARD\",\n description=\"Scheme to use for DAE transformation\",\n doc=\"\"\"Scheme to use when transformating domain. See Pyomo\ndocumentation for supported schemes,\n**default** - \"BACKWARD\".\n**Valid values:** {\n**\"BACKWARD\"** - Use a BACKWARD finite difference transformation method,\n**\"FORWARD\"\"** - Use a FORWARD finite difference transformation method,\n**\"LAGRANGE-RADAU\"\"** - use a collocation transformation method}\"\"\"))\n\n # Create individual config blocks for vapor(gas) and liquid sides\n CONFIG.declare(\"vapor_side\",\n _PhaseCONFIG(doc=\"vapor side config arguments\"))\n\n CONFIG.declare(\"liquid_side\",\n _PhaseCONFIG(doc=\"liquid side config arguments\"))\n\n # ==========================================================================\n\n def build(self):\n \"\"\"\n Begin building model (pre-DAE transformation).\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n # Call UnitModel.build to build default attributes\n super(PackedColumnData, self).build()\n\n # ==========================================================================\n \"\"\" Set argument values for vapor and liquid sides\"\"\"\n\n # Set flow directions for the control volume blocks\n # Gas flows from 0 to 1, Liquid flows from 1 to 0\n if self.config.flow_type == FlowPattern.countercurrent:\n set_direction_vapor = FlowDirection.forward\n set_direction_liquid = FlowDirection.backward\n else:\n raise NotImplementedError(\n \"{} Packed Column class has implemented only counter-current \"\n \"flow pattern. 
Please contact the \"\n \"developer of the unit model you are using.\".format(self.name))\n\n # ==========================================================================\n \"\"\" Build Control volume 1D for vapor phase and\n populate vapor control volume\"\"\"\n\n self.vapor_phase = ControlVolume1DBlock(default={\n \"transformation_method\": self.config.transformation_method,\n \"transformation_scheme\": self.config.vapor_side.transformation_scheme,\n \"finite_elements\": self.config.finite_elements,\n \"collocation_points\": self.config.collocation_points,\n \"dynamic\": self.config.dynamic,\n \"has_holdup\": self.config.has_holdup,\n \"area_definition\": self.config.area_definition,\n \"property_package\": self.config.vapor_side.property_package,\n \"property_package_args\":\n self.config.vapor_side.property_package_args})\n\n self.vapor_phase.add_geometry(flow_direction=set_direction_vapor,\n length_domain_set=self.config.length_domain_set)\n\n self.vapor_phase.add_state_blocks(\n information_flow=set_direction_vapor,\n has_phase_equilibrium=False)\n\n self.vapor_phase.add_material_balances(\n balance_type=MaterialBalanceType.componentTotal,\n has_phase_equilibrium=False,\n has_mass_transfer=True)\n\n self.vapor_phase.add_energy_balances(\n balance_type=EnergyBalanceType.enthalpyTotal,\n has_heat_transfer=True)\n\n if not self.config.fix_column_pressure:\n self.vapor_phase.add_momentum_balances(\n balance_type=MomentumBalanceType.pressureTotal,\n has_pressure_change=self.config.vapor_side.has_pressure_change)\n\n # TO DO : remove this warning when there is support for deltaP\n warnings.warn(\"\"\"{} WARNING! WARNING!! WARNING!!!\n control volume class has not implemented a method\n for pressure drop. Constraint for deltaP must be provided if\n has_pressure_change is set to True\"\"\".format(self.name))\n # consistency check\n if (self.config.vapor_side.has_pressure_change and\n self.config.fix_column_pressure):\n raise ConfigurationError(\n \" has_pressure_change is set to {} \"\n \" while fix_colume_pressure is set to {}. \"\n \" Set fix_column_pressure to False if has_pressure_change is True.\"\n .format(self.config.vapor_side.has_pressure_change,\n self.config.fix_column_pressure))\n\n # TO DO\n # pressure drop calculation\n # Correlations for pressure drop and flooding required for design cases\n if (self.config.vapor_side.has_pressure_change and\n self.config.vapor_side.pressure_drop_type ==\n \"Stichlmair_Fair_Bravo_correlation\"):\n raise NotImplementedError(\n \"{} control volume class has not implemented a method for \"\n \"pressure drop. Please contact the \"\n \"developer of the property_package you are using.\"\n .format(self.name))\n\n if (self.config.vapor_side.has_pressure_change and\n self.config.vapor_side.pressure_drop_type == \"GPDC-Kister\"):\n raise NotImplementedError(\n \"{} control volume class has not implemented a method for \"\n \"pressure drop. 
Please contact the \"\n \"developer of the property_package you are using.\"\n .format(self.name))\n\n self.vapor_phase.apply_transformation()\n\n # ==========================================================================\n \"\"\" Build Control volume 1D for liquid phase and\n populate liquid control volume\n\n \"\"\"\n self.liquid_phase = ControlVolume1DBlock(default={\n \"transformation_method\": self.config.transformation_method,\n \"transformation_scheme\": self.config.liquid_side.transformation_scheme,\n \"finite_elements\": self.config.finite_elements,\n \"collocation_points\": self.config.collocation_points,\n \"dynamic\": self.config.dynamic,\n \"has_holdup\": self.config.has_holdup,\n \"area_definition\": self.config.area_definition,\n \"property_package\": self.config.liquid_side.property_package,\n \"property_package_args\":\n self.config.liquid_side.property_package_args})\n\n self.liquid_phase.add_geometry(flow_direction=set_direction_liquid,\n length_domain_set=self.config.\n length_domain_set)\n\n self.liquid_phase.add_state_blocks(\n information_flow=set_direction_liquid,\n has_phase_equilibrium=False)\n\n self.liquid_phase.add_material_balances(\n balance_type=MaterialBalanceType.componentTotal,\n has_phase_equilibrium=False,\n has_mass_transfer=True)\n\n self.liquid_phase.add_energy_balances(\n balance_type=EnergyBalanceType.enthalpyTotal,\n has_heat_transfer=True)\n\n self.liquid_phase.apply_transformation()\n\n # Add Ports for vapor side\n self.add_inlet_port(name=\"vapor_inlet\", block=self.vapor_phase)\n self.add_outlet_port(name=\"vapor_outlet\", block=self.vapor_phase)\n\n # Add Ports for liquid side\n self.add_inlet_port(name=\"liquid_inlet\", block=self.liquid_phase)\n self.add_outlet_port(name=\"liquid_outlet\", block=self.liquid_phase)\n\n # ==========================================================================\n \"\"\" Add performace equation method\"\"\"\n self._make_performance()\n\n\n def _make_performance(self):\n \"\"\"\n Constraints for unit model.\n\n Args: None\n\n Returns: None\n\n \"\"\"\n\n # ======================================================================\n # Aliases for Sets\n vap_comp = self.config.vapor_side.property_package.component_list\n liq_comp = self.config.liquid_side.property_package.component_list\n dcomp = self.config.liquid_side.property_package.component_list_d\n vapor_phase_list_ref = self.config.vapor_side.property_package.phase_list\n liquid_phase_list_ref = self.config.liquid_side.property_package.phase_list\n\n # Add object reference - time\n add_object_reference(self,\n \"t\",\n self.flowsheet().time)\n\n # Packing parameters\n self.eps_ref = Param(initialize=self.config.packing_void_fraction,\n units=None,\n doc=\"Packing void space m3/m3\")\n\n self.a_ref = Param(initialize=self.config.packing_specific_area,\n units=pyunits.m**2 / pyunits.m**3,\n doc=\"Packing specific surface area m2/m3\")\n\n self.dh_ref = Expression(expr= 4 * self.eps_ref /self.a_ref,\n doc=\"Hydraulic diameter\")\n\n # specific constants for volumetric mass transfer coefficients\n # reference: Billet and Schultes, 1999\n self.Cv_ref = Var(initialize=0.357,\n doc='''Vapor packing specific constant in\n Billet and Schultes' (1999) volumetric\n mass transfer coefficient correlation''')\n self.Cl_ref = Var(initialize=0.5,\n doc='''Liquid packing specific constant in\n Billet and Schultes' (1999) volumetric\n mass transfer coefficient correlation''')\n self.Cv_ref.fix()\n self.Cl_ref.fix()\n\n # Add object references - others\n R_ref = 
CONST.gas_constant\n\n # Unit Model Parameters/sets\n self.zi = Param(self.vapor_phase.length_domain, mutable=True,\n doc='''Integer indexing parameter required for transfer\n across boundaries of a given volume element''')\n # Set the integer indices\n for i, x in enumerate(self.vapor_phase.length_domain, 1):\n self.zi[x] = i\n\n # Continuation parameters for initialization\n self._homotopy_par_m = Param(initialize=0, mutable=True, units=None,\n doc='''Continuation parameter to turn on mass\n transfer terms gradually''')\n self._homotopy_par_h = Param(initialize=0, mutable=True, units=None,\n doc='''Continuation parameter to turn on heat\n transfer terms gradually''')\n\n # fixed column pressure\n if self.config.fix_column_pressure:\n self.column_pressure = Param(initialize=self.config.column_pressure,\n mutable=True,\n units=pyunits.Pa,\n doc='Fixed operating pressure of column')\n\n # Interfacial area parameters\n self.area_interfacial_parA = Var(initialize=0.6486,\n units=None,\n doc='''Interfacial area parameter A''')\n\n self.area_interfacial_parB = Var(initialize=0.12,\n units=None,\n doc='''Interfacial area parameter B''')\n self.area_interfacial_parA.fix(0.6486)\n self.area_interfacial_parB.fix(0.12)\n\n # Holdup parameters\n self.holdup_parA = Var(initialize=24.2355,\n units=None,\n doc='''holdup parameter A''')\n\n self.holdup_parB = Var(initialize=0.6471,\n units=None,\n doc='''holdup parameter B''')\n self.holdup_parA.fix(24.2355)\n self.holdup_parB.fix(0.6471)\n\n\n # Unit Model Variables\n # Geometry\n self.diameter_column = Var(domain=Reals,\n initialize=0.1,\n units=pyunits.m,\n doc='Column diameter')\n self.area_column = Var(domain=Reals,\n initialize=0.5,\n units=pyunits.m**2,\n doc='Column cross-sectional area')\n self.length_column = Var(domain=Reals,\n initialize=4.9,\n units=pyunits.m,\n doc='Column length')\n\n # Hydrodynamics\n self.velocity_vap = Var(self.t,\n self.vapor_phase.length_domain,\n domain=NonNegativeReals,\n initialize=2,\n units=pyunits.m / pyunits.s,\n doc='Vapor superficial velocity')\n self.velocity_liq = Var(self.t,\n self.liquid_phase.length_domain,\n units=pyunits.m / pyunits.s,\n domain=NonNegativeReals,\n initialize=0.01,\n doc='Liquid superficial velocity')\n # mass and heat transfer terms\n # mass transfer\n self.pressure_equil = Var(self.t,\n self.vapor_phase.length_domain,\n dcomp,\n domain=NonNegativeReals,\n initialize=500,\n units=pyunits.Pa,\n doc='''Equilibruim pressure of diffusing\n components at the interface ''')\n self.N_v = Var(self.t,\n self.liquid_phase.length_domain,\n dcomp,\n domain=Reals,\n initialize=0.0,\n units=pyunits.mol / (pyunits.s * pyunits.m),\n doc='''Moles of diffusing species transfered\n into liquid ''')\n self.enhancement_factor = Var(self.t,\n self.liquid_phase.length_domain,\n units=None,\n domain=NonNegativeReals,\n initialize=160,\n doc='Enhancement factor')\n\n self.yi_MEA = Var(self.t,\n self.liquid_phase.length_domain,\n domain=NonNegativeReals,\n initialize=0.5,\n units=None,\n doc='''Dimensionless concentration of MEA\n at interface ''')\n self.yeq_CO2 = Var(self.t,\n self.liquid_phase.length_domain,\n domain=NonNegativeReals,\n initialize=0.5,\n units=None,\n doc='''Dimensionless concentration of CO2\n in equilibruim with the bulk''')\n\n # heat transfer\n self.heat_vap = Var(self.t,\n self.vapor_phase.length_domain,\n domain=Reals,\n initialize=0.0,\n units=pyunits.J / (pyunits.s * pyunits.m),\n doc='Heat transfer rate in vapor phase')\n self.heat_liq = Var(self.t,\n 
self.vapor_phase.length_domain,\n domain=Reals,\n initialize=0.0,\n units=pyunits.J / (pyunits.s * pyunits.m),\n doc='Heat transfer rate in liquid phase')\n\n # =====================================================================\n # Add performance equations\n\n # Inter-facial Area model ([m2/m3]):\n # reference: Tsai correlation,regressed by Chinen et al. 2018\n def rule_interfacial_area(blk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return Expression.Skip\n else:\n return blk.a_ref * blk.area_interfacial_parA * (\n blk.liquid_phase.properties[t, x].dens_mass /\n blk.liquid_phase.properties[t, x].surf_tens *\n (blk.velocity_liq[t, x])**(4.0 / 3.0))**blk.area_interfacial_parB\n\n self.area_interfacial = Expression(self.t,\n self.vapor_phase.length_domain,\n rule=rule_interfacial_area,\n doc='Specific inter-facial area')\n\n # liquid holdup model\n # reference: Tsai correlation,regressed by Chinen et al. 2018\n def rule_holdup_liq(blk, t, x):\n return blk.holdup_parA * (blk.velocity_liq[t, x] *\n (blk.liquid_phase.properties[t, x].visc_d /\n blk.liquid_phase.properties[t, x].dens_mass) **\n (0.333))**blk.holdup_parB\n\n self.holdup_liq = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_holdup_liq,\n doc='Volumetric liquid holdup [-]')\n\n # vapor holdup model\n # reference: Tsai correlation,regressed by Chinen et al. 2018\n def rule_holdup_vap(blk, t, x):\n return blk.eps_ref - blk.holdup_liq[t, x]\n\n self.holdup_vap = Expression(self.t,\n self.vapor_phase.length_domain,\n rule=rule_holdup_vap,\n doc='Volumetric vapor holdup [-]')\n\n # ---------------------------------------------------------------------\n # Geometry contraints\n\n # Column area [m2]\n @self.Constraint(doc=\"Column cross-sectional area\")\n def column_cross_section_area(blk):\n return blk.area_column == (CONST.pi * 0.25 * (blk.diameter_column)**2)\n\n # Area of control volume : vapor side and liquid side\n control_volume_area_definition = ''' column_area * phase_holdup.\n The void fraction of the vapor phase (volumetric vapor holdup) and that\n of the liquid phase(volumetric liquid holdup) are\n lumped into the definition of the cross-sectional area of the\n vapor-side and liquid-side control volume respectively. 
Hence, the\n cross-sectional area of the control volume changes with time and space.\n '''\n\n if self.config.dynamic:\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n doc=control_volume_area_definition)\n def vapor_side_area(bk, t, x):\n return bk.vapor_phase.area[t, x] == bk.area_column * bk.holdup_vap[t, x]\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc=control_volume_area_definition)\n def liquid_side_area(bk, t, x):\n return bk.liquid_phase.area[t, x] == bk.area_column * bk.holdup_liq[t, x]\n else:\n self.vapor_phase.area.fix(value(self.area_column))\n self.liquid_phase.area.fix(value(self.area_column))\n\n # if column pressure is fixed\n if self.config.fix_column_pressure:\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n doc='Sets the fixed column pressure')\n def vapor_side_pressure(bk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return Constraint.Skip\n else:\n return bk.column_pressure == \\\n bk.vapor_phase.properties[t, x].pressure\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='Sets the fixed column pressure')\n def liquid_side_pressure(bk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Constraint.Skip\n else:\n return bk.liquid_phase.properties[t, x].pressure == \\\n bk.column_pressure\n else:\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='''Mechanical equilibruim: vapor-side pressure\n equal liquid -side pressure''')\n def mechanical_equil(bk, t, x):\n return bk.liquid_phase.properties[t, x].pressure == \\\n bk.vapor_phase.properties[t, x].pressure\n\n # Length of control volume : vapor side and liquid side\n @self.Constraint(doc=\"Vapor side length\")\n def vapor_side_length(blk):\n return blk.vapor_phase.length == blk.length_column\n\n @self.Constraint(doc=\"Liquid side length\")\n def liquid_side_length(blk):\n return blk.liquid_phase.length == blk.length_column\n\n # ---------------------------------------------------------------------\n # Hydrodynamic contraints\n # Vapor superficial velocity\n\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n doc=\"Vapor superficial velocity\")\n def eq_velocity_vap(blk, t, x):\n return blk.velocity_vap[t, x] * blk.area_column * \\\n blk.vapor_phase.properties[t, x].conc_mol == \\\n blk.vapor_phase.properties[t, x].flow_mol\n\n # Liquid superficial velocity\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc=\"Liquid superficial velocity\")\n def eq_velocity_liq(blk, t, x):\n return blk.velocity_liq[t, x] * blk.area_column * \\\n blk.liquid_phase.properties[t, x].conc_mol == \\\n blk.liquid_phase.properties[t, x].flow_mol\n\n\n # ---------------------------------------------------------------------\n # Mass transfer coefficients, Billet and Schultes (1999) correlation,\n # where parameters are regressed by Chinen et al. 
(2018).\n\n # vapor mass transfer coefficients for diffusing components [mol/m2.s.Pa]\n def rule_mass_transfer_coeff_vap(blk, t, x, j):\n if x == self.vapor_phase.length_domain.first():\n return Expression.Skip\n else:\n return 1 /\\\n (R_ref * blk.vapor_phase.properties[t, x].temperature) *\\\n blk.Cv_ref / (blk.holdup_vap[t, x])**0.5 *\\\n (blk.a_ref / blk.dh_ref)**0.5 *\\\n (blk.vapor_phase.properties[t, x].diffus[j])**(2 / 3) *\\\n (blk.vapor_phase.properties[t, x].visc_d /\n blk.vapor_phase.properties[t, x].dens_mass)**(1 / 3) *\\\n ((blk.velocity_vap[t, x] * blk.vapor_phase.properties[t, x].dens_mass) /\n (blk.a_ref * blk.vapor_phase.properties[t, x].visc_d))**(3 / 4)\n\n self.k_v = Expression(self.t,\n self.vapor_phase.length_domain,\n dcomp,\n rule=rule_mass_transfer_coeff_vap,\n doc=' Vapor mass transfer coefficient ')\n\n # mass transfer coefficients of CO2 in liquid phase [m/s]\n def rule_mass_transfer_coeff_CO2(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return blk.Cl_ref * 12**(1 / 6) * (blk.velocity_liq[t, x] *\n blk.liquid_phase.properties[t, x].diffus['CO2'] /\n (blk.dh_ref * blk.holdup_liq[t, x]))**0.5\n\n self.k_l_CO2 = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_mass_transfer_coeff_CO2,\n doc='''CO2 mass transfer coefficient in solvent''')\n\n # mass tranfer terms\n def rule_phi(blk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return Expression.Skip\n else:\n zb = self.vapor_phase.length_domain[self.zi[x].value - 1]\n return blk.enhancement_factor[t, zb] * blk.k_l_CO2[t, zb] / blk.k_v[t, x, 'CO2']\n\n self.phi = Expression(self.t,\n self.vapor_phase.length_domain,\n rule=rule_phi,\n doc='''CO2 Equilibruim partial pressure\n intermediate term''')\n\n # Equilibruim partial pressure of diffusing components at interface\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n dcomp,\n doc='''Equilibruim partial pressure of diffusing\n components at interface''')\n def pressure_at_interface(blk, t, x, j):\n if x == self.vapor_phase.length_domain.first():\n return blk.pressure_equil[t, x, j] == 0.0\n else:\n zb = self.vapor_phase.length_domain[self.zi[x].value - 1]\n if j == 'H2O':\n return blk.pressure_equil[t, x, j] == (\n blk.liquid_phase.properties[t, zb].vol_mol *\n blk.liquid_phase.properties[t, zb].conc_mol_comp_true[j] *\n blk.liquid_phase.properties[t, zb].pressure_sat[j])\n elif j == 'CO2':\n return blk.pressure_equil[t, x, j] == (\n (blk.vapor_phase.properties[t, x].mole_frac_comp[j] *\n blk.vapor_phase.properties[t, x].pressure + blk.phi[t, x] *\n blk.liquid_phase.properties[t, zb].conc_mol_comp_true[j]) /\n (1 + blk.phi[t, x] /\n blk.liquid_phase.properties[t, zb].henry_N2O_analogy))\n\n # mass transfer of diffusing components\n def rule_mass_transfer(blk, t, x, j):\n if x == self.vapor_phase.length_domain.first():\n return blk.N_v[t, x, j] == 0.0\n else:\n return blk.N_v[t, x, j] == (blk.k_v[t, x, j] *\n blk.area_interfacial[t, x] * blk.area_column *\n (blk.vapor_phase.properties[t, x].mole_frac_comp[j] *\n blk.vapor_phase.properties[t, x].pressure -\n blk.pressure_equil[t, x, j])) * blk._homotopy_par_m\n\n self.mass_transfer = Constraint(self.t,\n self.vapor_phase.length_domain,\n dcomp, rule=rule_mass_transfer,\n doc=\"mass transfer to liquid\")\n\n # mass tranfer term handle\n # liquid side\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n liquid_phase_list_ref,\n liq_comp,\n doc=\"mass transfer to liquid\")\n def 
liquid_phase_mass_transfer_handle(blk, t, x, p, j):\n if x == self.liquid_phase.length_domain.last():\n return blk.liquid_phase.mass_transfer_term[t, x, p, j] == 0.0\n else:\n zf = self.vapor_phase.length_domain[self.zi[x].value + 1]\n if j == 'MEA':\n return blk.liquid_phase.mass_transfer_term[t, x, p, j] == \\\n 0.0\n else:\n return blk.liquid_phase.mass_transfer_term[t, x, p, j] == \\\n blk.N_v[t, zf, j]\n # vapor side\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n vapor_phase_list_ref,\n vap_comp,\n doc=\"mass transfer from vapor\")\n def vapor_phase_mass_transfer_handle(blk, t, x, p, j):\n if x == self.vapor_phase.length_domain.first():\n return blk.vapor_phase.mass_transfer_term[t, x, p, j] == 0.0\n else:\n if j in ['N2', 'O2']:\n return blk.vapor_phase.mass_transfer_term[t, x, p, j] == \\\n 0.0\n else:\n return blk.vapor_phase.mass_transfer_term[t, x, p, j] == \\\n -blk.N_v[t, x, j]\n\n # Heat transfer coefficients, Chilton Colburn analogy\n # Vapor-liquid heat transfer coefficient [J/m2.s.K]\n\n def rule_heat_transfer_coeff(blk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return Expression.Skip\n else:\n return blk.k_v[t, x, 'CO2'] *\\\n blk.vapor_phase.properties[t, x].pressure *\\\n blk.vapor_phase.properties[t, x].cp_mol_mean *\\\n (blk.vapor_phase.properties[t, x].therm_cond /\n (blk.vapor_phase.properties[t, x].conc_mol *\n blk.vapor_phase.properties[t, x].cp_mol_mean *\n blk.vapor_phase.properties[t, x].diffus['CO2']))**(2 / 3)\n\n self.h_v = Expression(self.t,\n self.vapor_phase.length_domain,\n rule=rule_heat_transfer_coeff,\n doc='''vap-liq heat transfer coefficient''')\n\n # Vapor-liquid heat transfer coefficient modified by Ackmann factor [J/m.s.K]\n def rule_heat_transfer_coeff_Ack(blk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return Expression.Skip\n else:\n Ackmann_factor =\\\n (blk.vapor_phase.properties[t, x].cp_mol_comp_mean['CO2'] *\n blk.N_v[t, x, 'CO2'] +\n blk.vapor_phase.properties[t, x].cp_mol_comp_mean['H2O'] *\n blk.N_v[t, x, 'H2O'])\n return Ackmann_factor /\\\n (1 - exp(-Ackmann_factor /\n (blk.h_v[t, x] * blk.area_interfacial[t, x] * blk.area_column)))\n self.h_v_Ack = Expression(self.t,\n self.vapor_phase.length_domain,\n rule=rule_heat_transfer_coeff_Ack,\n doc='''vap-liq heat transfer coefficient corrected\n by Ackmann factor''')\n\n # heat transfer vapor side [J/s.m]\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n doc=\"heat transfer - vapor side \")\n def vapor_phase_heat_transfer(blk, t, x):\n if x == self.vapor_phase.length_domain.first():\n return blk.heat_vap[t, x] == 0\n else:\n zb = self.vapor_phase.length_domain[value(self.zi[x]) - 1]\n return blk.heat_vap[t, x] == blk.h_v_Ack[t, x] * \\\n (blk.liquid_phase.properties[t, zb].temperature -\n blk.vapor_phase.properties[t, x].temperature) * \\\n blk._homotopy_par_h\n\n # heat transfer liquid side [J/s.m]\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc=\"heat transfer - liquid side \")\n def liquid_phase_heat_transfer(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return blk.heat_liq[t, x] == 0\n else:\n zf = self.vapor_phase.length_domain[value(self.zi[x]) + 1]\n return blk.heat_liq[t, x] == blk.heat_vap[t, zf] + \\\n (blk.liquid_phase.properties[t, x].habs * blk.N_v[t, zf, 'CO2'] -\n blk.liquid_phase.properties[t, x].hvap * blk.N_v[t, zf, 'H2O']) *\\\n blk._homotopy_par_h\n\n # heat transfer handle\n # vapor heat transfer handle\n\n @self.Constraint(self.t,\n self.vapor_phase.length_domain,\n 
doc=\"vapor - heat transfer handle\")\n def vapor_phase_heat_transfer_handle(blk, t, x):\n return blk.vapor_phase.heat[t, x] == blk.heat_vap[t, x]\n\n # liquid heat transfer handle\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc=\"liquid - heat transfer handle\")\n def liquid_phase_heat_transfer_handle(blk, t, x):\n return blk.liquid_phase.heat[t, x] == -blk.heat_liq[t, x]\n\n # Enhancement factor model\n # reference: Jozsef Gaspar,Philip Loldrup Fosbol, (2015)\n # self.yi_MEA[z] is equivalent to sqrt(yi_MEA) in the document\n\n def rule_conc_mol_comp_interface_CO2(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n zf = self.liquid_phase.length_domain[self.zi[x].value + 1]\n return blk.pressure_equil[t, zf, 'CO2'] /\\\n blk.liquid_phase.properties[t, x].henry_N2O_analogy\n\n self.conc_mol_comp_CO2_eq = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_conc_mol_comp_interface_CO2,\n doc='''Concentration of CO2\n at the interface ]''')\n\n def rule_Hatta(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return (blk.liquid_phase.properties[t, x].k2_rxn *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEA'] *\n blk.liquid_phase.properties[t, x].diffus['CO2'])**0.5 /\\\n blk.k_l_CO2[t, x]\n\n self.Hatta = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_Hatta,\n doc='Hatta number')\n\n def rule_yb_CO2(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return blk.liquid_phase.properties[t, x].conc_mol_comp_true['CO2'] /\\\n blk.conc_mol_comp_CO2_eq[t, x]\n\n self.yb_CO2 = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_yb_CO2,\n doc='''Dimensionless concentration of CO2,\n Driving force term where\n Absortion implies yb_CO2 < 1 and\n Desorption impies yb_CO2 > 1 ''')\n\n def rule_instantaneous_E(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return 1 + (blk.liquid_phase.properties[t, x].diffus['MEA'] *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEA']) /\\\n (2 * blk.liquid_phase.properties[t, x].diffus['CO2'] *\n blk.conc_mol_comp_CO2_eq[t, x])\n\n self.instant_E = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_instantaneous_E,\n doc='Instantaneous Enhancement factor')\n\n def rule_yi_MEACOO(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return 1 + \\\n (blk.liquid_phase.properties[t, x].diffus['MEA'] *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEA']) * \\\n (1 - blk.yi_MEA[t, x] * blk.yi_MEA[t, x]) / \\\n (2 * blk.liquid_phase.properties[t, x].diffus['MEACOO-'] *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEACOO-'])\n\n self.yi_MEACOO = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_yi_MEACOO,\n doc='Dimensionless concentration of MEACOO-')\n\n def rule_yi_MEAH(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Expression.Skip\n else:\n return 1 + \\\n (blk.liquid_phase.properties[t, x].diffus['MEA'] *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEA']) * \\\n (1 - blk.yi_MEA[t, x] * blk.yi_MEA[t, x]) / \\\n (2 * blk.liquid_phase.properties[t, x].diffus['MEA+'] *\n blk.liquid_phase.properties[t, x].conc_mol_comp_true['MEA+'])\n\n self.yi_MEAH = Expression(self.t,\n self.liquid_phase.length_domain,\n rule=rule_yi_MEAH,\n doc='Dimensionless concentration of MEA+')\n\n 
@self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='''dimensionless concentration of CO2\n at equilibruim with the bulk ''')\n def yeq_CO2_eqn(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return blk.yeq_CO2[t, x] == 0.0\n else:\n return blk.yeq_CO2[t, x] * blk.yi_MEA[t, x]**4 == \\\n blk.yb_CO2[t, x] * blk.yi_MEAH[t, x] * blk.yi_MEACOO[t, x]\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='''Enhancement factor model Eqn 1 ''')\n def E1_eqn(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return blk.enhancement_factor[t, x] == 1\n else:\n return (blk.enhancement_factor[t, x] - 1) * (1 - blk.yb_CO2[t, x]) == \\\n (blk.instant_E[t, x] - 1) * (1 - blk.yi_MEA[t, x]**2)\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='''Enhancement factor model Eqn 2 ''')\n def E2_eqn(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return blk.yi_MEA[t, x] == 0\n else:\n return blk.enhancement_factor[t, x] * (1 - blk.yb_CO2[t, x]) == \\\n blk.Hatta[t, x] * blk.yi_MEA[t, x] * \\\n (1 - blk.yeq_CO2[t, x])\n\n @self.Constraint(self.t,\n self.liquid_phase.length_domain,\n doc='Enhancement factor lower bound ')\n def E3_eqn(blk, t, x):\n if x == self.liquid_phase.length_domain.last():\n return Constraint.Skip\n else:\n return 1 - blk.enhancement_factor[t, x] <= 0.0\n\n if self.config.dynamic:\n self.fix_initial_condition()\n\n # ==========================================================================\n # Model initialization routine\n\n def initialize(blk,\n vapor_phase_state_args=None,\n liquid_phase_state_args=None,\n state_vars_fixed=False,\n homotopy_steps_m=None,\n homotopy_steps_h=None,\n outlvl=idaeslog.NOTSET,\n solver=None,\n optarg=None):\n \"\"\"\n Column initialization.\n\n Arguments:\n state_args : a dict of arguments to be passed to the property\n package(s) to provide an initial state for\n initialization (see documentation of the specific\n property package) (default = None).\n homotopy_steps_m : List of continuations steps between 0 and 1\n for turning mass transfer constrainst gradually\n homotopy_steps_h : List of continuations steps between 0 and 1\n for turning heat transfer constraints gradually\n optarg : solver options dictionary object (default=None, use\n default solver options)\n solver : str indicating which solver to use during initialization\n (default = None, use IDAES default solver)\n\n \"\"\"\n\n # Set up logger for initialization and solve\n init_log = idaeslog.getInitLogger(blk.name, outlvl, tag=\"unit\")\n solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag=\"unit\")\n\n # Set solver options\n # TODO: Work out why using default solver here doubles test run time\n opt = get_solver(solver, optarg)\n\n if homotopy_steps_m is None:\n homotopy_steps_m = [0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 1]\n\n if homotopy_steps_h is None:\n homotopy_steps_h = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n\n dynamic_constraints = [\n \"vapor_side_area\",\n \"liquid_side_area\",\n \"eq_velocity_vap\",\n \"eq_velocity_liq\",\n \"E1_eqn\",\n \"E2_eqn\",\n \"pressure_at_interface\",\n \"mass_transfer\",\n \"liquid_phase_mass_transfer_handle\",\n \"vapor_phase_mass_transfer_handle\",\n \"vapor_phase_heat_transfer\",\n \"liquid_phase_heat_transfer\",\n \"vapor_phase_heat_transfer_handle\",\n \"liquid_phase_heat_transfer_handle\",\n \"yeq_CO2_eqn\",\n \"material_balances\",\n \"material_flow_linking_constraints\",\n \"material_holdup_calculation\",\n 
\"enthalpy_flow_linking_constraint\",\n \"energy_holdup_calculation\",\n \"material_flow_dx_disc_eq\",\n \"enthalpy_flow_dx_disc_eq\",\n \"pressure_dx_disc_eq\",\n \"enthalpy_balances\",\n \"material_accumulation_disc_eq\",\n \"energy_accumulation_disc_eq\",\n \"mechanical_equil\",\n \"pressure_balance\"]\n\n # ---------------------------------------------------------------------\n # Deactivate unit model level constraints (asides geometry constraints)\n for c in blk.component_objects(Constraint, descend_into=True):\n if c.local_name in dynamic_constraints:\n c.deactivate()\n\n # Fix some variables\n # Hydrodynamics - velocity\n blk.velocity_liq.fix()\n blk.velocity_vap.fix()\n\n # interface pressure\n blk.pressure_equil.fix()\n\n # flux\n blk.N_v.fix(0.0)\n blk.vapor_phase.mass_transfer_term.fix(0.0)\n blk.liquid_phase.mass_transfer_term.fix(0.0)\n\n # Enhancement factor model\n blk.enhancement_factor.fix()\n blk.yi_MEA.fix()\n blk.yeq_CO2.fix()\n\n # heat transfer\n blk.heat_vap.fix(0.0)\n blk.heat_liq.fix(0.0)\n blk.vapor_phase.heat.fix(0.0)\n blk.liquid_phase.heat.fix(0.0)\n # area\n blk.vapor_phase.area.fix(value(blk.area_column))\n blk.liquid_phase.area.fix(value(blk.area_column))\n\n # other variables\n # Pressure_dx\n if not blk.config.fix_column_pressure:\n blk.vapor_phase.pressure_dx[:, :].fix(0.0)\n\n # vapor side flow terms\n blk.vapor_phase._enthalpy_flow.fix(1.0)\n blk.vapor_phase.enthalpy_flow_dx[:, :, :].fix(0.0)\n blk.vapor_phase._flow_terms.fix(1.0)\n blk.vapor_phase.material_flow_dx[:, :, :, :].fix(0.0)\n\n # liquid side flow terms\n blk.liquid_phase._enthalpy_flow.fix(1.0)\n blk.liquid_phase.enthalpy_flow_dx[:, :, :].fix(0.0)\n blk.liquid_phase._flow_terms.fix(1.0)\n blk.liquid_phase.material_flow_dx[:, :, :, :].fix(0.0)\n\n # accumulation terms\n # fix accumulation terms to zero and holdup to 1\n if blk.config.dynamic:\n # liquid\n blk.liquid_phase.energy_holdup[:, :, :].fix(1.0)\n blk.liquid_phase.energy_accumulation[:, :, :].fix(0.0)\n blk.liquid_phase.material_holdup[:, :, :, :].fix(1.0)\n blk.liquid_phase.material_accumulation[:, :, :, :].fix(0.0)\n # vapor\n blk.vapor_phase.energy_holdup[:, :, :].fix(1.0)\n blk.vapor_phase.energy_accumulation[:, :, :].fix(0.0)\n blk.vapor_phase.material_holdup[:, :, :, :].fix(1.0)\n blk.vapor_phase.material_accumulation[:, :, :, :].fix(0.0)\n blk.unfix_initial_condition()\n\n # ---------------------------------------------------------------------\n # get values for state variables for initialization\n if vapor_phase_state_args is None:\n if blk.config.process_type == ProcessType.absorber:\n vapor_phase_state_args = {\n 'flow_mol': blk.vapor_inlet.flow_mol[0].value,\n 'temperature': blk.vapor_inlet.temperature[0].value,\n 'pressure': blk.vapor_inlet.pressure[0].value,\n 'mole_frac_comp':\n {'H2O': blk.vapor_inlet.mole_frac_comp[0, 'H2O'].value,\n 'CO2': blk.vapor_inlet.mole_frac_comp[0, 'CO2'].value,\n 'N2': blk.vapor_inlet.mole_frac_comp[0, 'N2'].value,\n 'O2': blk.vapor_inlet.mole_frac_comp[0, 'O2'].value}}\n elif blk.config.process_type == ProcessType.stripper:\n vapor_phase_state_args = {\n 'flow_mol': blk.vapor_inlet.flow_mol[0].value,\n 'temperature': blk.vapor_inlet.temperature[0].value,\n 'pressure': blk.vapor_inlet.pressure[0].value,\n 'mole_frac_comp':\n {'H2O': blk.vapor_inlet.mole_frac_comp[0, 'H2O'].value,\n 'CO2': blk.vapor_inlet.mole_frac_comp[0, 'CO2'].value}}\n\n if liquid_phase_state_args is None:\n liquid_phase_state_args = {\n 'flow_mol': blk.liquid_inlet.flow_mol[0].value,\n 'temperature': 
blk.liquid_inlet.temperature[0].value,\n 'pressure': blk.vapor_inlet.pressure[0].value,\n 'mole_frac_comp':\n {'H2O': blk.liquid_inlet.mole_frac_comp[0, 'H2O'].value,\n 'CO2': blk.liquid_inlet.mole_frac_comp[0, 'CO2'].value,\n 'MEA': blk.liquid_inlet.mole_frac_comp[0, 'MEA'].value}}\n\n init_log.info(\"STEP 1: Property Package initialization\")\n\n # Initialize vapor_phase block\n vflag = blk.vapor_phase.properties.initialize(\n state_args=vapor_phase_state_args,\n state_vars_fixed=False,\n outlvl=outlvl,\n optarg=optarg,\n solver=solver,\n hold_state=True)\n\n # Initialize liquid_phase properties block\n lflag = blk.liquid_phase.properties.initialize(\n state_args=liquid_phase_state_args,\n state_vars_fixed=False,\n outlvl=outlvl,\n optarg=optarg,\n solver=solver,\n hold_state=True)\n\n init_log.info(\"STEP 2: Steady-State ISOTHERMAL MASS BALANCE\")\n init_log.info_high('No mass transfer ')\n init_log.info_high('No heat transfer')\n\n # unfix flow variable terms\n # vapor side\n\n blk.vapor_phase.properties.release_state(flags=vflag)\n blk.vapor_phase.properties[:, :].temperature.fix()\n if not blk.config.fix_column_pressure:\n blk.vapor_phase.properties[:, :].pressure.fix()\n\n blk.vapor_phase._flow_terms[:, :, :, :].unfix()\n blk.vapor_phase.material_flow_dx[:, :, :, :].unfix()\n # liquid-side\n blk.liquid_phase.properties.release_state(flags=lflag)\n blk.liquid_phase.properties[:, :].temperature.fix()\n if not blk.config.fix_column_pressure:\n blk.liquid_phase.properties[:, :].pressure.fix()\n\n blk.liquid_phase._flow_terms[:, :, :, :].unfix()\n blk.liquid_phase.material_flow_dx[:, :, :, :].unfix()\n\n # activate mass balance related equations\n # liquid control volume\n\n for c in [\n \"material_balances\",\n \"material_flow_linking_constraints\",\n \"material_flow_dx_disc_eq\"]:\n getattr(blk.liquid_phase, c).activate()\n # vapor control volume\n for c in [\n \"material_balances\",\n \"material_flow_linking_constraints\",\n \"material_flow_dx_disc_eq\"]:\n getattr(blk.vapor_phase, c).activate()\n\n # solve for a small length if stripper\n if (blk.config.process_type == ProcessType.stripper):\n _specified_length = value(blk.length_column)\n blk.length_column.fix(0.6)\n\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Step 2: {}.\".format(idaeslog.condition(res)))\n\n # ---------------------------------------------------------------------\n init_log.info('STEP 3: Add Mass tranfer terms')\n init_log.info_high('(3a) Velocities & Interface pressure')\n init_log.info_high('(3b) Enhancement factor')\n\n # Initialize : Velocities, Interface pressure, Enhancement factor\n\n # velocity\n blk.velocity_vap.unfix()\n blk.velocity_liq.unfix()\n blk.eq_velocity_vap.activate()\n blk.eq_velocity_liq.activate()\n for t in blk.t:\n for x in blk.vapor_phase.length_domain:\n blk.velocity_vap[t, x].value = value(\n blk.vapor_phase.properties[t, x].flow_mol /\n (blk.area_column * blk.vapor_phase.properties[t, x].conc_mol))\n for x in blk.liquid_phase.length_domain:\n blk.velocity_liq[t, x].value = value(\n blk.liquid_phase.properties[t, x].flow_mol /\n (blk.area_column * blk.liquid_phase.properties[t, x].conc_mol))\n\n # Interface pressure\n blk.pressure_equil.unfix()\n blk.pressure_at_interface.activate()\n blk.enhancement_factor.fix(1)\n\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Step 3a: {}.\".format(idaeslog.condition(res)))\n # 
----------------------------------------------------------------------\n # Enhancement factor model\n blk.enhancement_factor.unfix()\n for t in blk.t:\n for x in blk.liquid_phase.length_domain:\n blk.enhancement_factor[t, x].value = 100\n blk.yi_MEA.unfix()\n blk.yeq_CO2.unfix()\n blk.E1_eqn.activate()\n blk.E2_eqn.activate()\n blk.yeq_CO2_eqn.activate()\n\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Step 3 complete: {}.\".format(idaeslog.condition(res)))\n # ---------------------------------------------------------------------\n\n init_log.info('STEP 4: Isothermal chemical absoption')\n init_log.info_high(\"Homotopy steps: \")\n init_log.info_high(\"No mass transfer (0.0) --> (1.0) mass transfer\")\n\n # ISOTHERMAL CHEMICAL ABSORPTION\n blk.N_v.unfix()\n blk.vapor_phase.mass_transfer_term.unfix()\n blk.liquid_phase.mass_transfer_term.unfix()\n blk.mass_transfer.activate()\n blk.vapor_phase_mass_transfer_handle.activate()\n blk.liquid_phase_mass_transfer_handle.activate()\n\n for i in homotopy_steps_m:\n init_log.info('homotopy step -->{0:5.2f}'.format(i))\n blk._homotopy_par_m = i\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n if res.solver.status != SolverStatus.warning:\n print('')\n\n init_log.info_high(\"Step 4 complete: {}.\".format(idaeslog.condition(res)))\n # ---------------------------------------------------------------------\n if not blk.config.fix_column_pressure:\n for c in [\"mechanical_equil\"]:\n getattr(blk, c).activate()\n for c in [\"pressure_balance\", \"pressure_dx_disc_eq\"]:\n getattr(blk.vapor_phase, c).activate()\n\n blk.vapor_phase.pressure_dx[:, :].unfix()\n\n # Unfix pressure\n for t in blk.t:\n for x in blk.vapor_phase.length_domain:\n # Unfix all vapor pressure variables except at the inlet\n if (blk.vapor_phase.properties[t, x].config.defined_state\n is False):\n blk.vapor_phase.properties[t, x].pressure.unfix()\n for x in blk.liquid_phase.length_domain:\n blk.liquid_phase.properties[t, x].pressure.unfix()\n\n # ---------------------------------------------------------------------\n init_log.info('STEP 5: Adiabatic chemical absoption')\n init_log.info_high(\"Homotopy steps:\")\n init_log.info_high(\"Isothermal (0.0) --> (1.0) Adiabatic \")\n\n # Unfix temperature\n for t in blk.t:\n for x in blk.vapor_phase.length_domain:\n # Unfix all vapor temperature variables except at the inlet\n if (blk.vapor_phase.properties[t, x].config.defined_state\n is False):\n blk.vapor_phase.properties[t, x].temperature.unfix()\n for x in blk.liquid_phase.length_domain:\n # Unfix all liquid temperature variables except at the inlet\n if (blk.liquid_phase.properties[t, x].config.defined_state\n is False):\n blk.liquid_phase.properties[t, x].temperature.unfix()\n\n # unfix heat transfer terms\n blk.heat_vap.unfix()\n blk.heat_liq.unfix()\n blk.vapor_phase.heat.unfix()\n blk.liquid_phase.heat.unfix()\n\n # unfix enthalpy flow variable terms\n blk.vapor_phase._enthalpy_flow[:, :, :].unfix()\n blk.vapor_phase.enthalpy_flow_dx[:, :, :].unfix()\n blk.liquid_phase._enthalpy_flow[:, :, :].unfix()\n blk.liquid_phase.enthalpy_flow_dx[:, :, :].unfix()\n\n # activate steady-state energy balance related equations\n # unit model\n for c in [\n \"vapor_phase_heat_transfer\",\n \"liquid_phase_heat_transfer\",\n \"vapor_phase_heat_transfer_handle\",\n \"liquid_phase_heat_transfer_handle\"]:\n getattr(blk, c).activate()\n\n # liquid control volume\n for c in [\n 
\"enthalpy_flow_linking_constraint\",\n \"enthalpy_flow_dx_disc_eq\",\n \"enthalpy_balances\"]:\n getattr(blk.liquid_phase, c).activate()\n\n # vapor control volume\n for c in [\n \"enthalpy_flow_linking_constraint\",\n \"enthalpy_flow_dx_disc_eq\",\n \"enthalpy_balances\"]:\n getattr(blk.vapor_phase, c).activate()\n\n for i in homotopy_steps_h:\n init_log.info('homotopy step -->{0:5.2f}'.format(i))\n blk._homotopy_par_h = i\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n\n init_log.info_high(\"Step 5 complete: {}.\".format(idaeslog.condition(res)))\n\n # ---------------------------------------------------------------------\n # scale up at this if stripper\n if (blk.config.process_type == ProcessType.stripper):\n packing_height = np.linspace(0.6, _specified_length, num=10)\n init_log.info_high('SCALEUP Stripper height')\n for L in packing_height:\n blk.length_column.fix(L)\n print('Packing height = {:6.2f}'.format(L))\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Scaleup: {}.\".format(idaeslog.condition(res)))\n\n if not blk.config.dynamic:\n init_log.info('STEADY-STATE INITIALIZATION COMPLETE')\n\n if blk.config.dynamic:\n init_log.info('STEP 6: unfix Accumulation and Holdup terms')\n init_log.info_high(\"6a Holdup calculations\")\n init_log.info_high(\"6b Include Accumulation terms\")\n\n # activate holdup constraints\n # unit model\n for c in [\n \"vapor_side_area\",\n \"liquid_side_area\"]:\n getattr(blk, c).activate()\n # liquid control volume\n for c in [\n \"material_holdup_calculation\",\n \"energy_holdup_calculation\"]:\n getattr(blk.liquid_phase, c).activate()\n # vapor control volume\n for c in [\n \"material_holdup_calculation\",\n \"energy_holdup_calculation\"]:\n getattr(blk.vapor_phase, c).activate()\n\n # unfix holdup terms\n blk.vapor_phase.energy_holdup[:, :, :].unfix()\n blk.vapor_phase.material_holdup[:, :, :, :].unfix()\n blk.liquid_phase.energy_holdup[:, :, :].unfix()\n blk.liquid_phase.material_holdup[:, :, :, :].unfix()\n\n # unfix CV1D area lumped with phase volumetric holdup fraction\n blk.vapor_phase.area.unfix()\n blk.liquid_phase.area.unfix()\n\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Step 6a complete: {}.\".format(\n idaeslog.condition(res)))\n\n # Step 6b:\n # unfix accumulation terms(derivative variables)\n blk.vapor_phase.energy_accumulation[:, :, :].unfix()\n blk.vapor_phase.material_accumulation[:, :, :, :].unfix()\n blk.liquid_phase.energy_accumulation[:, :, :].unfix()\n blk.liquid_phase.material_accumulation[:, :, :, :].unfix()\n\n # activate constraints for accumulation terms\n # liquid control volume\n for c in [\n \"material_accumulation_disc_eq\",\n \"energy_accumulation_disc_eq\"]:\n getattr(blk.liquid_phase, c).activate()\n\n # vapor control volume\n for c in [\n \"material_accumulation_disc_eq\",\n \"energy_accumulation_disc_eq\"]:\n getattr(blk.vapor_phase, c).activate()\n\n blk.fix_initial_condition()\n\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = opt.solve(blk, tee=slc.tee)\n init_log.info_high(\"Step 6 complete: {}.\".format(\n idaeslog.condition(res)))\n init_log.info('INITIALIZATION COMPLETE')\n\n def fix_initial_condition(blk):\n \"\"\"\n Initial condition for material and enthalpy balance.\n\n Mass balance : Initial condition is determined by\n fixing n-1 mole fraction and the total molar flowrate\n\n Energy 
balance :Initial condition is determined by\n fixing the temperature.\n\n \"\"\"\n\n vap_comp = blk.config.vapor_side.property_package.component_list\n liq_comp = blk.config.liquid_side.property_package.component_list\n\n for x in blk.vapor_phase.length_domain:\n if x != 0:\n blk.vapor_phase.properties[0, x].temperature.fix()\n blk.vapor_phase.properties[0, x].flow_mol.fix()\n for j in vap_comp:\n if (x != 0 and j != 'CO2'):\n blk.vapor_phase.properties[0, x].mole_frac_comp[j].fix()\n for x in blk.liquid_phase.length_domain:\n if x != 1:\n blk.liquid_phase.properties[0, x].temperature.fix()\n blk.liquid_phase.properties[0, x].flow_mol.fix()\n for j in liq_comp:\n if (x != 1 and j != 'CO2'):\n blk.liquid_phase.properties[0, x].mole_frac_comp[j].fix()\n\n def unfix_initial_condition(blk):\n \"\"\"\n Function to unfix initial condition for material and enthalpy balance.\n\n \"\"\"\n\n vap_comp = blk.config.vapor_side.property_package.component_list\n liq_comp = blk.config.liquid_side.property_package.component_list\n\n for x in blk.vapor_phase.length_domain:\n if x != 0:\n blk.vapor_phase.properties[0, x].temperature.unfix()\n blk.vapor_phase.properties[0, x].flow_mol.unfix()\n for j in vap_comp:\n if (x != 0 and j != 'CO2'):\n blk.vapor_phase.properties[0, x].mole_frac_comp[j].unfix()\n for x in blk.liquid_phase.length_domain:\n if x != 1:\n blk.liquid_phase.properties[0, x].temperature.unfix()\n blk.liquid_phase.properties[0, x].flow_mol.unfix()\n for j in liq_comp:\n if (x != 1 and j != 'CO2'):\n blk.liquid_phase.properties[0, x].mole_frac_comp[j].unfix()\n\n def make_steady_state_column_profile(blk):\n \"\"\"\n Steady-state Plot function for Temperature and CO2 Pressure profile.\n\n \"\"\"\n\n normalised_column_height = [x for x in blk.vapor_phase.length_domain]\n simulation_time = [t for t in blk.t]\n\n # final time\n tf = simulation_time[-1]\n CO2_profile = []\n liquid_temperature_profile = []\n\n # APPEND RESULTS\n for x in blk.vapor_phase.length_domain:\n CO2_profile.append(\n value(1e-3 * blk.vapor_phase.properties[tf, x].pressure *\n blk.vapor_phase.properties[tf, x].mole_frac_comp['CO2']))\n liquid_temperature_profile.append(\n value(blk.liquid_phase.properties[tf, x].temperature))\n\n # plot properties\n fontsize = 18\n labelsize = 18\n fig = plt.figure(figsize=(9, 7))\n ax1 = fig.add_subplot(111)\n ax1.set_title('Steady-state column profile',\n fontsize=16, fontweight='bold')\n\n # plot primary axis\n lab1 = ax1.plot(normalised_column_height, CO2_profile,\n linestyle='--', mec=\"b\", mfc=\"None\",\n color='b', label='CO$_{2}$ partial pressure [kPa]',\n marker='o')\n\n ax1.tick_params(axis='y', labelcolor='b',\n direction='in', labelsize=labelsize)\n ax1.tick_params(axis='x', direction='in', labelsize=labelsize)\n\n ax1.set_xlabel('Normalise column height from bottom',\n fontsize=fontsize)\n ax1.set_ylabel('P$_{CO_{2}}$ [ kPa]', color='b', fontweight='bold',\n fontsize=fontsize)\n # plot secondary axis\n ax2 = ax1.twinx()\n lab2 = ax2.plot(normalised_column_height,\n liquid_temperature_profile,\n color='g',\n linestyle='-',\n label='Liquid temperature profile',\n marker='s')\n ax2.set_ylabel('T$_{liq}$ [ K ] ', color='g', fontweight='bold',\n fontsize=fontsize)\n ax2.tick_params(axis='y', labelcolor='g',\n direction='in', labelsize=labelsize)\n\n # get the labels\n lab_1 = lab1 + lab2\n labels_1 = [l.get_label() for l in lab_1]\n ax1.legend(lab_1, labels_1, loc='lower center', fontsize=fontsize)\n fig.tight_layout()\n\n # show graph\n plt.show()\n\n def 
make_dynamic_column_profile(blk):\n \"\"\"\n Dynamic Plot function for Temperature and CO2 Pressure profile.\n\n \"\"\"\n\n normalised_column_height = [x for x in blk.vapor_phase.length_domain]\n simulation_time = [t for t in blk.t]\n fluegas_flow = [value(blk.vapor_inlet.flow_mol[t]) for t in blk.t]\n\n # final time\n tf = simulation_time[-1]\n nf = len(simulation_time)\n\n # mid-time\n if nf % 2 == 0:\n tm = int(nf / 2)\n else:\n tm = int(nf / 2 + 1)\n\n CO2_profile_mid = []\n CO2_profile_fin = []\n liquid_temperature_profile_mid = []\n liquid_temperature_profile_fin = []\n\n # APPEND RESULTS\n for x in blk.vapor_phase.length_domain:\n CO2_profile_mid.append(\n value(1e-3 * blk.vapor_phase.properties[tm, x].pressure *\n blk.vapor_phase.properties[tm, x].mole_frac_comp['CO2']))\n CO2_profile_fin.append(\n value(1e-3 * blk.vapor_phase.properties[tf, x].pressure *\n blk.vapor_phase.properties[tf, x].mole_frac_comp['CO2']))\n\n liquid_temperature_profile_mid.append(\n value(blk.liquid_phase.properties[tm, x].temperature))\n liquid_temperature_profile_fin.append(\n value(blk.liquid_phase.properties[tf, x].temperature))\n\n # plot properties\n fontsize = 18\n labelsize = 18\n fig = plt.figure(figsize=(12, 7))\n ax1 = fig.add_subplot(211)\n ax1.set_title('Column profile @ {0:6.2f} & {1:6.2f} sec'.format(tm, tf),\n fontsize=16, fontweight='bold')\n\n # plot primary axis\n lab1 = ax1.plot(normalised_column_height, CO2_profile_mid,\n linestyle='--', color='b',\n label='CO$_{2}$ partial pressure [kPa] @ %d' % tm)\n lab2 = ax1.plot(normalised_column_height, CO2_profile_fin,\n linestyle='-', color='b',\n label='CO$_{2}$ partial pressure [kPa] @ %d' % tf)\n\n ax1.tick_params(axis='y', labelcolor='b',\n direction='in', labelsize=labelsize)\n ax1.tick_params(axis='x', direction='in', labelsize=labelsize)\n\n ax1.set_xlabel('Normalise column height from bottom',\n fontsize=fontsize)\n ax1.set_ylabel('P$_{CO_{2}}$ [ kPa]', color='b', fontweight='bold',\n fontsize=fontsize)\n\n # plot secondary axis\n ax2 = ax1.twinx()\n lab3 = ax2.plot(normalised_column_height,\n liquid_temperature_profile_mid,\n color='g', linestyle='--',\n label='Liquid temperature profile @ {0:6.1f}'.format(tm))\n lab4 = ax2.plot(normalised_column_height,\n liquid_temperature_profile_fin,\n color='g', linestyle='-',\n label='Liquid temperature profile @ {0:6.1f}'.format(tf))\n ax2.set_ylabel('T$_{liq}$ [ K ] ', color='g', fontweight='bold',\n fontsize=fontsize)\n ax2.tick_params(axis='y', labelcolor='g',\n direction='in', labelsize=labelsize)\n # get the labels\n lab_1 = lab1 + lab2 + lab3 + lab4\n labels_1 = [l.get_label() for l in lab_1]\n ax1.legend(lab_1, labels_1, fontsize=fontsize)\n\n # plot flowgas flow\n ax3 = fig.add_subplot(212)\n ax3.plot(simulation_time, fluegas_flow,\n linestyle='--', mec=\"g\", mfc=\"None\",\n color='g', label='Fluegas flow [mol/s]',\n marker='o')\n ax3.tick_params(labelsize=labelsize)\n ax3.set_xlabel('Simulation time (sec)', fontsize=fontsize)\n ax3.set_ylabel(' Fv [ mol/s]', color='b', fontweight='bold',\n fontsize=fontsize)\n ax3.legend(['Fluegas flow [mol/s]'], fontsize=fontsize)\n fig.tight_layout()\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MaekTec/calvin_env
|
[
"37c8abe860b6cfbbd94dd9ad64b4245b1d402d60"
] |
[
"calvin_env/robot/robot.py"
] |
[
"import logging\nimport math\nimport os\n\nimport cv2\nimport numpy as np\nimport pybullet as p\nimport quaternion\n\nfrom calvin_env.robot.mixed_ik import MixedIK\nfrom calvin_env.utils.utils import timeit\n\n# A logger for this file\nlog = logging.getLogger(__name__)\n\n\nclass Robot:\n def __init__(\n self,\n filename,\n base_position,\n base_orientation,\n initial_joint_positions,\n max_joint_force,\n gripper_force,\n arm_joint_ids,\n gripper_joint_ids,\n gripper_joint_limits,\n tcp_link_id,\n end_effector_link_id,\n cid,\n use_nullspace,\n max_velocity,\n use_ik_fast,\n euler_obs,\n lower_joint_limits=(-2.8973, -1.7628, -2.8973, -3.0718, -2.8973, -0.0175, -2.8973),\n upper_joint_limits=(2.8973, 1.7628, 2.8973, -0.0698, 2.8973, 3.7525, 2.8973),\n max_rel_pos=0.02,\n max_rel_orn=0.05,\n magic_scaling_factor_pos=1,\n magic_scaling_factor_orn=1,\n use_target_pose=True,\n **kwargs,\n ):\n log.info(\"Loading robot\")\n self.cid = cid\n self.filename = filename\n self.use_nullspace = use_nullspace\n self.max_velocity = max_velocity\n self.use_ik_fast = use_ik_fast\n self.base_position = base_position\n self.base_orientation = p.getQuaternionFromEuler(base_orientation)\n self.arm_joint_ids = arm_joint_ids\n self.initial_joint_positions = np.array(initial_joint_positions)\n self.gripper_joint_ids = gripper_joint_ids\n self.max_joint_force = max_joint_force\n self.gripper_force = gripper_force\n self.gripper_joint_limits = gripper_joint_limits\n self.tcp_link_id = tcp_link_id\n # Setup constraint\n self.prev_ee_orn = p.getQuaternionFromEuler([0, 0, 0])\n self.robot_uid = None\n self.end_effector_link_id = end_effector_link_id\n self.gripper_action = 1\n self.ll = self.ul = self.jr = self.rp = None\n self.ll_real = np.array(lower_joint_limits)\n self.ul_real = np.array(upper_joint_limits)\n self.mixed_ik = None\n self.euler_obs = euler_obs\n self.max_rel_pos = max_rel_pos\n self.max_rel_orn = max_rel_orn\n self.magic_scaling_factor_pos = magic_scaling_factor_pos\n self.magic_scaling_factor_orn = magic_scaling_factor_orn\n self.target_pos = None\n self.target_orn = None\n self.use_target_pose = use_target_pose\n # self.reconfigure = False\n\n def load(self):\n self.robot_uid = p.loadURDF(\n fileName=self.filename,\n basePosition=self.base_position,\n baseOrientation=self.base_orientation,\n useFixedBase=True,\n physicsClientId=self.cid,\n )\n self.add_base_cylinder()\n # create a constraint to keep the fingers centered\n c = p.createConstraint(\n self.robot_uid,\n self.gripper_joint_ids[0],\n self.robot_uid,\n self.gripper_joint_ids[1],\n jointType=p.JOINT_GEAR,\n jointAxis=[1, 0, 0],\n parentFramePosition=[0, 0, 0],\n childFramePosition=[0, 0, 0],\n physicsClientId=self.cid,\n )\n p.changeConstraint(c, gearRatio=-1, erp=0.1, maxForce=50, physicsClientId=self.cid)\n num_dof = p.computeDofCount(self.robot_uid)\n # lower limits for null space (todo: set them to proper range)\n self.ll = [-7] * num_dof\n # upper limits for null space (todo: set them to proper range)\n self.ul = [7] * num_dof\n # joint ranges for null space (todo: set them to proper range)\n self.jr = [7] * num_dof\n # restposes for null space\n self.rp = list(self.initial_joint_positions) + [self.gripper_joint_limits[1]] * 2\n self.reset()\n self.mixed_ik = MixedIK(\n self.robot_uid,\n self.cid,\n self.ll_real,\n self.ul_real,\n self.base_position,\n self.base_orientation,\n self.tcp_link_id,\n self.ll,\n self.ul,\n self.jr,\n self.rp,\n self.use_ik_fast,\n threshold_pos=0.03,\n threshold_orn=0.1,\n weights=(10, 8, 6, 6, 2, 2, 1),\n 
num_angles=30,\n )\n\n def add_base_cylinder(self):\n \"\"\"\n TODO: this should happen in load(), but that would break compatibility with old recorded data\n \"\"\"\n pos = self.base_position.copy()\n pos[2] /= 2\n angle = p.getEulerFromQuaternion(self.base_orientation)[2]\n pos[0] -= np.cos(angle) * 0.05\n pos[1] -= np.sin(angle) * 0.05\n cylinder = p.createVisualShape(\n shapeType=p.GEOM_CYLINDER,\n rgbaColor=[1, 1, 1, 1],\n radius=0.13,\n length=self.base_position[2],\n visualFramePosition=pos,\n )\n p.createMultiBody(baseVisualShapeIndex=cylinder)\n\n def reset(self, robot_state=None):\n if robot_state is None:\n gripper_state = self.gripper_joint_limits[1]\n joint_states = self.initial_joint_positions\n else:\n joint_indices = [i for i, x in enumerate(self.get_observation_labels()) if x.startswith(\"robot_joint\")]\n joint_states = robot_state[joint_indices]\n gripper_state = robot_state[self.get_observation_labels().index(\"gripper_opening_width\")] / 2\n\n assert len(joint_states) == len(self.arm_joint_ids)\n for i, _id in enumerate(self.arm_joint_ids):\n p.resetJointState(self.robot_uid, _id, joint_states[i], physicsClientId=self.cid)\n p.setJointMotorControl2(\n bodyIndex=self.robot_uid,\n jointIndex=_id,\n controlMode=p.POSITION_CONTROL,\n force=self.max_joint_force,\n targetPosition=joint_states[i],\n maxVelocity=self.max_velocity,\n physicsClientId=self.cid,\n )\n for i in self.gripper_joint_ids:\n p.resetJointState(self.robot_uid, i, gripper_state, physicsClientId=self.cid)\n p.setJointMotorControl2(\n bodyIndex=self.robot_uid,\n jointIndex=i,\n controlMode=p.POSITION_CONTROL,\n force=self.gripper_force,\n targetPosition=gripper_state,\n maxVelocity=1,\n physicsClientId=self.cid,\n )\n tcp_pos, tcp_orn = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]\n if self.euler_obs:\n tcp_orn = p.getEulerFromQuaternion(tcp_orn)\n self.target_pos = np.array(tcp_pos)\n self.target_orn = np.array(tcp_orn)\n\n def get_observation(self):\n \"\"\"\n returns:\n - robot_state: ndarray (16,)\n - tcp_pos: robot_state[:3]\n - tcp_orn: robot_state[3:7] (quat) / [3:6] (euler)\n - gripper_opening_width: robot_state[7:8] (quat) / [6:7] (euler)\n - arm_joint_states: robot_state[8:15] (quat) / [7:14] (euler)\n - gripper_action: robot_state[15:] (quat) / [14:] (euler)\n - robot_info: Dict\n \"\"\"\n tcp_pos, tcp_orn = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]\n if self.euler_obs:\n tcp_orn = p.getEulerFromQuaternion(tcp_orn)\n gripper_opening_width = (\n p.getJointState(self.robot_uid, self.gripper_joint_ids[0], physicsClientId=self.cid)[0]\n + p.getJointState(self.robot_uid, self.gripper_joint_ids[1], physicsClientId=self.cid)[0]\n )\n arm_joint_states = []\n for i in self.arm_joint_ids:\n arm_joint_states.append(p.getJointState(self.robot_uid, i, physicsClientId=self.cid)[0])\n robot_state = np.array([*tcp_pos, *tcp_orn, gripper_opening_width, *arm_joint_states, self.gripper_action])\n robot_info = {\n \"tcp_pos\": tcp_pos,\n \"tcp_orn\": tcp_orn,\n \"gripper_opening_width\": gripper_opening_width,\n \"arm_joint_states\": arm_joint_states,\n \"gripper_action\": self.gripper_action,\n \"uid\": self.robot_uid,\n \"contacts\": p.getContactPoints(bodyA=self.robot_uid, physicsClientId=self.cid),\n }\n return robot_state, robot_info\n\n def get_observation_labels(self):\n tcp_pos_labels = [f\"tcp_pos_{ax}\" for ax in (\"x\", \"y\", \"z\")]\n if self.euler_obs:\n tcp_orn_labels = [f\"tcp_orn_{ax}\" for ax in (\"x\", \"y\", \"z\")]\n else:\n 
tcp_orn_labels = [f\"tcp_orn_{ax}\" for ax in (\"x\", \"y\", \"z\", \"w\")]\n return [\n *tcp_pos_labels,\n *tcp_orn_labels,\n \"gripper_opening_width\",\n *[f\"robot_joint_{i}\" for i in self.arm_joint_ids],\n \"gripper_action\",\n ]\n\n def relative_to_absolute(self, action):\n assert len(action) == 7\n rel_pos, rel_orn, gripper = np.split(action, [3, 6])\n rel_pos *= self.max_rel_pos * self.magic_scaling_factor_pos\n rel_orn *= self.max_rel_orn * self.magic_scaling_factor_orn\n if self.use_target_pose:\n self.target_pos += rel_pos\n self.target_orn += rel_orn\n return self.target_pos, self.target_orn, gripper\n else:\n tcp_pos, tcp_orn = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]\n tcp_orn = p.getEulerFromQuaternion(tcp_orn)\n abs_pos = np.array(tcp_pos) + rel_pos\n abs_orn = np.array(tcp_orn) + rel_orn\n return abs_pos, abs_orn, gripper\n\n def apply_action(self, action):\n # cv2.imshow(\"win\", np.zeros((300,300)))\n # k = cv2.waitKey(1) % 255\n # if k == ord('w'):\n # self.base_position[1] += 0.01\n # elif k == ord('s'):\n # self.base_position[1] -= 0.01\n # elif k == ord('d'):\n # self.base_position[0] += 0.01\n # elif k == ord('a'):\n # self.base_position[0] -= 0.01\n # elif k == ord('e'):\n # self.base_position[2] += 0.01\n # elif k == ord('q'):\n # self.base_position[2] -= 0.01\n # elif k == ord('r'):\n # self.initial_joint_positions[0] -= 0.1\n # elif k == ord('f'):\n # self.initial_joint_positions[0] += 0.1\n # elif k == ord('t'):\n # self.initial_joint_positions[1] -= 0.1\n # elif k == ord('g'):\n # self.initial_joint_positions[1] += 0.1\n # elif k == ord('y'):\n # self.initial_joint_positions[2] -= 0.1\n # elif k == ord('h'):\n # self.initial_joint_positions[2] += 0.1\n # elif k == ord('u'):\n # self.initial_joint_positions[3] -= 0.1\n # elif k == ord('j'):\n # self.initial_joint_positions[3] += 0.1\n # elif k == ord('i'):\n # self.initial_joint_positions[4] -= 0.1\n # elif k == ord('k'):\n # self.initial_joint_positions[4] += 0.1\n # elif k == ord('o'):\n # self.initial_joint_positions[5] -= 0.1\n # elif k == ord('l'):\n # self.initial_joint_positions[5] += 0.1\n # elif k == ord('p'):\n # self.initial_joint_positions[6] -= 0.1\n # elif k == ord(';'):\n # self.initial_joint_positions[6] += 0.1\n # elif k == ord('z'):\n # self.reconfigure = not self.reconfigure\n # print(f\"{self.initial_joint_positions=}\")\n # print(f\"{self.base_position=}\")\n # if k != 254:\n # self.initial_joint_positions = np.clip(self.initial_joint_positions, self.ll_real, self.ul_real)\n # p.resetBasePositionAndOrientation(self.robot_uid, self.base_position, self.base_orientation, physicsClientId=self.cid)\n # self.rp = list(self.initial_joint_positions) + [self.gripper_joint_limits[1]] * 2\n # self.mixed_ik.rp = self.rp\n # for i, _id in enumerate(self.arm_joint_ids):\n # p.resetJointState(self.robot_uid, _id, self.initial_joint_positions[i], physicsClientId=self.cid)\n # p.setJointMotorControl2(\n # bodyIndex=self.robot_uid,\n # jointIndex=_id,\n # controlMode=p.POSITION_CONTROL,\n # force=self.max_joint_force,\n # targetPosition=self.initial_joint_positions[i],\n # maxVelocity=self.max_velocity,\n # physicsClientId=self.cid,\n # )\n # if self.reconfigure:\n # return\n #\n\n if not len(action) == 3:\n action = self.relative_to_absolute(action)\n target_ee_pos, target_ee_orn, self.gripper_action = action\n\n assert len(target_ee_pos) == 3\n assert len(target_ee_orn) in (3, 4)\n # automatically transform euler actions to quaternion\n if len(target_ee_orn) == 3:\n 
target_ee_orn = p.getQuaternionFromEuler(target_ee_orn)\n\n if not isinstance(self.gripper_action, int) and len(self.gripper_action) == 1:\n self.gripper_action = self.gripper_action[0]\n assert self.gripper_action in (-1, 1)\n\n # #\n # cam_rot = p.getMatrixFromQuaternion(target_ee_orn)\n # cam_rot = np.array(cam_rot).reshape(3, 3)\n # cam_rot_x, cam_rot_y, cam_rot_z = cam_rot[:, 0], cam_rot[:, 1], cam_rot[:, 2]\n # p.addUserDebugLine(target_ee_pos, target_ee_pos + cam_rot_x, lineWidth=3, lineColorRGB=[1,0,0])\n # p.addUserDebugLine(target_ee_pos, target_ee_pos +cam_rot_y, lineWidth=3, lineColorRGB=[0,1,0])\n # p.addUserDebugLine(target_ee_pos, target_ee_pos +cam_rot_z, lineWidth=3, lineColorRGB=[0,0,1])\n #\n # tcp_pos, tcp_orn = p.getLinkState(self.robotId, self.tcp_link_id)[:2]\n # tcp_euler = p.getEulerFromQuaternion(tcp_orn)\n # p.addUserDebugLine([0,0,0], target_ee_pos, lineWidth=8, lineColorRGB=[0,1,0])\n # p.addUserDebugLine([0,0,0], p.getLinkState(self.robot_uid, 6)[4], lineWidth=3, lineColorRGB=[1,0,0])\n # p.addUserDebugLine([0,0,0], p.getLinkState(self.robot_uid, 13)[4], lineWidth=3, lineColorRGB=[0,1,0])\n # target_ee_pos, target_ee_orn = self.tcp_to_ee(target_ee_pos, target_ee_orn)\n # p.addUserDebugLine([0,0,0], target_ee_pos, lineWidth=8, lineColorRGB=[1,0,0])\n jnt_ps = self.mixed_ik.get_ik(target_ee_pos, target_ee_orn)\n for i in range(self.end_effector_link_id):\n # p.resetJointState(self.robot_uid, i, jnt_ps[i])\n p.setJointMotorControl2(\n bodyIndex=self.robot_uid,\n jointIndex=i,\n controlMode=p.POSITION_CONTROL,\n force=self.max_joint_force,\n targetPosition=jnt_ps[i],\n maxVelocity=self.max_velocity,\n physicsClientId=self.cid,\n )\n\n self.control_gripper(self.gripper_action)\n\n def control_gripper(self, gripper_action):\n if gripper_action == 1:\n gripper_finger_position = self.gripper_joint_limits[1]\n gripper_force = self.gripper_force / 100\n else:\n gripper_finger_position = self.gripper_joint_limits[0]\n gripper_force = self.gripper_force\n for id in self.gripper_joint_ids:\n p.setJointMotorControl2(\n bodyIndex=self.robot_uid,\n jointIndex=id,\n controlMode=p.POSITION_CONTROL,\n targetPosition=gripper_finger_position,\n force=gripper_force,\n maxVelocity=1,\n physicsClientId=self.cid,\n )\n\n def serialize(self):\n return {\n \"uid\": self.robot_uid,\n \"info\": p.getBodyInfo(self.robot_uid, physicsClientId=self.cid),\n \"pose\": p.getBasePositionAndOrientation(self.robot_uid, physicsClientId=self.cid),\n \"joints\": p.getJointStates(\n self.robot_uid,\n list(range(p.getNumJoints(self.robot_uid, physicsClientId=self.cid))),\n physicsClientId=self.cid,\n ),\n \"gripper_action\": self.gripper_action,\n }\n\n def reset_from_storage(self, data):\n p.resetBasePositionAndOrientation(\n bodyUniqueId=self.robot_uid, posObj=data[\"pose\"][0], ornObj=data[\"pose\"][1], physicsClientId=self.cid\n )\n num_joints = len(data[\"joints\"])\n assert num_joints == p.getNumJoints(self.robot_uid, physicsClientId=self.cid)\n for i, (value, velocity, *_) in enumerate(data[\"joints\"]):\n p.resetJointState(\n bodyUniqueId=self.robot_uid,\n jointIndex=i,\n targetValue=value,\n targetVelocity=velocity,\n physicsClientId=self.cid,\n )\n p.setJointMotorControl2(\n bodyIndex=self.robot_uid,\n jointIndex=i,\n controlMode=p.POSITION_CONTROL,\n force=self.max_joint_force,\n targetPosition=value,\n maxVelocity=self.max_velocity,\n physicsClientId=self.cid,\n )\n self.control_gripper(data[\"gripper_action\"])\n\n def __str__(self):\n return f\"{self.filename} : {self.__dict__}\"\n"
] |
[
[
"numpy.split",
"numpy.array",
"numpy.cos",
"numpy.sin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eristoddle/catalyst
|
[
"b16366e828991e39f0604dacc2527e522bb3f9c8"
] |
[
"catalyst/examples/Sure_Fire_Hedge_Catalyst.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport talib as ta\nimport matplotlib.pyplot as plt\nfrom catalyst import run_algorithm\nfrom catalyst.exchange.utils.stats_utils import extract_transactions\nfrom catalyst.api import (\n record,\n order,\n order_target,\n symbol,\n get_order\n)\nfrom logbook import Logger\n\nlog = Logger(\"Hedge\")\n\n\ndef initialize(context):\n log.info('Initializing Sure Fire Hedge Algorithm')\n context.bitfinex = context.exchanges['poloniex']\n context.asset = symbol('btc_usdt', context.bitfinex.name)\n\n context.ORDER_SIZE = 0.1\n context.BARS = 365\n context.frequency = 'minute'\n\n context.swallow_errors = True\n context.errors = []\n\n context.upper = 150\n context.lower = 300\n context.multiplyBy = 3\n context.distance = 150\n context.level = 1\n\n context.SMA_FAST = 50\n context.SMA_SLOW = 100\n\n context.in_long = False\n context.in_short = False\n context.cost_basis = 1\n\n pass\n\n\ndef _handle_data(context, data):\n s1 = context.asset\n prices = data.history(s1, bar_count=context.BARS, fields=['price', 'open', 'high', 'low', 'close'],\n frequency=context.frequency)\n analysis = pd.DataFrame(index=prices.index)\n\n # SMA FAST\n analysis['sma_f'] = ta.SMA(prices.close.as_matrix(), context.SMA_FAST)\n # SMA SLOW\n analysis['sma_s'] = ta.SMA(prices.close.as_matrix(), context.SMA_SLOW)\n # SMA FAST over SLOW Crossover\n analysis['sma_test'] = np.where(analysis.sma_f > analysis.sma_s, 1, 0)\n\n # Save the prices and analysis to send to analyze\n context.prices = prices\n context.analysis = analysis\n context.price = data.current(context.asset, 'price')\n\n record(price=data.current(context.asset, 'price'),\n cash=context.portfolio.cash,\n short_mavg=analysis['sma_f'],\n long_mavg=analysis['sma_s'])\n\n makeOrders(context, analysis)\n\n\ndef handle_data(context, data):\n # log.info('----------------------------------------------------------')\n try:\n _handle_data(context, data)\n except Exception as e:\n log.warn('aborting the bar on error {}'.format(e))\n context.errors.append(e)\n\n # log.info('completed bar {}, total execution errors {}'.format(data.current_dt, len(context.errors)))\n\n if len(context.errors) > 0:\n log.info('the errors:\\n{}'.format(context.errors))\n\n\ndef analyze(context, perf):\n\n # Get the base_currency that was passed as a parameter to the simulation\n exchange = list(context.exchanges.values())[0]\n base_currency = exchange.base_currency.upper()\n\n # First chart: Plot portfolio value using base_currency\n ax1 = plt.subplot(411)\n perf.loc[:, ['portfolio_value']].plot(ax=ax1)\n ax1.legend_.remove()\n ax1.set_ylabel('Portfolio Value\\n({})'.format(base_currency))\n start, end = ax1.get_ylim()\n ax1.yaxis.set_ticks(np.arange(start, end, (end - start) / 5))\n\n\n # Second chart: Plot asset price, moving averages and buys/sells\n ax2 = plt.subplot(412, sharex=ax1)\n perf.loc[:, ['price']].plot(\n ax=ax2,\n label='Price')\n ax2.legend_.remove()\n ax2.set_ylabel('{asset}\\n({base})'.format(\n asset=context.asset.symbol,\n base=base_currency\n ))\n start, end = ax2.get_ylim()\n ax2.yaxis.set_ticks(np.arange(start, end, (end - start) / 5))\n\n transaction_df = extract_transactions(perf)\n if not transaction_df.empty:\n buy_df = transaction_df[transaction_df['amount'] > 0]\n sell_df = transaction_df[transaction_df['amount'] < 0]\n ax2.scatter(\n buy_df.index.to_pydatetime(),\n perf.loc[buy_df.index, 'price'],\n marker='^',\n s=100,\n c='green',\n label=''\n )\n ax2.scatter(\n sell_df.index.to_pydatetime(),\n perf.loc[sell_df.index, 
'price'],\n marker='v',\n s=100,\n c='red',\n label=''\n )\n\n plt.show()\n\n\ndef makeOrders(context, analysis):\n if context.in_long:\n weAreLong(context, analysis)\n\n elif context.in_short:\n weAreShort(context, analysis)\n\n else:\n if getLast(analysis, 'sma_test') == 1:\n order(context.asset, amount=context.ORDER_SIZE)\n context.in_long = True\n context.in_short = False\n context.level = 1\n context.cost_basis = context.price\n '''\n context.position = position\n '''\n log.info('Bought {amount} @ {price}'.format(amount=context.ORDER_SIZE, price=context.price))\n\n\ndef weAreLong(context, analysis):\n s1 = context.asset\n TP = context.cost_basis + context.upper\n SL = context.cost_basis - context.lower\n Crit = context.cost_basis - context.distance\n position = context.portfolio.positions[context.asset]\n log.info('We Are Long. Holdings: {amount} @ {cost_basis}'.format(amount=position.amount,\n cost_basis=context.cost_basis))\n\n if context.price < Crit:\n order(s1, amount=-(context.ORDER_SIZE * context.multiplyBy * context.level))\n context.in_long = False\n context.in_short = True\n context.level += 1\n log.info('Kill Long! GO SHORT! Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n elif context.price > TP:\n context.in_long = False\n context.in_short = False\n context.level = 1\n order_target(s1, 0)\n log.info('We made it! Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n elif context.price < SL:\n context.in_long = False\n context.in_short = False\n context.level = 1\n order_target(s1, 0)\n log.info('We lost it all! Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n else:\n log.info('no buy or sell opportunity found')\n\n\ndef weAreShort(context, analysis):\n s1 = context.asset\n TP = context.cost_basis - context.lower\n SL = context.cost_basis + context.upper\n Crit = context.cost_basis\n position = context.portfolio.positions[context.asset]\n log.info('We are Short. Holdings: {amount} @ {cost_basis}'.format(amount=position.amount, cost_basis=context.cost_basis))\n\n if context.price > Crit:\n order(s1, amount=(context.ORDER_SIZE * context.multiplyBy * context.level))\n context.in_long = True\n context.in_short = False\n context.level += 1\n log.info('Kill Short! GO LONG! Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n elif context.price < TP:\n context.in_long = False\n context.in_short = False\n context.level = 1\n order_target(s1, 0)\n log.info('We made it! Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n elif context.price > SL:\n context.in_long = False\n context.in_short = False\n context.level = 1\n order_target(s1, 0)\n log.info('We lost it all! 
Sold {amount} @ {price}'.format(amount=position.amount, price=context.price))\n\n else:\n log.info('no buy or sell opportunity found')\n\n\ndef getLast(arr, name):\n return arr[name][arr[name].index[-1]]\n\n\nif __name__ == '__main__':\n live = False\n if live:\n run_algorithm(\n capital_base=3000,\n initialize=initialize,\n handle_data=handle_data,\n analyze=analyze,\n exchange_name='bittrex',\n live=True,\n algo_namespace='hedge',\n base_currency='usdt',\n simulate_orders=True,\n )\n else:\n run_algorithm(\n capital_base=10000,\n data_frequency='minute',\n initialize=initialize,\n handle_data=handle_data,\n analyze=analyze,\n exchange_name='poloniex',\n algo_namespace='hedge',\n base_currency='usdt',\n start=pd.to_datetime('2018-04-01', utc=True),\n end=pd.to_datetime('2018-04-02', utc=True),\n )\n"
] |
[
[
"pandas.to_datetime",
"numpy.arange",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
IrTrez/msp-examples
|
[
"463ed5e4fe703bf770f0e710266b0e0e112136f3"
] |
[
"Examples/Aerobraking.py"
] |
[
"from msp import msp, simtools\nimport time\nimport math\nimport numpy as np\n\nDATAFILE = \"runs/Aerobraking.csv\"\nATMOSPHEREDATA = \"densityModels/MarsDensity.csv\"\nSPEED = 2000 # __ times speed\nRUNTIME = 300000\nprint(\"Total runtime will be:\", RUNTIME, \"s or:\", RUNTIME/3600, \"hours or:\", RUNTIME/86400, \"days\")\n\n# USE km AS STANDARD DISTANCE UNIT\n# USE s AS STANDARD TIME UNIT\nAU = 149.6e6 # km\nmuSun = 1.327178e11\ncurrentTime = time.time()\nlimitAltitude = 200 # 260 #[km]. At this altitude density is just below 1*10^-10\n\n\nMarsAtmosphere=msp.Atmosphere(limitAltitude, densityFile=ATMOSPHEREDATA)\nMars = msp.Planet(4.282837e4, 3396.2, 1.52367934 * AU, muSun, MarsAtmosphere)\n\nr = np.array([21508.114845629447, 0.0, 982.3450283462487])\nv = np.array([-2.968111925169866, 0.0, -1.4808260236254678])\n\n# CD is increased times 100 here to see the effect.\nCD = 1.23 * 100\nsurfaceArea = 3.6**2 * math.pi\n\nspacecraft = msp.Body(Mars, 3000, CD, surfaceArea )\nspacecraft.initPositionOrbit(r,v)\n\n# PROPAGATE Here\ndt = 1\n# These are some precalculated manoeuvres to see the effects\nspacecraft.addManoeuvreByDirection(spacecraft.start + 100, -1.35, \"t\")\nspacecraft.addManoeuvreByDirection(spacecraft.start + 8900, -0.2, \"t\")\n\nrlist = spacecraft.propagate(RUNTIME, DATAFILE, True, dtAtmospheric = dt, dtNormal = dt)\n\n\nprint(f\"Final eccentricity {spacecraft.e}\")\nprint(f\"Final velocity {np.sqrt(spacecraft.v.dot(spacecraft.v))}\")\nprint(f\"Periapsis alt {spacecraft.periapsis-Mars.r}\")\nprint(f\"Apoapsis alt {spacecraft.apoapsis-Mars.r}\")\n\nsimtools.quickAnimate(SPEED,DATAFILE,Mars)"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tpltnt/SimpleCV
|
[
"9fd75457cce5fd111c4d251b1076b9447fa4f1a2"
] |
[
"scripts/install/win/OpenKinect/freenect-examples/demo_cv_threshold.py"
] |
[
"#!/usr/bin/env python\nfrom __future__ import print_function\nimport freenect\nimport cv\nimport frame_convert\nimport numpy as np\n\n\nthreshold = 100\ncurrent_depth = 0\n\n\ndef change_threshold(value):\n global threshold\n threshold = value\n\n\ndef change_depth(value):\n global current_depth\n current_depth = value\n\n\ndef show_depth():\n global threshold\n global current_depth\n\n depth, timestamp = freenect.sync_get_depth()\n depth = 255 * np.logical_and(depth >= current_depth - threshold,\n depth <= current_depth + threshold)\n depth = depth.astype(np.uint8)\n image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),\n cv.IPL_DEPTH_8U,\n 1)\n cv.SetData(image, depth.tostring(),\n depth.dtype.itemsize * depth.shape[1])\n cv.ShowImage('Depth', image)\n\n\ndef show_video():\n cv.ShowImage('Video', frame_convert.video_cv(freenect.sync_get_video()[0]))\n\n\ncv.NamedWindow('Depth')\ncv.NamedWindow('Video')\ncv.CreateTrackbar('threshold', 'Depth', threshold, 500, change_threshold)\ncv.CreateTrackbar('depth', 'Depth', current_depth, 2048, change_depth)\n\nprint('Press ESC in window to stop')\n\n\nwhile 1:\n show_depth()\n show_video()\n if cv.WaitKey(10) == 27:\n break\n"
] |
[
[
"numpy.logical_and"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
avrech/learning2cut
|
[
"c0febe84db5097413823821510a4ae3c996dec93",
"c0febe84db5097413823821510a4ae3c996dec93"
] |
[
"utils/functions.py",
"utils/sweet-spot-er.py"
] |
[
"from heapq import heappop, heappush\nfrom collections import defaultdict\nimport networkx as nx\nimport numpy as np\n\n\ndef dijkstra(edges, s, t):\n \"\"\"\n Find the shortest path from node s to t in graph G.\n :param edges: a list of tuples (i, j, w), where (i,j) is an undirected edge, and w is its weight\n :param s: source node\n :param t: target node\n :return: cost, path if any, otherwise Inf\n \"\"\"\n # adjacency dictionary,\n # for each node l, store a list of its neighbors (r) and their distances (c) from l\n g = defaultdict(list)\n for l, r, c in edges:\n g[l].append((c, r))\n\n q = [(0, s, [])] # priority queue, prioritizing according to distances from s\n visited = set() # visited nodes\n mindist = {s: 0} # min distances from s\n\n while q:\n cost, v1, path = heappop(q)\n if v1 not in visited:\n visited.add(v1)\n # path.append(v1)\n path = [v1] + path\n if v1 == t:\n return cost, path\n\n for c, v2 in g.get(v1, ()):\n if v2 in visited:\n continue\n prev = mindist.get(v2, None)\n next = cost + c\n if prev is None or next < prev:\n mindist[v2] = next\n heappush(q, (next, v2, path))\n\n return float(\"inf\"), []\n\n\ndef dijkstra_best_shortest_path(edges, s, t):\n \"\"\"\n Find the shortest path from node s to t in graph G.\n :param edges: a list of tuples (i, j, w), where (i,j) is an undirected edge, and w is its weight\n :param s: source node\n :param t: target node\n :return: cost, path if any, otherwise Inf\n \"\"\"\n # adjacency dictionary,\n # for each node l, store a list of its neighbors (r) and their distances (c) from l\n g = defaultdict(list)\n for l, r, c in edges:\n g[l].append((c, r))\n\n # priority queue:\n # structure: (cost, pathlen, node, path)\n # where: cost - the sum of edge weights of the path from s\n # pathlen - path length (number of edges from s)\n # node - node identifier (could be a tuple)\n # path - a list of the nodes on the path from s to node\n # the heap \"should\" sort the elements in q according to the tuple elements.\n # so first will come the node with the smaller cost,\n # an if there are two nodes with the same cost, we will prefer the closest one\n # in terms of path length.\n q = [(0, 0, s, [])]\n visited = set() # visited nodes\n mincosts = {s: 0} # min distances from s\n pathlens = {s: 0}\n while q:\n v1_cost, v1_pathlen, v1, path = heappop(q)\n if v1 not in visited:\n visited.add(v1)\n # path.append(v1)\n path = [v1] + path\n if v1 == t:\n return v1_cost, path\n\n # relax the costs of v1 neighbors\n for c, v2 in g.get(v1, ()):\n if v2 in visited:\n continue\n v2_cur_cost = mincosts.get(v2, None)\n v2_new_cost = v1_cost + c\n v2_cur_pathlen = pathlens.get(v2, None)\n v2_new_pathlen = v1_pathlen + 1\n # if the path to v2 via v1 is cheaper,\n # or even if it is equal cost, but shorter in terms of pathlen,\n # then update v2\n if v2_cur_cost is None or v2_new_cost < v2_cur_cost or (v2_new_cost == v2_cur_cost and v2_new_pathlen < v2_cur_pathlen):\n mincosts[v2] = v2_new_cost\n pathlens[v2] = v2_new_pathlen\n heappush(q, (v2_new_cost, v2_new_pathlen, v2, path))\n\n return float(\"inf\"), []\n\n\ndef verify_maxcut_sol(model, x, G):\n edge_weights = nx.get_edge_attributes(G, 'weight')\n sol = {i: model.getVal(x[i]) for i in x.keys()}\n for v in sol.values():\n assert v == 0 or v == 1\n cut = 0\n for i, j in G.edges:\n if sol[i] != sol[j]:\n cut += edge_weights[(i, j)]\n return cut\n\n\ndef get_normalized_areas(t, ft, t_support=None, reference=0, return_slope_and_diff=False):\n \"\"\"\n Compute the area under f(t) vs. 
t on t_support.\n If the last point (t[-1], ft[-1]) is outside t_support (t[-1] > t_support),\n we cut the curve, overriding t[-1] by t_support,\n and overriding ft[-1] by the linear interpolation of ft at t=t_support.\n :param t: lp_iterations\n :param ft: dualbound (or gap)\n :param t_support: scalar\n :param reference: optimal dualbound (or 0 for gap integral)\n :return: array of length = len(t) -1 , containing the area under the normalized curve\n for each interval in t,\n using 1st order interpolation to approximate ft between each adjacent points in t.\n \"\"\"\n t_support = t[-1] if t_support is None else t_support\n\n # if t[-1] < t_support, extend t to t_support\n # and extend ft with a constant value ft[-1]\n extended = False\n if t[-1] < t_support:\n ft = ft + [ft[-1]]\n t = t + [t_support]\n extended = True\n # truncate t and ft if t[-1] exceeded t_support\n if t[-1] > t_support:\n assert t[-2] < t_support\n # compute ft slope in the last interval [t[-2], t[-1]]\n slope = (ft[-1] - ft[-2]) / (t[-1] - t[-2])\n # compute the linear interpolation of ft at t_support\n ft[-1] = ft[-2] + slope * (t_support - t[-2])\n t[-1] = t_support\n\n ft = np.array(ft)\n t = np.array(t)\n\n # normalize ft to [0,1] according to the reference value,\n # such that it will start at 0 and end at 1 (if optimal).\n # if ft is decreasing function, we flip it to increasing.\n # finally, it should looks like:\n # 1__^_ _ _ _ _ _ _ _ _ _ _\n # | _______|\n # | ___/ | |\n # | ___/ | | |\n # |/a0|a1| ...|aN-1|\n # 0__|___|__|____|____|________\n # t0 t1 t2 tN-1 t_support (normalized such that t_support = 1)\n # the areas returned are a0, a1, aN-1\n # old and incorrect : ft = np.abs(ft - reference) / np.abs(ft[0])\n if reference - ft[0] != 0:\n ft = np.abs(ft - ft[0]) / np.abs(reference - ft[0])\n else:\n # the curve is optimal from the beginning. override with ones\n ft = np.ones_like(ft)\n\n # normalize t to [0,1]\n t = t / t_support\n\n # compute the area under the curve using first order interpolation\n ft_diff = ft[1:] - ft[:-1] # ft is assumed to be non-decreasing, so ft_diff is non-negative.\n assert all(ft_diff >= 0), f'ft_diff = {ft_diff}'\n t_diff = t[1:] - t[:-1]\n ft_areas = t_diff * (ft[:-1] + ft_diff / 2) # t_diff *(ft[1:] - ft[:-1]) + t_diff * abs(ft[1:] - ft[:-1]) /2\n slopes = ft_diff / t_diff\n if extended:\n # add the extension area to the last transition area\n ft_areas[-2] += ft_areas[-1]\n # truncate the extension, and leave n-areas for the n-transition done\n ft_areas = ft_areas[:-1]\n slopes = slopes[:-1]\n ft_diff = ft_diff[:-1]\n\n if return_slope_and_diff:\n return ft_areas, slopes, ft_diff\n else:\n return ft_areas\n\n\ndef truncate(t, ft, support, interpolate=False):\n \"\"\"\n Truncate curves to support, interpolating last point at the end of the support.\n If t < support does not do anything.\n \"\"\"\n assert type(t) == list and type(ft) == list\n if t[-1] <= support:\n return t, ft\n # find first index of t > support\n last_index = np.nonzero(np.array(t) > support)[0][0]\n # discard elements after last index\n t = t[:last_index + 1]\n ft = ft[:last_index + 1]\n # interpolate ft at last index\n if interpolate:\n slope = (ft[-1] - ft[-2]) / (t[-1] - t[-2])\n # compute the linear interpolation of ft at t_support\n ft[-1] = ft[-2] + slope * (support - t[-2])\n t[-1] = support\n\n return t, ft\n\n",
"from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\nimport networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import combinations\nfrom utils.functions import get_normalized_areas\nfrom tqdm import tqdm\nimport pickle\nimport pandas as pd\n\nimport os\nif not os.path.isdir('sweet_spot_results'):\n os.makedirs('sweet_spot_results')\n\nprint('#################################################')\nprint('################ MVC ############################')\nprint('#################################################')\n\n# randomize graphs\ngraph_sizes = [60, 100, 150] # 200\nseeds = [46, 72, 101]\ndensities = {k: np.arange(5, k, 10) / k for k in graph_sizes}\n\n\n# MVC with branch and cut:\nif False:\n results = {gs: {'density': [],\n 'time': [],\n 'lp_iterations': [],\n 'nodes': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n for gs in graph_sizes}\n for gs in graph_sizes:\n for density in tqdm(densities[gs], desc=f'sweeping on graph size {gs}'):\n stats = {\n 'time': [],\n 'lp_iterations': [],\n 'nodes': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n g = nx.erdos_renyi_graph(n=gs, p=density, directed=False)\n nx.set_node_attributes(g, {i: np.random.random() for i in g.nodes}, 'c')\n for seed in seeds:\n model, _ = mvc_model(g, use_random_branching=False)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.hideOutput(True)\n model.optimize()\n assert model.getGap() == 0\n stats['time'].append(model.getSolvingTime())\n stats['lp_iterations'].append(model.getNLPIterations())\n stats['nodes'].append(model.getNNodes())\n stats['applied'].append(model.getNCutsApplied())\n stats['lp_rounds'].append(model.getNLPs())\n results[gs]['density'].append('{:.2f}({:.2f})'.format(density, nx.density(g)))\n for k, vs in stats.items():\n results[gs][k].append(np.mean(vs))\n\n with open(f'sweet_spot_results/mvc-er-sweet-spot-results-bnc.pkl', 'wb') as f:\n pickle.dump(results, f)\n\nwith open(f'sweet_spot_results/mvc-er-sweet-spot-results-bnc.pkl', 'rb') as f:\n results = pickle.load(f)\n\n# plot for each graph size:\n# metric vs. 
density for metric in stats.keys\nfig = plt.figure(figsize=(16, 10))\naxes = fig.subplots(5, 3)\nplt.suptitle('MVC Branch & Cut')\nfor col, gs in enumerate(graph_sizes):\n x_labels = results[gs]['density']\n for row, (k, vals) in enumerate([(kk, v) for kk, v in results[gs].items() if kk != 'density']):\n ax = axes[row, col]\n # ax.plot(np.arange(len(vals)), vals)\n ax.plot(x_labels, vals)\n if col == 0:\n ax.set_ylabel(k)\n ax.get_xaxis().set_visible(False)\n\n axes[4, col].get_xaxis().set_visible(True)\n # axes[4, col].set_xticks(list(range(len(x_labels))), minor=True)\n # axes[4, col].set_xticklabels(x_labels, minor=True, rotation=45)\n fig.autofmt_xdate(rotation=45)\n axes[4, col].set_xlabel('denisty')\n axes[0, col].set_title(f'size={gs}')\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.show()\nplt.savefig('sweet_spot_results/mvc-er-sweet-spot-bnc.png')\n\n################ root node #################\nlp_iterations_limit = 1500\n\n# MVC in root node:\nif False:\n results = {gs: {'density': [],\n 'time': [],\n 'lp_iterations': [],\n 'gap': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n for gs in graph_sizes}\n for gs in graph_sizes:\n for density in tqdm(densities[gs], desc=f'sweeping on graph size {gs}'):\n stats = {\n 'time': [],\n 'lp_iterations': [],\n 'gap': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n g = nx.erdos_renyi_graph(n=gs, p=density, directed=False)\n nx.set_node_attributes(g, {i: np.random.random() for i in g.nodes}, 'c')\n for seed in seeds:\n model, _ = mvc_model(g)\n set_aggresive_separation(model)\n sepa = CSBaselineSepa(hparams={'lp_iterations_limit': lp_iterations_limit})\n model.includeSepa(sepa, '#CS_baseline', 'do-nothing', priority=-100000000, freq=1)\n model.setBoolParam(\"misc/allowdualreds\", 0)\n model.setLongintParam('limits/nodes', 1) # solve only at the root node\n model.setIntParam('separating/maxstallroundsroot', -1) # add cuts forever\n model.setIntParam('branching/random/priority', 10000000)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.hideOutput(True)\n model.optimize()\n sepa.update_stats()\n stats['time'].append(model.getSolvingTime())\n stats['lp_iterations'].append(model.getNLPIterations())\n stats['gap'].append(model.getGap())\n stats['applied'].append(model.getNCutsApplied())\n stats['lp_rounds'].append(model.getNLPs())\n results[gs]['density'].append('{:.2f}({:.2f})'.format(density, nx.density(g)))\n for k, vs in stats.items():\n results[gs][k].append(np.mean(vs))\n\n with open(f'sweet_spot_results/mvc-er-sweet-spot-results-lpiter{lp_iterations_limit}.pkl', 'wb') as f:\n pickle.dump(results, f)\n\nwith open(f'sweet_spot_results/mvc-er-sweet-spot-results-lpiter{lp_iterations_limit}.pkl', 'rb') as f:\n results = pickle.load(f)\n\n# plot for each graph size:\n# metric vs. 
density for metric in stats.keys\nfig = plt.figure(figsize=(16, 10))\naxes = fig.subplots(5, 3)\nplt.suptitle(f'MVC Root Node (LP Iter Limit = {lp_iterations_limit})')\nfor col, gs in enumerate(graph_sizes):\n x_labels = results[gs]['density']\n for row, (k, vals) in enumerate([(kk, v) for kk, v in results[gs].items() if kk != 'density']):\n ax = axes[row, col]\n # ax.plot(np.arange(len(vals)), vals)\n ax.plot(x_labels, vals)\n if col == 0:\n ax.set_ylabel(k)\n ax.get_xaxis().set_visible(False)\n\n axes[4, col].get_xaxis().set_visible(True)\n # axes[4, col].set_xticks(list(range(len(x_labels))), minor=True)\n # axes[4, col].set_xticklabels(x_labels, minor=True, rotation=45)\n fig.autofmt_xdate(rotation=45)\n axes[4, col].set_xlabel('p(denisty)')\n axes[0, col].set_title(f'size={gs}')\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.show()\nplt.savefig(f'sweet_spot_results/mvc-er-sweet-spot-lpiter{lp_iterations_limit}.png')\n\nprint('finished')\n\n\n\nprint('#################################################')\nprint('############# MAXCUT ############################')\nprint('#################################################')\n\n# randomize graphs\ngraph_sizes = [40, 70, 100]\nseeds = [46, 72, 101]\ndensities = {k: np.arange(5, k, 10)/k for k in graph_sizes}\n\n\n# MVC with branch and cut:\n# if True:\n# results = {gs: {'xticks': [],\n# 'time': [],\n# 'lp_iterations': [],\n# 'nodes': [],\n# 'applied': [],\n# 'lp_rounds': [],\n# }\n# for gs in graph_sizes}\n# for gs in graph_sizes:\n# for m in tqdm(ms[gs], desc=f'sweeping on graph size {gs}'):\n# stats = {\n# 'time': [],\n# 'lp_iterations': [],\n# 'nodes': [],\n# 'applied': [],\n# 'lp_rounds': [],\n# }\n# g = nx.barabasi_albert_graph(n=gs, m=m)\n# nx.set_node_attributes(g, {i: np.random.random() for i in g.nodes}, 'c')\n# for seed in seeds:\n# model, _ = mvc_model(g, use_random_branching=False)\n# model.setBoolParam('randomization/permutevars', True)\n# model.setIntParam('randomization/permutationseed', seed)\n# model.setIntParam('randomization/randomseedshift', seed)\n# model.hideOutput(True)\n# model.optimize()\n# assert model.getGap() == 0\n# stats['time'].append(model.getSolvingTime())\n# stats['lp_iterations'].append(model.getNLPIterations())\n# stats['nodes'].append(model.getNNodes())\n# stats['applied'].append(model.getNCutsApplied())\n# stats['lp_rounds'].append(model.getNLPs())\n# results[gs]['xticks'].append('{}({:.2f})'.format(m, nx.density(g)))\n# for k, vs in stats.items():\n# results[gs][k].append(np.mean(vs))\n#\n# with open(f'sweet_spot_results/mvc-er-sweet-spot-results-bnc.pkl', 'wb') as f:\n# pickle.dump(results, f)\n#\n# with open(f'sweet_spot_results/mvc-er-sweet-spot-results-bnc.pkl', 'rb') as f:\n# results = pickle.load(f)\n#\n# # plot for each graph size:\n# # metric vs. 
density for metric in stats.keys\n# fig = plt.figure(figsize=(16, 10))\n# axes = fig.subplots(5, 4)\n# plt.suptitle('MVC Branch & Cut')\n# for col, gs in enumerate(graph_sizes):\n# x_labels = results[gs]['xticks']\n# for row, (k, vals) in enumerate([(kk, v) for kk, v in results[gs].items() if kk != 'xticks']):\n# ax = axes[row, col]\n# # ax.plot(np.arange(len(vals)), vals)\n# ax.plot(x_labels, vals)\n# if col == 0:\n# ax.set_ylabel(k)\n# ax.get_xaxis().set_visible(False)\n#\n# axes[4, col].get_xaxis().set_visible(True)\n# # axes[4, col].set_xticks(list(range(len(x_labels))), minor=True)\n# # axes[4, col].set_xticklabels(x_labels, minor=True, rotation=45)\n# fig.autofmt_xdate(rotation=45)\n# axes[4, col].set_xlabel('m(denisty)')\n# axes[0, col].set_title(f'size={gs}')\n# plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# # plt.show()\n# plt.savefig('sweet_spot_results/mvc-er-sweet-spot-bnc.png')\n\n################ root node #################\nlp_iterations_limit = 5000\n\n# MAXCUT in root node:\nif False:\n results = {gs: {'density': [],\n 'time': [],\n 'lp_iterations': [],\n 'gap': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n for gs in graph_sizes}\n for gs in graph_sizes:\n for density in tqdm(densities[gs], desc=f'sweeping on graph size {gs}'):\n stats = {\n 'time': [],\n 'lp_iterations': [],\n 'gap': [],\n 'applied': [],\n 'lp_rounds': [],\n }\n g = nx.erdos_renyi_graph(n=gs, p=density, directed=False)\n nx.set_node_attributes(g, {e: np.random.random() for e in g.edges}, 'weight')\n for seed in seeds:\n model, _, _ = maxcut_mccormic_model(g)\n set_aggresive_separation(model)\n sepa = CSBaselineSepa(hparams={'lp_iterations_limit': lp_iterations_limit})\n model.includeSepa(sepa, '#CS_baseline', 'do-nothing', priority=-100000000, freq=1)\n model.setBoolParam(\"misc/allowdualreds\", 0)\n model.setLongintParam('limits/nodes', 1) # solve only at the root node\n model.setIntParam('separating/maxstallroundsroot', -1) # add cuts forever\n model.setIntParam('branching/random/priority', 10000000)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.hideOutput(True)\n model.optimize()\n stats['time'].append(model.getSolvingTime())\n stats['lp_iterations'].append(model.getNLPIterations())\n stats['gap'].append(model.getGap())\n stats['applied'].append(model.getNCutsApplied())\n stats['lp_rounds'].append(model.getNLPs())\n results[gs]['density'].append('{:.2f}({:.2f})'.format(density, nx.density(g)))\n for k, vs in stats.items():\n results[gs][k].append(np.mean(vs))\n\n with open(f'sweet_spot_results/maxcut-sweet-spot-results-lpiter{lp_iterations_limit}.pkl', 'wb') as f:\n pickle.dump(results, f)\n\nwith open(f'sweet_spot_results/maxcut-sweet-spot-results-lpiter{lp_iterations_limit}.pkl', 'rb') as f:\n results = pickle.load(f)\n\n# plot for each graph size:\n# metric vs. 
density for metric in stats.keys\nfig = plt.figure(figsize=(16, 10))\naxes = fig.subplots(5, 3)\nplt.suptitle(f'MAXCUT Root Node (LP Iter Limit = {lp_iterations_limit}')\nfor col, gs in enumerate(graph_sizes):\n x_labels = results[gs]['density']\n for row, (k, vals) in enumerate([(kk, v) for kk, v in results[gs].items() if kk != 'density']):\n ax = axes[row, col]\n # ax.plot(np.arange(len(vals)), vals)\n ax.plot(x_labels, vals)\n if col == 0:\n ax.set_ylabel(k)\n ax.get_xaxis().set_visible(False)\n\n axes[4, col].get_xaxis().set_visible(True)\n # axes[4, col].set_xticks(list(range(len(x_labels))), minor=True)\n # axes[4, col].set_xticklabels(x_labels, minor=True, rotation=45)\n fig.autofmt_xdate(rotation=45)\n axes[4, col].set_xlabel('p(denisty)')\n axes[0, col].set_title(f'size={gs}')\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.show()\nplt.savefig(f'sweet_spot_results/maxcut-er-sweet-spot-lpiter{lp_iterations_limit}.png')\n\nexit(0)\n# plot dual bound graphs of each graph size to see if plateau was reached:\nfull_res = {}\nif False:\n for gs, density in zip(graph_sizes, [10, 20, 20]):\n g = nx.erdos_renyi_graph(n=gs, p=density, directed=False)\n nx.set_node_attributes(g, {e: np.random.random() for e in g.edges}, 'weight')\n for seed in seeds:\n model, _, _ = maxcut_mccormic_model(g)\n set_aggresive_separation(model)\n sepa = CSBaselineSepa(hparams={'lp_iterations_limit': lp_iterations_limit})\n model.includeSepa(sepa, '#CS_baseline', 'do-nothing', priority=-100000000, freq=1)\n model.setBoolParam(\"misc/allowdualreds\", 0)\n model.setLongintParam('limits/nodes', 1) # solve only at the root node\n model.setIntParam('separating/maxstallroundsroot', -1) # add cuts forever\n model.setIntParam('branching/random/priority', 10000000)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.setBoolParam('randomization/permutevars', True)\n model.setIntParam('randomization/permutationseed', seed)\n model.setIntParam('randomization/randomseedshift', seed)\n model.hideOutput(True)\n model.optimize()\n sepa.update_stats()\n full_res[gs] = sepa.stats\n full_res[gs]['density'] = '{:.2f}({:.2f})'.format(density, nx.density(g))\n with open(f'sweet_spot_results/maxcut-er-full-res-lpiter{lp_iterations_limit}.pkl', 'wb') as f:\n pickle.dump(full_res, f)\n\nwith open(f'sweet_spot_results/maxcut-er-full-res-lpiter{lp_iterations_limit}.pkl', 'rb') as f:\n full_res = pickle.load(f)\n\nfig = plt.figure(figsize=(16,10))\naxes = fig.subplots(3, 4)\nfor row, (color, (gs, stats)) in enumerate(zip(['g','b', 'r'], full_res.items())):\n stats = full_res[gs]\n axes[row, 0].plot(stats['lp_iterations'], stats['dualbound'], color, label=f'size{gs}-density{stats[\"density\"]}')\n axes[row, 1].plot(stats['lp_iterations'], stats['gap'], color, label=f'size{gs}-density{stats[\"density\"]}')\n axes[row, 2].plot(stats['lp_iterations'], stats['solving_time'], color, label=f'size{gs}-density{stats[\"density\"]}')\n axes[row, 3].plot(stats['solving_time'], stats['gap'], color, label=f'size{gs}-density{stats[\"density\"]}')\n\n # axes[0, 0].set_title('default')\n # axes[2, 0].set_xlabel('lp iter')\n # axes[2, 1].set_xlabel('lp iter')\n # axes[0, 1].set_title('aggressive')\n# for row in range(3):\naxes[0, 0].set_ylabel(f'size={graph_sizes[0]}')\naxes[1, 0].set_ylabel(f'size={graph_sizes[1]}')\naxes[2, 0].set_ylabel(f'size={graph_sizes[2]}')\naxes[2, 0].set_xlabel('lp iterations')\naxes[2, 
1].set_xlabel('lp iterations')\naxes[2, 2].set_xlabel('lp iterations')\naxes[2, 3].set_xlabel('time')\naxes[0, 0].set_title('db vs. lp iterations')\naxes[0, 1].set_title('gap vs. lp iterations')\naxes[0, 2].set_title('time vs. lp iterations')\naxes[0, 3].set_title('gap vs. time')\naxes[0, 3].legend()\naxes[1, 3].legend()\naxes[2, 3].legend()\nplt.suptitle(f'MAXCUT (lp iter limit = {lp_iterations_limit})')\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\nfig.savefig(f'sweet_spot_results/maxcut-er-curves-lpiter-{lp_iterations_limit}.png')\n\n\nprint('finished')\n"
] |
[
[
"numpy.array",
"numpy.ones_like",
"numpy.abs"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.random",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.mean",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
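For context on the sweep code stored in the record above: before solving the root node it builds weighted Erdős–Rényi instances. Below is a minimal sketch, assuming only networkx and numpy (the values of gs, density and seed are illustrative), of constructing one such weighted instance; here the per-edge weights are attached with nx.set_edge_attributes, the usual way to store per-edge data.

# Minimal sketch: build one weighted Erdos-Renyi instance for the sweep.
# Assumes only networkx and numpy; gs, density and seed are example values.
import networkx as nx
import numpy as np

gs, density, seed = 100, 0.15, 0
rng = np.random.default_rng(seed)

g = nx.erdos_renyi_graph(n=gs, p=density, seed=seed, directed=False)
# Attach a random weight in [0, 1) to every edge.
nx.set_edge_attributes(g, {e: float(rng.random()) for e in g.edges}, 'weight')
print(f'nodes={g.number_of_nodes()} edges={g.number_of_edges()} density={nx.density(g):.2f}')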
Alejandro-sin/Learning_Notebooks
|
[
"161d6bed4c7b1d171b45f61c0cc6fa91e9894aad"
] |
[
"NoteBooks/Curso de Python/Python/Paradigmas/Programacion Estocastica/regresion.py"
] |
[
"# Regresión lineal, que peudo correr en collab.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n\n# Esto son la variabel independiente\nx = np.array([0,1,2,4 ])\n\n# Esta es mi dependiente, resutlado de experimentos\ny = np.array([1,3,4,5])\n\n# Me permite tomar ecnontrar los cefiioentes lienales para neustra función lineal.\ncoeffs = np.polyfit(x, y, 1)\na = coeffs[0]\nb = coeffs[1]\nestimado_y = (a *x) +b\n\n# Me permite hacer el gráfico linea\nplt.plot(x, estimado_y)\n# Me permite ver los datos sctaer\nplt.scatter(x,y)\nplt.show()"
] |
[
[
"numpy.polyfit",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
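The regression script in the record above relies on np.polyfit with degree 1, which returns the slope first and the intercept second. A minimal sketch (same toy data, variable names illustrative) of the same fit, evaluated through np.poly1d:

# Minimal sketch: least-squares line fit with numpy, matching the regression above.
import numpy as np

x = np.array([0, 1, 2, 4])
y = np.array([1, 3, 4, 5])

a, b = np.polyfit(x, y, 1)   # slope and intercept of the best-fit line
line = np.poly1d([a, b])     # callable polynomial a*x + b
print(a, b, line(x))         # fitted coefficients and predictions at x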
kubajir/msticpy
|
[
"7b319b71b191b5f75dcf9afd87492523a74b5ad7"
] |
[
"tests/datamodel/test_pivot_data_queries_run.py"
] |
[
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Test data query pivot functon handling of different input types.\"\"\"\nimport warnings\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\nimport pytest_check as check\nfrom msticpy.data import QueryProvider\nfrom msticpy.data.query_container import QueryContainer\nfrom msticpy.datamodel import entities\nfrom msticpy.datamodel.pivot import Pivot\n\nfrom ..unit_test_lib import get_test_data_path\n\n__author__ = \"Ian Hellen\"\n\n# pylint: disable=redefined-outer-name\n\n\[email protected](scope=\"session\")\ndef data_providers():\n \"\"\"Return dict of providers.\"\"\"\n data_path = Path(get_test_data_path()) / \"localdata\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n return {\n \"LocalData\": QueryProvider(\n \"LocalData\", data_paths=[str(data_path)], query_paths=[str(data_path)]\n ),\n }\n\n\ndef _reset_entities():\n \"\"\"Clear any query containers in entities.\"\"\"\n for entity_name in (\"Host\", \"IpAddress\", \"Account\", \"Url\"):\n entity = getattr(entities, entity_name)\n for attr in dir(entity):\n if isinstance(getattr(entity, attr), QueryContainer):\n delattr(entity, attr)\n\n\[email protected](scope=\"session\")\ndef _create_pivot(data_providers):\n _reset_entities()\n providers = data_providers.values()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n return Pivot(providers=providers)\n\n\nPivotQuery = namedtuple(\n \"PivotQuery\",\n \"entity, attrib, value, provider, pivot_func, func_param, src_df_col, exp_count\",\n)\n\n_IP_LIST = [\n \"104.211.30.1\",\n \"104.211.30.2\",\n \"192.168.0.1\",\n \"127.0.0.1\",\n]\n\n_HOST_LIST = [\"host1\", \"host2\", \"host3\"]\n\n_ACCOUNT_LIST = [\"user1\", \"user2\", \"user3\"]\n\n_PIVOT_QUERIES = [\n pytest.param(\n PivotQuery(\n entity=entities.IpAddress,\n attrib=\"Address\",\n value=_IP_LIST,\n provider=\"LocalData\",\n pivot_func=\"list_azure_network_flows_by_ip\",\n func_param=\"ip_address_list\",\n src_df_col=\"ip\",\n exp_count=1,\n ),\n id=\"IpAddress-list_azure_network_flows_by_ip\",\n ),\n pytest.param(\n PivotQuery(\n entity=entities.Host,\n attrib=\"HostName\",\n value=_HOST_LIST,\n provider=\"LocalData\",\n pivot_func=\"list_host_logons\",\n func_param=\"host_name\",\n src_df_col=\"cmdline\",\n exp_count=len(_HOST_LIST),\n ),\n id=\"Host-list_host_logons\",\n ),\n pytest.param(\n PivotQuery(\n entity=entities.Account,\n attrib=\"Name\",\n value=_ACCOUNT_LIST,\n provider=\"LocalData\",\n pivot_func=\"list_logons_by_account\",\n func_param=\"account_name\",\n src_df_col=\"cmdline\",\n exp_count=len(_ACCOUNT_LIST),\n ),\n id=\"Account-list_logons_by_account\",\n ),\n]\n\n\[email protected](\"test_case\", _PIVOT_QUERIES)\ndef test_data_query_entity(_create_pivot, test_case):\n \"\"\"Test calling function with entity attributes.\"\"\"\n # Test entity\n first_val = next(iter(test_case.value))\n init_args = {test_case.attrib: first_val}\n entity = test_case.entity(**init_args)\n func = getattr(getattr(entity, test_case.provider), test_case.pivot_func)\n # Test entity input\n # result_df = entity.LocalData.list_logons_by_account()\n result_df = func(entity)\n 
check.is_instance(result_df, pd.DataFrame)\n\n\[email protected](\"test_case\", _PIVOT_QUERIES)\ndef test_data_query_value(_create_pivot, test_case):\n \"\"\"Test calling function with value.\"\"\"\n func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)\n # Test value input\n val = next(iter(test_case.value))\n params = {test_case.func_param: val}\n result_df = func(**params)\n check.is_instance(result_df, pd.DataFrame)\n\n\[email protected](\"test_case\", _PIVOT_QUERIES)\ndef test_data_query_itbl(_create_pivot, test_case):\n \"\"\"Test calling function with iterable input.\"\"\"\n func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)\n\n val = next(iter(test_case.value))\n params = {test_case.func_param: val}\n single_val_result_df = func(**params)\n\n # Test iterable input\n val = test_case.value\n params = {test_case.func_param: val}\n result_df = func(**params)\n\n check.is_instance(result_df, pd.DataFrame)\n # For local data we are reading and returning the same data set each time\n # for multi input values, we expect to get that number\n # mulitplied by the number of input values, except in cases\n # where the query supports \"list\" parameters. In that case we\n # should just get 1x the data set.\n check.equal(len(single_val_result_df) * test_case.exp_count, len(result_df))\n\n\[email protected](\"test_case\", _PIVOT_QUERIES)\ndef test_data_query_df(_create_pivot, test_case):\n \"\"\"Test calling function with DF input attributes.\"\"\"\n func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)\n\n val = next(iter(test_case.value))\n params = {test_case.func_param: val}\n single_val_result_df = func(**params)\n\n # Test DF input\n val = test_case.value\n in_df = pd.DataFrame(val, columns=[test_case.src_df_col])\n params = {test_case.func_param: test_case.src_df_col}\n result_df = func(data=in_df, **params)\n check.is_instance(result_df, pd.DataFrame)\n # For local data we are reading and returning the same data set each time\n # for multi input values, we expect to get that number\n # mulitplied by the number of input values, except in cases\n # where the query supports \"list\" parameters. 
In that case we\n # should just get 1x the data set.\n check.equal(len(single_val_result_df) * test_case.exp_count, len(result_df))\n\n\[email protected](\"join_type\", [\"left\", \"inner\", \"right\"])\[email protected](\"test_case\", _PIVOT_QUERIES)\ndef test_pivot_funcs_df_merge(_create_pivot, join_type, test_case):\n \"\"\"Test calling function with DF input attributes.\"\"\"\n func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)\n # Test DF input\n val = test_case.value\n in_df = pd.DataFrame(val, columns=[test_case.src_df_col])\n params = {test_case.func_param: test_case.src_df_col}\n in_df[\"extra_col1\"] = \"test1\"\n in_df[\"extra_col2\"] = \"test2\"\n result_no_merge_df = func(data=in_df, **params)\n\n if test_case.entity not in (entities.Account, entities.Host):\n # The IP test uses a list param so we cannot do index joins\n # with it\n with pytest.warns(UserWarning):\n result_df = func(data=in_df, **params, join=join_type)\n return\n\n # should work ok with Account and Host\n result_df = func(data=in_df, **params, join=join_type)\n\n in_cols = in_df.shape[1]\n no_merge_cols = result_no_merge_df.shape[1]\n merge_cols = result_df.shape[1]\n # merged DF should have result + input cols - join key col\n check.greater_equal(no_merge_cols + in_cols, merge_cols)\n\n if join_type in (\"left\", \"inner\"):\n # inner and left joins should have same or greater length as input\n check.greater_equal(result_df.shape[0], in_df.shape[0])\n # all the keys from the input should be in the merged output\n for row_val in in_df[test_case.src_df_col]:\n check.is_in(row_val, result_df[test_case.src_df_col].values)\n if join_type == \"right\":\n # We don't know how many results we get back from right join\n # (although should not be zero)\n check.greater(len(result_df), 0)\n # but all of its key values should be present in input\n for row_val in result_df[test_case.src_df_col].values:\n check.is_in(row_val, in_df[test_case.src_df_col].values)\n\n join_in_data = {\n 0: \"0x3e7\",\n 1: \"0xc90e957\",\n 2: \"0xc90ea44\",\n 3: \"0xc912d62\",\n 4: \"0xc913737\",\n 10: \"0x3e3\",\n 14: \"0x3e4\",\n 15: \"0xaddd\",\n 16: \"0xafff\",\n 17: \"0x3e5\",\n 23: \"no_match\",\n }\n in_df = pd.DataFrame(\n pd.Series(join_in_data), columns=[\"TargetLogonId\"]\n ).reset_index()\n result_no_merge_df = func(data=in_df, **params)\n result_df = func(\n data=in_df,\n **params,\n join=join_type,\n left_on=\"TargetLogonId\",\n right_on=\"TargetLogonId\",\n )\n check.is_not_none(result_df)\n\n if join_type in (\"inner\", \"right\"):\n check.equal(len(result_df), len(result_no_merge_df))\n for val in join_in_data.values():\n if val != \"no_match\":\n check.is_in(val, result_df[\"TargetLogonId\"].values)\n else:\n check.is_not_in(val, result_df[\"TargetLogonId\"].values)\n if join_type == \"left\":\n check.equal(len(result_df), len(result_no_merge_df) + 1)\n for val in join_in_data.values():\n check.is_in(val, result_df[\"TargetLogonId\"].values)\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
jiayuanlu/XJTU_course
|
[
"dc1a24e0230d8d5eff6441d088e01a2376fac893"
] |
[
"DSP/pinyu_mfcc/visualize.py"
] |
[
"import utils\nimport config\nimport os\nimport feature\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ninfo, wave_data = utils.wavDecode(os.path.join(config.datasetPath, \"1_34_1/0/0_0.wav\"))\nwave_data = wave_data[:, 0]\nwave_data = wave_data * 1.0 / (max(abs(wave_data)))\n\nframe_lens = 512\nmove = 128\n\nframes = utils.frame(wave_data, frame_lens, move)\nenergy = feature.averageEnergy(frames)\nCrossZeroRate = feature.zeroCrossingRate(frames)\n\nenergy_mean = energy.mean()\nT1 = np.mean(energy[:10])\nT2 = energy_mean / 4 # 较高的能量阈值\nT1 = (T1 + T2) / 4 # 较低的能量阈值\n\nrange_o = np.arange(len(energy))\n# 首先利用较大能量阈值 MH 进行初步检测\nmask1 = energy > T2\nrange1 = range_o[mask1]\nN3, N4 = range1[0], range1[-1]\n\n# 利用较小能量阈值 ML 进行第二步能量检测\nN2, N5 = N3, N4\nfor i in range_o[:N3][::-1]: # 从N3向左搜索 从N4向右搜索\n if energy[i] <= T1:\n N2 = i\n break\nfor j in range_o[N4:]:\n if energy[j] <= T1:\n N5 = j\n break\nL = N2\nR = N5\nL_w = N2 * move + frame_lens // 2\nR_w = N5 * move + frame_lens // 2\n\nfig = plt.figure(figsize=(9, 6))\nx2 = np.arange(len(energy))\nx3 = np.arange(len(wave_data))\n\nfig.add_subplot(311)\nplt.title(\"Wave\")\nplt.xticks([])\nplt.ylim([wave_data.min(), wave_data.max()])\nplt.plot(x3, wave_data)\nplt.plot([L_w, L_w], [wave_data.min(), wave_data.max()], c='r', linestyle='--')\nplt.plot([R_w, R_w], [wave_data.min(), wave_data.max()], c='r', linestyle='--')\n\nfig.add_subplot(312)\nplt.title(\"Energy\")\nplt.xticks([])\nplt.ylim([energy.min(), energy.max()])\nplt.plot(x2, energy)\nplt.plot([L, L], [energy.min(), energy.max()], c='r', linestyle='--')\nplt.plot([R, R], [energy.min(), energy.max()], c='r', linestyle='--')\n\nfig.add_subplot(313)\nplt.title(\"CrossZeroRate\")\nplt.xticks([])\nplt.ylim([CrossZeroRate.min(), CrossZeroRate.max()])\nplt.plot(x2, CrossZeroRate)\nplt.plot([L, L], [CrossZeroRate.min(), CrossZeroRate.max()], c='r', linestyle='--')\nplt.plot([R, R], [CrossZeroRate.min(), CrossZeroRate.max()], c='r', linestyle='--')\n\nplt.savefig(r\"image/double_thres.png\", bbox_inches='tight')\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
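The visualization script in the record above implements the classic double-threshold endpoint detector on a per-frame energy curve: a rough segment is found with the higher threshold, then extended outward until the energy falls below the lower threshold. A minimal self-contained sketch of that search, assuming plain numpy and a synthetic energy curve (the real script computes energy from framed audio):

# Minimal sketch: double-threshold endpoint detection on a per-frame energy curve.
# Pure numpy; the energy array is synthetic, thresholds follow the script above.
import numpy as np

energy = np.concatenate([np.full(20, 0.01), np.linspace(0.01, 1.0, 30),
                         np.full(40, 1.0), np.linspace(1.0, 0.01, 30), np.full(20, 0.01)])

T_low = np.mean(energy[:10])   # noise-floor estimate from the leading frames
T_high = energy.mean() / 4     # higher threshold
T_low = (T_low + T_high) / 4   # lower threshold

idx = np.arange(len(energy))
above = idx[energy > T_high]
N3, N4 = above[0], above[-1]   # rough segment from the higher threshold

N2, N5 = N3, N4
for i in idx[:N3][::-1]:       # extend left until energy drops below T_low
    if energy[i] <= T_low:
        N2 = i
        break
for j in idx[N4:]:             # extend right until energy drops below T_low
    if energy[j] <= T_low:
        N5 = j
        break
print('endpoints (frames):', N2, N5)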
quanted/poptox
|
[
"5579f9571ff7e3bddf5ed79c3641337e3df7f8f2"
] |
[
"poptox/loons/loons_output.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE']='settings'\nimport webapp2 as webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext.webapp import template\nimport numpy as np\nimport cgi\nimport cgitb\nimport copy\ncgitb.enable()\nfrom loons import loons_model\nfrom loons import loons_tables\n\n\nclass loons_OutputPage(webapp.RequestHandler):\n def post(self): \n form = cgi.FieldStorage()\n b = form.getvalue('b')\n m = form.getvalue('m')\n r = form.getvalue('r')\n pa = form.getvalue('pa')\n sj = form.getvalue('sj')\n t = form.getvalue('t')\n\n no1 = form.getvalue('no1')\n no2 = form.getvalue('no2')\n no3 = form.getvalue('no3')\n no4 = form.getvalue('no4')\n n_o =[no1, no2, no3, no4]\n n_o = np.asarray(n_o)\n\n loons_obj = loons_model.loons(b, m, r, pa, sj, t, no1, no2, no3, no4)\n # print loons_obj.b[4]\n\n templatepath = os.path.dirname(__file__) + '/../templates/'\n html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})\n html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'loons','page':'output'})\n html = html + template.render(templatepath + '03pop_ubertext_links_left.html', {})\n html = html + template.render(templatepath + '04uberoutput_start.html', {\n 'model':'loons', \n 'model_attributes':'Loons Population Model'})\n\n html = html + loons_tables.table_all(loons_obj) # \n\n html = html + \"\"\"<table width=\"400\" border=\"1\" style=\"display:none\">\n <tr>\n <td>number of class</td>\n <td id=\"n_o_c\">4</td>\n </tr>\n <tr>\n <td>final population</td>\n <td id=\"final\">%s</td>\n </tr> \n </table>\"\"\"%(loons_obj.leslie_out)\n html = html + template.render(templatepath + 'loons_jqplot.html', {}) \n\n html = html + template.render(templatepath + '04uberoutput_end.html', {})\n html = html + template.render(templatepath + 'export.html', {})\n html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})\n self.response.out.write(html)\n \napp = webapp.WSGIApplication([('/.*', loons_OutputPage)], debug=True)\n\n \ndef main():\n run_wsgi_app(app)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.asarray"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
glyg/napari
|
[
"be196dc979aed663a484f3a424dacc3227f7c958"
] |
[
"napari/layers/shapes/shapes.py"
] |
[
"import numpy as np\nfrom copy import copy, deepcopy\n\nfrom ...util.event import Event\nfrom ...util.misc import ensure_iterable\nfrom ...util.status_messages import format_float\nfrom ..base import Layer\nfrom vispy.color import get_color_names\nfrom ._constants import Mode, Box, BACKSPACE, shape_classes, ShapeType\nfrom .shape_list import ShapeList\nfrom .shape_util import create_box, point_to_lines\nfrom .shape_models import Rectangle, Ellipse, Line, Path, Polygon\n\n\nclass Shapes(Layer):\n \"\"\"Shapes layer.\n\n Parameters\n ----------\n data : list or array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions. Can be an 3-dimensional\n array if each shape has the same number of vertices.\n shape_type : string or list\n String of shape shape_type, must be one of \"{'line', 'rectangle',\n 'ellipse', 'path', 'polygon'}\". If a list is supplied it must be\n the same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_width : float or list\n Thickness of lines and edges. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n face_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n z_index : int or list\n Specifier of z order priority. Shapes with higher z order are\n displayed ontop of others. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float or list\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Attributes\n ----------\n data : (N, ) list of array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions.\n shape_types : (N, ) list of str\n Name of shape type for each shape.\n edge_colors : (N, ) list of str\n Name of edge color for each shape.\n face_colors : (N, ) list of str\n Name of face color for each shape.\n edge_widths : (N, ) list of float\n Edge width for each shape.\n opacities : (N, ) list of float\n Opacity for each shape.\n z_indices : (N, ) list of int\n z-index for each shape.\n edge_width : float\n Thickness of lines and edges of the next shape to be added or the\n currently selected shape.\n edge_color : str\n Color of the edge of the next shape to be added or the currently\n selected shape.\n face_color : str\n Color of the face of the next shape to be added or the currently\n selected shape.\n opacity : float\n Opacity of the next shape to be added or the currently selected shape.\n selected_data : list\n List of currently selected shapes.\n nshapes : int\n Total number of shapes.\n mode : Mode\n Interactive mode. The normal, default mode is PAN_ZOOM, which\n allows for normal interactivity with the canvas.\n\n The SELECT mode allows for entire shapes to be selected, moved and\n resized.\n\n The DIRECT mode allows for shapes to be selected and their individual\n vertices to be moved.\n\n The VERTEX_INSERT and VERTEX_REMOVE modes allow for individual\n vertices either to be added to or removed from shapes that are already\n selected. Note that shapes cannot be selected in this mode.\n\n The ADD_RECTANGLE, ADD_ELLIPSE, ADD_LINE, ADD_PATH, and ADD_POLYGON\n modes all allow for their corresponding shape type to be added.\n\n Extended Summary\n ----------\n _data_dict : Dict of ShapeList\n Dictionary containing all the shape data indexed by slice tuple\n _data_view : ShapeList\n Object containing the currently viewed shape data.\n _mode_history : Mode\n Interactive mode captured on press of <space>.\n _selected_data_history : list\n List of currently selected captured on press of <space>.\n _selected_data_stored : list\n List of selected previously displayed. Used to prevent rerendering the\n same highlighted shapes when no data has changed.\n _selected_box : None | np.ndarray\n `None` if no shapes are selected, otherwise a 10x2 array of vertices of\n the interaction box. The first 8 points are the corners and midpoints\n of the box. The 9th point is the center of the box, and the last point\n is the location of the rotation handle that can be used to rotate the\n box.\n _drag_start : None | np.ndarray\n If a drag has been started and is in progress then a length 2 array of\n the initial coordinates of the drag. `None` otherwise.\n _drag_box : None | np.ndarray\n If a drag box is being created to select shapes then this is a 2x2\n array of the two extreme corners of the drag. `None` otherwise.\n _drag_box_stored : None | np.ndarray\n If a drag box is being created to select shapes then this is a 2x2\n array of the two extreme corners of the drag that have previously been\n rendered. `None` otherwise. 
Used to prevent rerendering the same\n drag box when no data has changed.\n _is_moving : bool\n Bool indicating if any shapes are currently being moved.\n _is_selecting : bool\n Bool indicating if a drag box is currently being created in order to\n select shapes.\n _is_creating : bool\n Bool indicating if any shapes are currently being created.\n _fixed_aspect : bool\n Bool indicating if aspect ratio of shapes should be preserved on\n resizing.\n _aspect_ratio : float\n Value of aspect ratio to be preserved if `_fixed_aspect` is `True`.\n _fixed_vertex : None | np.ndarray\n If a scaling or rotation is in progress then a length 2 array of the\n coordinates that are remaining fixed during the move. `None` otherwise.\n _fixed_index : int\n If a scaling or rotation is in progress then the index of the vertex of\n the boudning box that is remaining fixed during the move. `None`\n otherwise.\n _update_properties : bool\n Bool indicating if properties are to allowed to update the selected\n shapes when they are changed. Blocking this prevents circular loops\n when shapes are selected and the properties are changed based on that\n selection\n _clipboard : dict\n Dict of shape objects that are to be used during a copy and paste.\n _colors : list\n List of supported vispy color names.\n _vertex_size : float\n Size of the vertices of the shapes and boudning box in Canvas\n coordinates.\n _rotation_handle_length : float\n Length of the rotation handle of the boudning box in Canvas\n coordinates.\n _input_ndim : int\n Dimensions of shape data.\n \"\"\"\n\n _colors = get_color_names()\n _vertex_size = 10\n _rotation_handle_length = 20\n _highlight_color = (0, 0.6, 1)\n _highlight_width = 1.5\n\n def __init__(\n self,\n data=None,\n *,\n shape_type='rectangle',\n edge_width=1,\n edge_color='black',\n face_color='white',\n z_index=0,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ):\n if data is None:\n data = np.empty((0, 0, 2))\n if np.array(data).ndim == 3:\n ndim = np.array(data).shape[2]\n elif len(data) == 0:\n ndim = 2\n elif np.array(data[0]).ndim == 1:\n ndim = np.array(data).shape[1]\n else:\n ndim = np.array(data[0]).shape[1]\n\n # Don't pass on opacity value to base layer as it could be a list\n # and will get set bellow\n super().__init__(\n ndim,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n blending=blending,\n visible=visible,\n )\n\n self.events.add(\n mode=Event,\n edge_width=Event,\n edge_color=Event,\n face_color=Event,\n highlight=Event,\n )\n\n self._display_order_stored = []\n self._ndisplay_stored = self.dims.ndisplay\n self.dims.clip = False\n\n # The following shape properties are for the new shapes that will\n # be drawn. 
Each shape has a corresponding property with the\n # value for itself\n if np.isscalar(edge_width):\n self._edge_width = edge_width\n else:\n self._edge_width = 1\n\n if type(edge_color) is str:\n self._edge_color = edge_color\n else:\n self._edge_color = 'black'\n\n if type(face_color) is str:\n self._face_color = face_color\n else:\n self._face_color = 'white'\n\n if np.isscalar(opacity):\n self._opacity = opacity\n else:\n self._opacity = 0.7\n\n self._data_view = ShapeList(ndisplay=self.dims.ndisplay)\n self._data_slice_keys = np.empty(\n (0, 2, len(self.dims.not_displayed)), dtype=int\n )\n\n self._value = (None, None)\n self._value_stored = (None, None)\n self._moving_value = (None, None)\n self._selected_data = []\n self._selected_data_stored = []\n self._selected_data_history = []\n self._selected_box = None\n\n self._drag_start = None\n self._fixed_vertex = None\n self._fixed_aspect = False\n self._aspect_ratio = 1\n self._is_moving = False\n self._fixed_index = 0\n self._is_selecting = False\n self._drag_box = None\n self._drag_box_stored = None\n self._is_creating = False\n self._clipboard = {}\n\n self._mode = Mode.PAN_ZOOM\n self._mode_history = self._mode\n self._status = self.mode\n self._help = 'enter a selection mode to edit shape properties'\n\n self.events.deselect.connect(lambda x: self._finish_drawing())\n self.events.face_color.connect(lambda e: self._update_thumbnail())\n self.events.edge_color.connect(lambda e: self._update_thumbnail())\n\n self.add(\n data,\n shape_type=shape_type,\n edge_width=edge_width,\n edge_color=edge_color,\n face_color=face_color,\n opacity=opacity,\n z_index=z_index,\n )\n\n # Trigger generation of view slice and thumbnail\n self._update_dims()\n\n @property\n def data(self):\n \"\"\"list: Each element is an (N, D) array of the vertices of a shape.\"\"\"\n return self._data_view.data\n\n @data.setter\n def data(self, data, shape_type='rectangle'):\n self._finish_drawing()\n self._data_view = ShapeList()\n self.add(data, shape_type=shape_type)\n self._update_dims()\n self.events.data()\n\n def _get_ndim(self):\n \"\"\"Determine number of dimensions of the layer.\"\"\"\n if self.nshapes == 0:\n ndim = self.ndim\n else:\n ndim = self.data[0].shape[1]\n return ndim\n\n def _get_extent(self):\n \"\"\"Determine ranges for slicing given by (min, max, step).\"\"\"\n if self.nshapes == 0:\n maxs = [1] * self.ndim\n mins = [0] * self.ndim\n else:\n maxs = np.max([np.max(d, axis=0) for d in self.data], axis=0)\n mins = np.min([np.min(d, axis=0) for d in self.data], axis=0)\n\n return tuple((min, max, 1) for min, max in zip(mins, maxs))\n\n @property\n def nshapes(self):\n \"\"\"int: Total number of shapes.\"\"\"\n return len(self._data_view.shapes)\n\n @property\n def edge_width(self):\n \"\"\"float: Width of shape edges including lines and paths.\"\"\"\n return self._edge_width\n\n @edge_width.setter\n def edge_width(self, edge_width):\n self._edge_width = edge_width\n if self._update_properties:\n index = self.selected_data\n for i in index:\n self._data_view.update_edge_width(i, edge_width)\n self.status = format_float(self.edge_width)\n self.events.edge_width()\n\n @property\n def edge_color(self):\n \"\"\"str: color of shape edges including lines and paths.\"\"\"\n return self._edge_color\n\n @edge_color.setter\n def edge_color(self, edge_color):\n self._edge_color = edge_color\n if self._update_properties:\n index = self.selected_data\n for i in index:\n self._data_view.update_edge_color(i, edge_color)\n self.events.edge_color()\n\n @property\n 
def face_color(self):\n \"\"\"str: color of shape faces.\"\"\"\n return self._face_color\n\n @face_color.setter\n def face_color(self, face_color):\n self._face_color = face_color\n if self._update_properties:\n index = self.selected_data\n for i in index:\n self._data_view.update_face_color(i, face_color)\n self.events.face_color()\n\n @property\n def opacity(self):\n \"\"\"float: Opacity value between 0.0 and 1.0.\"\"\"\n return self._opacity\n\n @opacity.setter\n def opacity(self, opacity):\n if not 0.0 <= opacity <= 1.0:\n raise ValueError(\n 'opacity must be between 0.0 and 1.0; ' f'got {opacity}'\n )\n\n self._opacity = opacity\n if self._update_properties:\n index = self.selected_data\n for i in index:\n self._data_view.update_opacity(i, opacity)\n self.status = format_float(self.opacity)\n self.events.opacity()\n\n @property\n def shape_types(self):\n \"\"\"list of str: name of shape type for each shape.\"\"\"\n return self._data_view.shape_types\n\n @property\n def edge_colors(self):\n \"\"\"list of str: name of edge color for each shape.\"\"\"\n return self._data_view.edge_colors\n\n @property\n def face_colors(self):\n \"\"\"list of str: name of face color for each shape.\"\"\"\n return self._data_view.face_colors\n\n @property\n def edge_widths(self):\n \"\"\"list of float: edge width for each shape.\"\"\"\n return self._data_view.edge_widths\n\n @property\n def opacities(self):\n \"\"\"list of float: opacity for each shape.\"\"\"\n return self._data_view.opacities\n\n @property\n def z_indices(self):\n \"\"\"list of int: z_index for each shape.\"\"\"\n return self._data_view.z_indices\n\n @property\n def selected_data(self):\n \"\"\"list: list of currently selected shapes.\"\"\"\n return self._selected_data\n\n @selected_data.setter\n def selected_data(self, selected_data):\n self._selected_data = selected_data\n self._selected_box = self.interaction_box(selected_data)\n\n # Update properties based on selected shapes\n face_colors = list(\n set(\n [\n self._data_view.shapes[i]._face_color_name\n for i in selected_data\n ]\n )\n )\n if len(face_colors) == 1:\n face_color = face_colors[0]\n with self.block_update_properties():\n self.face_color = face_color\n\n edge_colors = list(\n set(\n [\n self._data_view.shapes[i]._edge_color_name\n for i in selected_data\n ]\n )\n )\n if len(edge_colors) == 1:\n edge_color = edge_colors[0]\n with self.block_update_properties():\n self.edge_color = edge_color\n\n edge_width = list(\n set([self._data_view.shapes[i].edge_width for i in selected_data])\n )\n if len(edge_width) == 1:\n edge_width = edge_width[0]\n with self.block_update_properties():\n self.edge_width = edge_width\n\n opacities = list(\n set([self._data_view.shapes[i].opacity for i in selected_data])\n )\n if len(opacities) == 1:\n opacity = opacities[0]\n with self.block_update_properties():\n self.opacity = opacity\n\n @property\n def mode(self):\n \"\"\"MODE: Interactive mode. The normal, default mode is PAN_ZOOM, which\n allows for normal interactivity with the canvas.\n\n The SELECT mode allows for entire shapes to be selected, moved and\n resized.\n\n The DIRECT mode allows for shapes to be selected and their individual\n vertices to be moved.\n\n The VERTEX_INSERT and VERTEX_REMOVE modes allow for individual\n vertices either to be added to or removed from shapes that are already\n selected. 
Note that shapes cannot be selected in this mode.\n\n The ADD_RECTANGLE, ADD_ELLIPSE, ADD_LINE, ADD_PATH, and ADD_POLYGON\n modes all allow for their corresponding shape type to be added.\n \"\"\"\n return str(self._mode)\n\n @mode.setter\n def mode(self, mode):\n if isinstance(mode, str):\n mode = Mode(mode)\n\n if not self.editable:\n mode = Mode.PAN_ZOOM\n\n if mode == self._mode:\n return\n old_mode = self._mode\n if mode == Mode.PAN_ZOOM:\n self.cursor = 'standard'\n self.interactive = True\n self.help = 'enter a selection mode to edit shape properties'\n elif mode in [Mode.SELECT, Mode.DIRECT]:\n self.cursor = 'pointing'\n self.interactive = False\n self.help = (\n 'hold <space> to pan/zoom, '\n f'press <{BACKSPACE}> to remove selected'\n )\n elif mode in [Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]:\n self.cursor = 'cross'\n self.interactive = False\n self.help = 'hold <space> to pan/zoom'\n elif mode in [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]:\n self.cursor = 'cross'\n self.interactive = False\n self.help = 'hold <space> to pan/zoom'\n elif mode in [Mode.ADD_PATH, Mode.ADD_POLYGON]:\n self.cursor = 'cross'\n self.interactive = False\n self.help = (\n 'hold <space> to pan/zoom, ' 'press <esc> to finish drawing'\n )\n else:\n raise ValueError(\"Mode not recongnized\")\n\n self.status = str(mode)\n self._mode = mode\n\n draw_modes = [\n Mode.SELECT,\n Mode.DIRECT,\n Mode.VERTEX_INSERT,\n Mode.VERTEX_REMOVE,\n ]\n\n self.events.mode(mode=mode)\n if not (mode in draw_modes and old_mode in draw_modes):\n self._finish_drawing()\n self._set_view_slice()\n\n def _set_editable(self, editable=None):\n \"\"\"Set editable mode based on layer properties.\"\"\"\n if editable is None:\n if self.dims.ndisplay == 3:\n self.editable = False\n else:\n self.editable = True\n\n if not self.editable:\n self.mode = Mode.PAN_ZOOM\n\n def add(\n self,\n data,\n *,\n shape_type='rectangle',\n edge_width=None,\n edge_color=None,\n face_color=None,\n opacity=None,\n z_index=None,\n ):\n \"\"\"Add shapes to the current layer.\n\n Parameters\n ----------\n data : list or array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions. Can be an 3-dimensional\n array if each shape has the same number of vertices.\n shape_type : string | list\n String of shape shape_type, must be one of \"{'line', 'rectangle',\n 'ellipse', 'path', 'polygon'}\". If a list is supplied it must be\n the same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_width : float | list\n thickness of lines and edges. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_color : str | tuple | list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n face_color : str | tuple | list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. 
If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n opacity : float | list\n Opacity of the shapes, must be between 0 and 1.\n z_index : int | list\n Specifier of z order priority. Shapes with higher z order are\n displayed ontop of others. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n \"\"\"\n if edge_width is None:\n edge_width = self.edge_width\n if edge_color is None:\n edge_color = self.edge_color\n if face_color is None:\n face_color = self.face_color\n if opacity is None:\n opacity = self.opacity\n if self._data_view is not None:\n z_index = z_index or max(self._data_view._z_index, default=-1) + 1\n else:\n z_index = z_index or 0\n\n if len(data) > 0:\n if np.array(data[0]).ndim == 1:\n # If a single array for a shape has been passed turn into list\n data = [data]\n\n # Turn input arguments into iterables\n shape_inputs = zip(\n data,\n ensure_iterable(shape_type),\n ensure_iterable(edge_width),\n ensure_iterable(edge_color, color=True),\n ensure_iterable(face_color, color=True),\n ensure_iterable(opacity),\n ensure_iterable(z_index),\n )\n\n for d, st, ew, ec, fc, o, z in shape_inputs:\n\n # A False slice_key means the shape is invalid as it is not\n # confined to a single plane\n shape_cls = shape_classes[ShapeType(st)]\n shape = shape_cls(\n d,\n edge_width=ew,\n edge_color=ec,\n face_color=fc,\n opacity=o,\n z_index=z,\n dims_order=self.dims.order,\n ndisplay=self.dims.ndisplay,\n )\n\n # Add shape\n self._data_view.add(shape)\n\n self._display_order_stored = copy(self.dims.order)\n self._ndisplay_stored = copy(self.dims.ndisplay)\n self._update_dims()\n\n def _set_view_slice(self):\n \"\"\"Set the view given the slicing indices.\"\"\"\n if not self.dims.ndisplay == self._ndisplay_stored:\n self.selected_data = []\n self._data_view.ndisplay = min(self.dims.ndim, self.dims.ndisplay)\n self._ndisplay_stored = copy(self.dims.ndisplay)\n self._clipboard = {}\n\n if not self.dims.order == self._display_order_stored:\n self.selected_data = []\n self._data_view.update_dims_order(self.dims.order)\n self._display_order_stored = copy(self.dims.order)\n # Clear clipboard if dimensions swap\n self._clipboard = {}\n\n slice_key = np.array(self.dims.indices)[list(self.dims.not_displayed)]\n if not np.all(slice_key == self._data_view.slice_key):\n self.selected_data = []\n self._data_view.slice_key = slice_key\n\n self._set_highlight(force=True)\n self._update_thumbnail()\n self._update_coordinates()\n self.events.set_data()\n\n def interaction_box(self, index):\n \"\"\"Create the interaction box around a shape or list of shapes.\n If a single index is passed then the boudning box will be inherited\n from that shapes interaction box. If list of indices is passed it will\n be computed directly.\n\n Parameters\n ----------\n index : int | list\n Index of a single shape, or a list of shapes around which to\n construct the interaction box\n\n Returns\n ----------\n box : np.ndarray\n 10x2 array of vertices of the interaction box. The first 8 points\n are the corners and midpoints of the box in clockwise order\n starting in the upper-left corner. 
The 9th point is the center of\n the box, and the last point is the location of the rotation handle\n that can be used to rotate the box\n \"\"\"\n if isinstance(index, (list, np.ndarray)):\n if len(index) == 0:\n box = None\n elif len(index) == 1:\n box = copy(self._data_view.shapes[index[0]]._box)\n else:\n indices = np.isin(self._data_view.displayed_index, index)\n box = create_box(self._data_view.displayed_vertices[indices])\n else:\n box = copy(self._data_view.shapes[index]._box)\n\n if box is not None:\n rot = box[Box.TOP_CENTER]\n length_box = np.linalg.norm(\n box[Box.BOTTOM_LEFT] - box[Box.TOP_LEFT]\n )\n if length_box > 0:\n r = self._rotation_handle_length * self.scale_factor\n rot = (\n rot\n - r\n * (box[Box.BOTTOM_LEFT] - box[Box.TOP_LEFT])\n / length_box\n )\n box = np.append(box, [rot], axis=0)\n\n return box\n\n def _outline_shapes(self):\n \"\"\"Find outlines of any selected or hovered shapes.\n\n Returns\n ----------\n vertices : None | np.ndarray\n Nx2 array of any vertices of outline or None\n triangles : None | np.ndarray\n Mx3 array of any indices of vertices for triangles of outline or\n None\n \"\"\"\n if self._value[0] is not None or len(self.selected_data) > 0:\n if len(self.selected_data) > 0:\n index = copy(self.selected_data)\n if self._value[0] is not None:\n if self._value[0] in index:\n pass\n else:\n index.append(self._value[0])\n index.sort()\n else:\n index = self._value[0]\n\n centers, offsets, triangles = self._data_view.outline(index)\n vertices = centers + (\n self.scale_factor * self._highlight_width * offsets\n )\n vertices = vertices[:, ::-1]\n else:\n vertices = None\n triangles = None\n\n return vertices, triangles\n\n def _compute_vertices_and_box(self):\n \"\"\"Compute location of highlight vertices and box for rendering.\n\n Returns\n ----------\n vertices : np.ndarray\n Nx2 array of any vertices to be rendered as Markers\n face_color : str\n String of the face color of the Markers\n edge_color : str\n String of the edge color of the Markers and Line for the box\n pos : np.ndarray\n Nx2 array of vertices of the box that will be rendered using a\n Vispy Line\n width : float\n Width of the box edge\n \"\"\"\n if len(self.selected_data) > 0:\n if self._mode == Mode.SELECT:\n # If in select mode just show the interaction boudning box\n # including its vertices and the rotation handle\n box = self._selected_box[Box.WITH_HANDLE]\n if self._value[0] is None:\n face_color = 'white'\n elif self._value[1] is None:\n face_color = 'white'\n else:\n face_color = self._highlight_color\n edge_color = self._highlight_color\n vertices = box[:, ::-1]\n # Use a subset of the vertices of the interaction_box to plot\n # the line around the edge\n pos = box[Box.LINE_HANDLE][:, ::-1]\n width = 1.5\n elif self._mode in (\n [\n Mode.DIRECT,\n Mode.ADD_PATH,\n Mode.ADD_POLYGON,\n Mode.ADD_RECTANGLE,\n Mode.ADD_ELLIPSE,\n Mode.ADD_LINE,\n Mode.VERTEX_INSERT,\n Mode.VERTEX_REMOVE,\n ]\n ):\n # If in one of these mode show the vertices of the shape itself\n inds = np.isin(\n self._data_view.displayed_index, self.selected_data\n )\n vertices = self._data_view.displayed_vertices[inds][:, ::-1]\n # If currently adding path don't show box over last vertex\n if self._mode == Mode.ADD_PATH:\n vertices = vertices[:-1]\n\n if self._value[0] is None:\n face_color = 'white'\n elif self._value[1] is None:\n face_color = 'white'\n else:\n face_color = self._highlight_color\n edge_color = self._highlight_color\n pos = None\n width = 0\n else:\n # Otherwise show nothing\n vertices = 
np.empty((0, 2))\n face_color = 'white'\n edge_color = 'white'\n pos = None\n width = 0\n elif self._is_selecting:\n # If currently dragging a selection box just show an outline of\n # that box\n vertices = np.empty((0, 2))\n edge_color = self._highlight_color\n face_color = 'white'\n box = create_box(self._drag_box)\n width = 1.5\n # Use a subset of the vertices of the interaction_box to plot\n # the line around the edge\n pos = box[Box.LINE][:, ::-1]\n else:\n # Otherwise show nothing\n vertices = np.empty((0, 2))\n face_color = 'white'\n edge_color = 'white'\n pos = None\n width = 0\n\n return vertices, face_color, edge_color, pos, width\n\n def _set_highlight(self, force=False):\n \"\"\"Render highlights of shapes.\n\n Includes boundaries, vertices, interaction boxes, and the drag\n selection box when appropriate.\n\n Parameters\n ----------\n force : bool\n Bool that forces a redraw to occur when `True`\n \"\"\"\n # Check if any shape or vertex ids have changed since last call\n if (\n self.selected_data == self._selected_data_stored\n and np.all(self._value == self._value_stored)\n and np.all(self._drag_box == self._drag_box_stored)\n ) and not force:\n return\n self._selected_data_stored = copy(self.selected_data)\n self._value_stored = copy(self._value)\n self._drag_box_stored = copy(self._drag_box)\n self.events.highlight()\n\n def _finish_drawing(self):\n \"\"\"Reset properties used in shape drawing.\"\"\"\n index = copy(self._moving_value[0])\n self._is_moving = False\n self.selected_data = []\n self._drag_start = None\n self._drag_box = None\n self._is_selecting = False\n self._fixed_vertex = None\n self._value = (None, None)\n self._moving_value = (None, None)\n if self._is_creating is True and self._mode == Mode.ADD_PATH:\n vertices = self._data_view.displayed_vertices[\n self._data_view.displayed_index == index\n ]\n if len(vertices) <= 2:\n self._data_view.remove(index)\n else:\n data_full = self.expand_shape(vertices)\n self._data_view.edit(index, data_full[:-1])\n if self._is_creating is True and self._mode == Mode.ADD_POLYGON:\n vertices = self._data_view.displayed_vertices[\n self._data_view.displayed_index == index\n ]\n if len(vertices) <= 2:\n self._data_view.remove(index)\n self._is_creating = False\n self._update_dims()\n\n def _update_thumbnail(self):\n \"\"\"Update thumbnail with current points and colors.\"\"\"\n # calculate min vals for the vertices and pad with 0.5\n # the offset is needed to ensure that the top left corner of the shapes\n # corresponds to the top left corner of the thumbnail\n offset = (\n np.array([self.dims.range[d][0] for d in self.dims.displayed])\n + 0.5\n )\n # calculate range of values for the vertices and pad with 1\n # padding ensures the entire shape can be represented in the thumbnail\n # without getting clipped\n shape = np.ceil(\n [\n self.dims.range[d][1] - self.dims.range[d][0] + 1\n for d in self.dims.displayed\n ]\n ).astype(int)\n zoom_factor = np.divide(self._thumbnail_shape[:2], shape[-2:]).min()\n\n colormapped = self._data_view.to_colors(\n colors_shape=self._thumbnail_shape[:2],\n zoom_factor=zoom_factor,\n offset=offset[-2:],\n )\n\n self.thumbnail = colormapped\n\n def remove_selected(self):\n \"\"\"Remove any selected shapes.\"\"\"\n to_remove = sorted(self.selected_data, reverse=True)\n for index in to_remove:\n self._data_view.remove(index)\n self.selected_data = []\n self._finish_drawing()\n\n def _rotate_box(self, angle, center=[0, 0]):\n \"\"\"Perfrom a rotation on the selected box.\n\n Parameters\n ----------\n 
angle : float\n angle specifying rotation of shapes in degrees.\n center : list\n coordinates of center of rotation.\n \"\"\"\n theta = np.radians(angle)\n transform = np.array(\n [[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]\n )\n box = self._selected_box - center\n self._selected_box = box @ transform.T + center\n\n def _scale_box(self, scale, center=[0, 0]):\n \"\"\"Perfrom a scaling on the selected box.\n\n Parameters\n ----------\n scale : float, list\n scalar or list specifying rescaling of shape.\n center : list\n coordinates of center of rotation.\n \"\"\"\n if not isinstance(scale, (list, np.ndarray)):\n scale = [scale, scale]\n box = self._selected_box - center\n box = np.array(box * scale)\n if not np.all(box[Box.TOP_CENTER] == box[Box.HANDLE]):\n r = self._rotation_handle_length * self.scale_factor\n handle_vec = box[Box.HANDLE] - box[Box.TOP_CENTER]\n cur_len = np.linalg.norm(handle_vec)\n box[Box.HANDLE] = box[Box.TOP_CENTER] + r * handle_vec / cur_len\n self._selected_box = box + center\n\n def _transform_box(self, transform, center=[0, 0]):\n \"\"\"Perfrom a linear transformation on the selected box.\n\n Parameters\n ----------\n transform : np.ndarray\n 2x2 array specifying linear transform.\n center : list\n coordinates of center of rotation.\n \"\"\"\n box = self._selected_box - center\n box = box @ transform.T\n if not np.all(box[Box.TOP_CENTER] == box[Box.HANDLE]):\n r = self._rotation_handle_length * self.scale_factor\n handle_vec = box[Box.HANDLE] - box[Box.TOP_CENTER]\n cur_len = np.linalg.norm(handle_vec)\n box[Box.HANDLE] = box[Box.TOP_CENTER] + r * handle_vec / cur_len\n self._selected_box = box + center\n\n def expand_shape(self, data):\n \"\"\"Expand shape from 2D to the full data dims.\n\n Parameters\n --------\n data : array\n 2D data array of shape to be expanded.\n\n Returns\n --------\n data_full : array\n Full D dimensional data array of the shape.\n \"\"\"\n if self.ndim == 2:\n data_full = data[:, self.dims.displayed_order]\n else:\n data_full = np.zeros((len(data), self.ndim), dtype=float)\n indices = np.array(self.dims.indices)\n data_full[:, self.dims.not_displayed] = indices[\n self.dims.not_displayed\n ]\n data_full[:, self.dims.displayed] = data\n\n return data_full\n\n def get_value(self):\n \"\"\"Determine if any shape at given coord using triangle meshes.\n\n Getting value is not supported yet for 3D meshes\n\n Returns\n ----------\n shape : int | None\n Index of shape if any that is at the coordinates. Returns `None`\n if no shape is found.\n vertex : int | None\n Index of vertex if any that is at the coordinates. 
Returns `None`\n if no vertex is found.\n \"\"\"\n if self.dims.ndisplay == 3:\n return (None, None)\n\n if self._is_moving:\n return self._moving_value\n\n coord = [self.coordinates[i] for i in self.dims.displayed]\n\n # Check selected shapes\n value = None\n if len(self.selected_data) > 0:\n if self._mode == Mode.SELECT:\n # Check if inside vertex of interaction box or rotation handle\n box = self._selected_box[Box.WITH_HANDLE]\n distances = abs(box - coord)\n\n # Get the vertex sizes\n sizes = self._vertex_size * self.scale_factor / 2\n\n # Check if any matching vertices\n matches = np.all(distances <= sizes, axis=1).nonzero()\n if len(matches[0]) > 0:\n value = (self.selected_data[0], matches[0][-1])\n elif self._mode in (\n [Mode.DIRECT, Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]\n ):\n # Check if inside vertex of shape\n inds = np.isin(\n self._data_view.displayed_index, self.selected_data\n )\n vertices = self._data_view.displayed_vertices[inds]\n distances = abs(vertices - coord)\n\n # Get the vertex sizes\n sizes = self._vertex_size * self.scale_factor / 2\n\n # Check if any matching vertices\n matches = np.all(distances <= sizes, axis=1).nonzero()[0]\n if len(matches) > 0:\n index = inds.nonzero()[0][matches[-1]]\n shape = self._data_view.displayed_index[index]\n vals, idx = np.unique(\n self._data_view.displayed_index, return_index=True\n )\n shape_in_list = list(vals).index(shape)\n value = (shape, index - idx[shape_in_list])\n\n if value is None:\n # Check if mouse inside shape\n shape = self._data_view.inside(coord)\n value = (shape, None)\n\n return value\n\n def move_to_front(self):\n \"\"\"Moves selected objects to be displayed in front of all others.\"\"\"\n if len(self.selected_data) == 0:\n return\n new_z_index = max(self._data_view._z_index) + 1\n for index in self.selected_data:\n self._data_view.update_z_index(index, new_z_index)\n self._set_view_slice()\n\n def move_to_back(self):\n \"\"\"Moves selected objects to be displayed behind all others.\"\"\"\n if len(self.selected_data) == 0:\n return\n new_z_index = min(self._data_view._z_index) - 1\n for index in self.selected_data:\n self._data_view.update_z_index(index, new_z_index)\n self._set_view_slice()\n\n def _copy_data(self):\n \"\"\"Copy selected shapes to clipboard.\"\"\"\n if len(self.selected_data) > 0:\n self._clipboard = {\n 'data': [\n deepcopy(self._data_view.shapes[i])\n for i in self._selected_data\n ],\n 'indices': self.dims.indices,\n }\n else:\n self._clipboard = {}\n\n def _paste_data(self):\n \"\"\"Paste any shapes from clipboard and then selects them.\"\"\"\n cur_shapes = self.nshapes\n if len(self._clipboard.keys()) > 0:\n # Calculate offset based on dimension shifts\n offset = [\n self.dims.indices[i] - self._clipboard['indices'][i]\n for i in self.dims.not_displayed\n ]\n\n # Add new shape data\n for s in self._clipboard['data']:\n shape = deepcopy(s)\n data = copy(shape.data)\n data[:, self.dims.not_displayed] = data[\n :, self.dims.not_displayed\n ] + np.array(offset)\n shape.data = data\n self._data_view.add(shape)\n\n self.selected_data = list(\n range(cur_shapes, cur_shapes + len(self._clipboard['data']))\n )\n self.move_to_front()\n\n def _move(self, coord):\n \"\"\"Moves object at given mouse position and set of indices.\n\n Parameters\n ----------\n coord : sequence of two int\n Position of mouse cursor in image coordinates.\n \"\"\"\n vertex = self._moving_value[1]\n if self._mode in (\n [Mode.SELECT, Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]\n ):\n if len(self.selected_data) 
> 0:\n self._is_moving = True\n if vertex is None:\n # Check where dragging box from to move whole object\n if self._drag_start is None:\n center = self._selected_box[Box.CENTER]\n self._drag_start = coord - center\n center = self._selected_box[Box.CENTER]\n shift = coord - center - self._drag_start\n for index in self.selected_data:\n self._data_view.shift(index, shift)\n self._selected_box = self._selected_box + shift\n self._set_view_slice()\n elif vertex < Box.LEN:\n # Corner / edge vertex is being dragged so resize object\n box = self._selected_box\n if self._fixed_vertex is None:\n self._fixed_index = (vertex + 4) % Box.LEN\n self._fixed_vertex = box[self._fixed_index]\n\n size = (\n box[(self._fixed_index + 4) % Box.LEN]\n - box[self._fixed_index]\n )\n offset = box[Box.HANDLE] - box[Box.CENTER]\n offset = offset / np.linalg.norm(offset)\n offset_perp = np.array([offset[1], -offset[0]])\n\n fixed = self._fixed_vertex\n new = list(coord)\n\n if self._fixed_aspect and self._fixed_index % 2 == 0:\n if (new - fixed)[0] == 0:\n ratio = 1\n else:\n ratio = abs((new - fixed)[1] / (new - fixed)[0])\n if ratio > self._aspect_ratio:\n r = self._aspect_ratio / ratio\n new[1] = fixed[1] + (new[1] - fixed[1]) * r\n else:\n r = ratio / self._aspect_ratio\n new[0] = fixed[0] + (new[0] - fixed[0]) * r\n\n if size @ offset == 0:\n dist = 1\n else:\n dist = ((new - fixed) @ offset) / (size @ offset)\n\n if size @ offset_perp == 0:\n dist_perp = 1\n else:\n dist_perp = ((new - fixed) @ offset_perp) / (\n size @ offset_perp\n )\n\n if self._fixed_index % 2 == 0:\n # corner selected\n scale = np.array([dist_perp, dist])\n elif self._fixed_index % 4 == 3:\n # top selected\n scale = np.array([1, dist])\n else:\n # side selected\n scale = np.array([dist_perp, 1])\n\n # prevent box from shrinking below a threshold size\n threshold = self._vertex_size * self.scale_factor / 8\n scale[abs(scale * size[[1, 0]]) < threshold] = 1\n\n # check orientation of box\n angle = -np.arctan2(offset[0], -offset[1])\n c, s = np.cos(angle), np.sin(angle)\n if angle == 0:\n for index in self.selected_data:\n self._data_view.scale(\n index, scale, center=self._fixed_vertex\n )\n self._scale_box(scale, center=self._fixed_vertex)\n else:\n rotation = np.array([[c, s], [-s, c]])\n scale_mat = np.array([[scale[0], 0], [0, scale[1]]])\n inv_rot = np.array([[c, -s], [s, c]])\n transform = rotation @ scale_mat @ inv_rot\n for index in self.selected_data:\n self._data_view.shift(index, -self._fixed_vertex)\n self._data_view.transform(index, transform)\n self._data_view.shift(index, self._fixed_vertex)\n self._transform_box(\n transform, center=self._fixed_vertex\n )\n self._set_view_slice()\n elif vertex == 8:\n # Rotation handle is being dragged so rotate object\n handle = self._selected_box[Box.HANDLE]\n if self._drag_start is None:\n self._fixed_vertex = self._selected_box[Box.CENTER]\n offset = handle - self._fixed_vertex\n self._drag_start = -np.degrees(\n np.arctan2(offset[0], -offset[1])\n )\n\n new_offset = coord - self._fixed_vertex\n new_angle = -np.degrees(\n np.arctan2(new_offset[0], -new_offset[1])\n )\n fixed_offset = handle - self._fixed_vertex\n fixed_angle = -np.degrees(\n np.arctan2(fixed_offset[0], -fixed_offset[1])\n )\n\n if np.linalg.norm(new_offset) < 1:\n angle = 0\n elif self._fixed_aspect:\n angle = np.round(new_angle / 45) * 45 - fixed_angle\n else:\n angle = new_angle - fixed_angle\n\n for index in self.selected_data:\n self._data_view.rotate(\n index, angle, center=self._fixed_vertex\n )\n 
self._rotate_box(angle, center=self._fixed_vertex)\n self._set_view_slice()\n else:\n self._is_selecting = True\n if self._drag_start is None:\n self._drag_start = coord\n self._drag_box = np.array([self._drag_start, coord])\n self._set_highlight()\n elif self._mode in [Mode.DIRECT, Mode.ADD_PATH, Mode.ADD_POLYGON]:\n if len(self.selected_data) > 0:\n if vertex is not None:\n self._is_moving = True\n index = self._moving_value[0]\n shape_type = type(self._data_view.shapes[index])\n if shape_type == Ellipse:\n # DIRECT vertex moving of ellipse not implemented\n pass\n else:\n if shape_type == Rectangle:\n new_type = Polygon\n else:\n new_type = None\n indices = self._data_view.displayed_index == index\n vertices = self._data_view.displayed_vertices[indices]\n vertices[vertex] = coord\n data_full = self.expand_shape(vertices)\n self._data_view.edit(\n index, data_full, new_type=new_type\n )\n shapes = self.selected_data\n self._selected_box = self.interaction_box(shapes)\n self._set_view_slice()\n else:\n self._is_selecting = True\n if self._drag_start is None:\n self._drag_start = coord\n self._drag_box = np.array([self._drag_start, coord])\n self._set_highlight()\n elif self._mode in [Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]:\n if len(self.selected_data) > 0:\n pass\n else:\n self._is_selecting = True\n if self._drag_start is None:\n self._drag_start = coord\n self._drag_box = np.array([self._drag_start, coord])\n self._set_highlight()\n\n def to_xml_list(self):\n \"\"\"Convert the shapes to a list of svg xml elements.\n\n Z ordering of the shapes will be taken into account.\n\n Returns\n ----------\n xml : list\n List of xml elements defining each shape according to the\n svg specification\n \"\"\"\n return self._data_view.to_xml_list()\n\n def to_masks(self, mask_shape=None):\n \"\"\"Return an array of binary masks, one for each shape.\n\n Parameters\n ----------\n mask_shape : np.ndarray | tuple | None\n tuple defining shape of mask to be generated. If non specified,\n takes the max of all the vertiecs\n\n Returns\n ----------\n masks : np.ndarray\n Array where there is one binary mask for each shape\n \"\"\"\n if mask_shape is None:\n mask_shape = self.shape\n\n mask_shape = np.ceil(mask_shape).astype('int')\n masks = self._data_view.to_masks(mask_shape=mask_shape)\n\n return masks\n\n def to_labels(self, labels_shape=None):\n \"\"\"Return an integer labels image.\n\n Parameters\n ----------\n labels_shape : np.ndarray | tuple | None\n Tuple defining shape of labels image to be generated. 
If non\n specified, takes the max of all the vertiecs\n\n Returns\n ----------\n labels : np.ndarray\n Integer array where each value is either 0 for background or an\n integer up to N for points inside the shape at the index value - 1.\n For overlapping shapes z-ordering will be respected.\n \"\"\"\n if labels_shape is None:\n labels_shape = self.shape\n\n labels_shape = np.ceil(labels_shape).astype('int')\n labels = self._data_view.to_labels(labels_shape=labels_shape)\n\n return labels\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n\n Parameters\n ----------\n event : Event\n Vispy event\n \"\"\"\n coord = [self.coordinates[i] for i in self.dims.displayed]\n shift = 'Shift' in event.modifiers\n\n if self._mode == Mode.PAN_ZOOM:\n # If in pan/zoom mode do nothing\n pass\n elif self._mode in [Mode.SELECT, Mode.DIRECT]:\n if not self._is_moving and not self._is_selecting:\n self._moving_value = copy(self._value)\n if self._value[1] is None:\n if shift and self._value[0] is not None:\n if self._value[0] in self.selected_data:\n self.selected_data.remove(self._value[0])\n shapes = self.selected_data\n self._selected_box = self.interaction_box(shapes)\n else:\n self.selected_data.append(self._value[0])\n shapes = self.selected_data\n self._selected_box = self.interaction_box(shapes)\n elif self._value[0] is not None:\n if self._value[0] not in self.selected_data:\n self.selected_data = [self._value[0]]\n else:\n self.selected_data = []\n self._set_highlight()\n elif self._mode in (\n [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]\n ):\n # Start drawing a rectangle / ellipse / line\n size = self._vertex_size * self.scale_factor / 4\n corner = np.array(coord)\n if self._mode == Mode.ADD_RECTANGLE:\n data = np.array(\n [\n corner,\n corner + [size, 0],\n corner + size,\n corner + [0, size],\n ]\n )\n shape_type = 'rectangle'\n elif self._mode == Mode.ADD_ELLIPSE:\n data = np.array(\n [\n corner,\n corner + [size, 0],\n corner + size,\n corner + [0, size],\n ]\n )\n shape_type = 'ellipse'\n elif self._mode == Mode.ADD_LINE:\n data = np.array([corner, corner + size])\n shape_type = 'line'\n data_full = self.expand_shape(data)\n self.add(data_full, shape_type=shape_type)\n self.selected_data = [self.nshapes - 1]\n self._value = (self.selected_data[0], 4)\n self._moving_value = copy(self._value)\n self._is_creating = True\n self._set_view_slice()\n elif self._mode in [Mode.ADD_PATH, Mode.ADD_POLYGON]:\n if self._is_creating is False:\n # Start drawing a path\n data = np.array([coord, coord])\n data_full = self.expand_shape(data)\n self.add(data_full, shape_type='path')\n self.selected_data = [self.nshapes - 1]\n self._value = (self.selected_data[0], 1)\n self._moving_value = copy(self._value)\n self._is_creating = True\n self._set_highlight()\n else:\n # Add to an existing path or polygon\n index = self._moving_value[0]\n if self._mode == Mode.ADD_POLYGON:\n new_type = Polygon\n else:\n new_type = None\n vertices = self._data_view.displayed_vertices[\n self._data_view.displayed_index == index\n ]\n vertices = np.concatenate((vertices, [coord]), axis=0)\n # Change the selected vertex\n self._value = (self._value[0], self._value[1] + 1)\n self._moving_value = copy(self._value)\n data_full = self.expand_shape(vertices)\n self._data_view.edit(index, data_full, new_type=new_type)\n self._selected_box = self.interaction_box(self.selected_data)\n elif self._mode == Mode.VERTEX_INSERT:\n if len(self.selected_data) == 0:\n # If none selected return immediately\n 
return\n\n            all_lines = np.empty((0, 2, 2))\n            all_lines_shape = np.empty((0, 2), dtype=int)\n            for index in self.selected_data:\n                shape_type = type(self._data_view.shapes[index])\n                if shape_type == Ellipse:\n                    # Adding vertex to ellipse not implemented\n                    pass\n                else:\n                    vertices = self._data_view.displayed_vertices[\n                        self._data_view.displayed_index == index\n                    ]\n                    # Find which edge new vertex should be inserted along\n                    closed = shape_type != Path\n                    n = len(vertices)\n                    if closed:\n                        lines = np.array(\n                            [\n                                [vertices[i], vertices[(i + 1) % n]]\n                                for i in range(n)\n                            ]\n                        )\n                    else:\n                        lines = np.array(\n                            [\n                                [vertices[i], vertices[i + 1]]\n                                for i in range(n - 1)\n                            ]\n                        )\n                    all_lines = np.append(all_lines, lines, axis=0)\n                    indices = np.array(\n                        [np.repeat(index, len(lines)), list(range(len(lines)))]\n                    ).T\n                    all_lines_shape = np.append(\n                        all_lines_shape, indices, axis=0\n                    )\n            if len(all_lines) == 0:\n                # No appropriate shapes found\n                return\n            ind, loc = point_to_lines(coord, all_lines)\n            index = all_lines_shape[ind][0]\n            ind = all_lines_shape[ind][1] + 1\n            shape_type = type(self._data_view.shapes[index])\n            if shape_type == Line:\n                # Adding vertex to line turns it into a path\n                new_type = Path\n            elif shape_type == Rectangle:\n                # Adding vertex to rectangle turns it into a polygon\n                new_type = Polygon\n            else:\n                new_type = None\n            closed = shape_type != Path\n            vertices = self._data_view.displayed_vertices[\n                self._data_view.displayed_index == index\n            ]\n            if closed is not True:\n                if int(ind) == 1 and loc < 0:\n                    ind = 0\n                elif int(ind) == len(vertices) - 1 and loc > 1:\n                    ind = ind + 1\n\n            vertices = np.insert(vertices, ind, [coord], axis=0)\n            with self.events.set_data.blocker():\n                data_full = self.expand_shape(vertices)\n                self._data_view.edit(index, data_full, new_type=new_type)\n            self._selected_box = self.interaction_box(self.selected_data)\n            self._set_view_slice()\n        elif self._mode == Mode.VERTEX_REMOVE:\n            if self._value[1] is not None:\n                # have clicked on a current vertex so remove\n                index = self._value[0]\n                shape_type = type(self._data_view.shapes[index])\n                if shape_type == Ellipse:\n                    # Removing vertex from ellipse not implemented\n                    return\n                vertices = self._data_view.displayed_vertices[\n                    self._data_view.displayed_index == index\n                ]\n                if len(vertices) <= 2:\n                    # If only 2 vertices present, remove whole shape\n                    with self.events.set_data.blocker():\n                        if index in self.selected_data:\n                            self.selected_data.remove(index)\n                        self._data_view.remove(index)\n                        shapes = self.selected_data\n                        self._selected_box = self.interaction_box(shapes)\n                elif shape_type == Polygon and len(vertices) == 3:\n                    # If only 3 vertices of a polygon present remove\n                    with self.events.set_data.blocker():\n                        if index in self.selected_data:\n                            self.selected_data.remove(index)\n                        self._data_view.remove(index)\n                        shapes = self.selected_data\n                        self._selected_box = self.interaction_box(shapes)\n                else:\n                    if shape_type == Rectangle:\n                        # Deleting vertex from a rectangle creates a polygon\n                        new_type = Polygon\n                    else:\n                        new_type = None\n                    # Remove clicked on vertex\n                    vertices = np.delete(vertices, self._value[1], axis=0)\n                    with self.events.set_data.blocker():\n                        data_full = self.expand_shape(vertices)\n                        self._data_view.edit(\n                            index, data_full, new_type=new_type\n                        )\n                        shapes = self.selected_data\n                        self._selected_box = self.interaction_box(shapes)\n                self._set_view_slice()\n        else:\n            raise ValueError(\"Mode not recognized\")\n\n    def on_mouse_move(self, event):\n        \"\"\"Called whenever mouse moves over canvas.\n\n        Parameters\n        ----------\n        event : Event\n            Vispy 
event\n        \"\"\"\n        coord = [self.coordinates[i] for i in self.dims.displayed]\n\n        if self._mode == Mode.PAN_ZOOM:\n            # If in pan/zoom mode just pass\n            pass\n        elif self._mode == Mode.SELECT:\n            if event.is_dragging:\n                # Drag any selected shapes\n                self._move(coord)\n            elif self._is_moving:\n                pass\n            elif self._is_selecting:\n                pass\n            else:\n                # Highlight boxes\n                self._set_highlight()\n        elif self._mode == Mode.DIRECT:\n            if event.is_dragging:\n                # Drag any selected shapes\n                self._move(coord)\n            elif self._is_moving:\n                pass\n            elif self._is_selecting:\n                pass\n            else:\n                # Highlight boxes\n                self._set_highlight()\n        elif self._mode in (\n            [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]\n        ):\n            # While drawing a shape or doing nothing\n            if self._is_creating and event.is_dragging:\n                # Drag any selected shapes\n                self._move(coord)\n        elif self._mode in [Mode.ADD_PATH, Mode.ADD_POLYGON]:\n            # While drawing a path or doing nothing\n            if self._is_creating:\n                # Drag any selected shapes\n                self._move(coord)\n        elif self._mode in [Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]:\n            self._set_highlight()\n        else:\n            raise ValueError(\"Mode not recognized\")\n\n    def on_mouse_release(self, event):\n        \"\"\"Called whenever mouse released in canvas.\n\n        Parameters\n        ----------\n        event : Event\n            Vispy event\n        \"\"\"\n        shift = 'Shift' in event.modifiers\n\n        if self._mode == Mode.PAN_ZOOM:\n            # If in pan/zoom mode do nothing\n            pass\n        elif self._mode == Mode.SELECT:\n            if not self._is_moving and not self._is_selecting and not shift:\n                if self._value[0] is not None:\n                    self.selected_data = [self._value[0]]\n                else:\n                    self.selected_data = []\n            elif self._is_selecting:\n                self.selected_data = self._data_view.shapes_in_box(\n                    self._drag_box\n                )\n                self._is_selecting = False\n                self._set_highlight()\n            self._is_moving = False\n            self._drag_start = None\n            self._drag_box = None\n            self._fixed_vertex = None\n            self._moving_value = (None, None)\n            self._set_highlight()\n            self._update_thumbnail()\n        elif self._mode == Mode.DIRECT:\n            if not self._is_moving and not self._is_selecting and not shift:\n                if self._value[0] is not None:\n                    self.selected_data = [self._value[0]]\n                else:\n                    self.selected_data = []\n            elif self._is_selecting:\n                self.selected_data = self._data_view.shapes_in_box(\n                    self._drag_box\n                )\n                self._is_selecting = False\n                self._set_highlight()\n            self._is_moving = False\n            self._drag_start = None\n            self._drag_box = None\n            self._fixed_vertex = None\n            self._moving_value = (None, None)\n            self._set_highlight()\n            self._update_thumbnail()\n        elif self._mode in (\n            [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]\n        ):\n            self._finish_drawing()\n        elif self._mode in (\n            [\n                Mode.ADD_PATH,\n                Mode.ADD_POLYGON,\n                Mode.VERTEX_INSERT,\n                Mode.VERTEX_REMOVE,\n            ]\n        ):\n            pass\n        else:\n            raise ValueError(\"Mode not recognized\")\n"
] |
[
[
"numpy.radians",
"numpy.all",
"numpy.max",
"numpy.arctan2",
"numpy.concatenate",
"numpy.round",
"numpy.divide",
"numpy.unique",
"numpy.sin",
"numpy.ceil",
"numpy.insert",
"numpy.isin",
"numpy.min",
"numpy.append",
"numpy.delete",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.isscalar",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iriproff/AuditoryCoding
|
[
"cbd474e18514bdccd651ed639f6d5dd05170814b"
] |
[
"gammatone_utils.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Rachid and Kimia\n\"\"\"\n\n\nimport numpy as np\n\n\ndef gammatone_function(resolution, fc, center, fs=16000, n=4, b=1.019):\n \"\"\"Define a single gammatone function\"\"\"\n t = np.linspace(0, resolution-(center+1), resolution-center)/fs\n g = np.zeros((resolution,))\n g[center:] = t**(n-1) * np.exp(-2*np.pi*b*erb(fc)*t)*np.cos(2*np.pi*fc*t)\n return g\n\n\ndef gammatone_matrix(b, fc, resolution, step, fs=16000, n=4, threshold=5):\n \"\"\"Dictionary of gammatone functions\"\"\"\n centers = np.arange(0, resolution - step, step)\n D = []\n for i, center in enumerate(centers):\n t = np.linspace(0, resolution-(center+1), resolution-center)/fs\n env = t**(n-1) * np.exp(-2*np.pi*b*erb(fc)*t)\n if env[-1]/max(env) < threshold:\n D.append(gammatone_function(resolution, fc, center, b=b, n=n))\n D = np.asarray(D)\n D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]\n freq_c = np.array(fc*np.ones(D.shape[0]))\n return D, freq_c, centers\n\n\ndef erb(f):\n \"\"\"Equivalent rectangular bandwidth formula\"\"\"\n return 24.7+0.108*f\n\n\ndef erb_space(low_freq, high_freq, num_channels=1, EarQ=9.26449, minBW=24.7, order=1):\n \"\"\"Generates sequence of critical (center) frequencies\"\"\"\n return -(EarQ*minBW) + np.exp(np.arange(1, num_channels+1)*(-np.log(high_freq + EarQ*minBW) + np.log(low_freq + EarQ*minBW))/num_channels) * (high_freq + EarQ*minBW)"
] |
[
[
"numpy.log",
"numpy.linspace",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.ones",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
proloyd/Eelbrain
|
[
"5606b9dffaa3aec2d58c1f14e6c3403a0fed4537",
"5606b9dffaa3aec2d58c1f14e6c3403a0fed4537",
"5606b9dffaa3aec2d58c1f14e6c3403a0fed4537"
] |
[
"eelbrain/_stats/tests/test_testnd.py",
"eelbrain/tests/test_design.py",
"eelbrain/_experiment/mne_experiment.py"
] |
[
"# Author: Christian Brodbeck <[email protected]>\nfrom itertools import product\nimport pickle\nimport logging\nimport pytest\nimport sys\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_allclose\n\nimport eelbrain\nfrom eelbrain import Dataset, NDVar, Categorial, Scalar, UTS, Sensor, configure, datasets, test, testnd, set_log_level, cwt_morlet\nfrom eelbrain._exceptions import WrongDimension, ZeroVariance\nfrom eelbrain._stats.testnd import Connectivity, NDPermutationDistribution, label_clusters, _MergedTemporalClusterDist, find_peaks, VectorDifferenceIndependent\nfrom eelbrain._utils.system import IS_WINDOWS\nfrom eelbrain.fmtxt import asfmtext\nfrom eelbrain.testing import assert_dataobj_equal, assert_dataset_equal, requires_mne_sample_data\n\n\ndef test_anova():\n \"Test testnd.anova()\"\n ds = datasets.get_uts(True, nrm=True)\n\n testnd.anova('utsnd', 'A*B', ds=ds)\n for samples in (0, 2):\n logging.info(\"TEST: samples=%r\" % samples)\n testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)\n testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)\n res = testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)\n assert res._plot_model() == 'A%B'\n asfmtext(res)\n\n res = testnd.anova('utsnd', 'A*B*rm', match=False, ds=ds, samples=0, pmin=0.05)\n assert repr(res) == \"<anova 'utsnd', 'A*B*rm', match=False, samples=0, pmin=0.05, 'A': 17 clusters, 'B': 20 clusters, 'A x B': 22 clusters>\"\n assert res._plot_model() == 'A%B'\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=2, pmin=0.05)\n assert res.match == 'rm'\n assert repr(res) == \"<anova 'utsnd', 'A*B*rm', match='rm', samples=2, pmin=0.05, 'A': 17 clusters, p < .001, 'B': 20 clusters, p < .001, 'A x B': 22 clusters, p < .001>\"\n assert res._plot_model() == 'A%B'\n\n # persistence\n string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == repr(res)\n assert res_._plot_model() == 'A%B'\n\n # threshold-free\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)\n assert res.match == 'rm'\n assert repr(res) == \"<anova 'utsnd', 'A*B*rm', match='rm', samples=10, 'A': p < .001, 'B': p < .001, 'A x B': p < .001>\"\n assert 'A clusters' in res.clusters.info\n assert 'B clusters' in res.clusters.info\n assert 'A x B clusters' in res.clusters.info\n\n # no clusters\n res = testnd.anova('uts', 'B', sub=\"A=='a1'\", ds=ds, samples=5, pmin=0.05, mintime=0.02)\n repr(res)\n assert 'v' in res.clusters\n assert 'p' in res.clusters\n assert res._plot_model() == 'B'\n\n # all effects with clusters\n res = testnd.anova('uts', 'A*B*rm', match=False, ds=ds, samples=5, pmin=0.05, tstart=0.1, mintime=0.02)\n assert set(res.clusters['effect'].cells) == set(res.effects)\n\n # some effects with clusters, some without\n res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05, tstart=0.37, mintime=0.02)\n assert res.match == 'rm'\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert_dataobj_equal(res.clusters, res_.clusters)\n\n # test multi-effect results (with persistence)\n # UTS\n res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)\n assert res.match == 'rm'\n repr(res)\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n resr = pickle.loads(string)\n tf_clusters = resr.find_clusters(pmin=0.05)\n peaks = resr.find_peaks()\n assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))\n assert_dataobj_equal(peaks, res.find_peaks())\n assert tf_clusters.eval(\"p.min()\") == 
peaks.eval(\"p.min()\")\n unmasked = resr.f[0]\n masked = resr.masked_parameter_map(effect=0, pmin=0.05)\n assert_array_equal(masked.x <= unmasked.x, True)\n\n # reproducibility\n decimal = 12 if IS_WINDOWS else None # FIXME: why is Windows sometimes different???\n res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)\n assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)\n configure(n_workers=0)\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)\n assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)\n configure(n_workers=True)\n\n # permutation\n eelbrain._stats.permutation._YIELD_ORIGINAL = 1\n samples = 4\n # raw\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)\n for dist in res._cdist:\n assert len(dist.dist) == samples\n assert_array_equal(dist.dist, dist.parameter_map.abs().max())\n # TFCE\n res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)\n for dist in res._cdist:\n assert len(dist.dist) == samples\n assert_array_equal(dist.dist, dist.tfce_map.abs().max())\n # thresholded\n res1 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)\n clusters = res1.find_clusters()\n for dist, effect in zip(res1._cdist, res1.effects):\n effect_idx = clusters.eval(\"effect == %r\" % effect)\n vmax = clusters[effect_idx, 'v'].abs().max()\n assert len(dist.dist) == samples\n assert_array_equal(dist.dist, vmax)\n eelbrain._stats.permutation._YIELD_ORIGINAL = 0\n\n # 1d TFCE\n configure(n_workers=0)\n res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds, tfce=True, samples=samples)\n configure(n_workers=True)\n\n # zero variance\n res2 = testnd.anova('utsnd', 'A', ds=ds)\n ds['utsnd'].x[:, 1, 10] = 0.\n zero_var = ds['utsnd'].var('case') == 0\n zv_index = tuple(i[0] for i in zero_var.nonzero())\n res1_zv = testnd.anova('utsnd', 'A*B*rm', ds=ds)\n res2_zv = testnd.anova('utsnd', 'A', ds=ds)\n for res, res_zv in ((res1, res1_zv), (res2, res2_zv)):\n for f, f_zv in zip(res.f, res_zv.f):\n assert_array_equal((f_zv == 0).x, zero_var.x)\n assert f_zv[zv_index] == 0\n f_zv[zv_index] = f[zv_index]\n assert_dataobj_equal(f_zv, f, decimal=decimal)\n\n # nested random effect\n res = testnd.anova('uts', 'A * B * nrm(A)', ds=ds, samples=10, tstart=.4)\n assert res.match == 'nrm(A)'\n assert [p.min() for p in res.p] == [0.0, 0.6, 0.9]\n\n # unequal argument length\n with pytest.raises(ValueError):\n testnd.anova('uts', 'A[:-1]', ds=ds)\n with pytest.raises(ValueError):\n testnd.anova('uts[:-1]', 'A * B * nrm(A)', ds=ds)\n\n\ndef test_anova_incremental():\n \"Test testnd.anova() with incremental f-tests\"\n ds = datasets.get_uts()\n testnd.anova('uts', 'A*B', ds=ds[3:], pmin=0.05, samples=10)\n\n\n@requires_mne_sample_data\ndef test_anova_parc():\n \"Test ANOVA with parc argument and source space data\"\n set_log_level('warning', 'mne')\n ds = datasets.get_mne_sample(src='ico', sub=\"side.isin(('L', 'R'))\")\n y = ds['src'].sub(source=('lateraloccipital-lh', 'cuneus-lh'))\n y1 = y.sub(source='lateraloccipital-lh')\n y2 = y.sub(source='cuneus-lh')\n kwa = dict(ds=ds, tstart=0.2, tstop=0.3, samples=100)\n\n resp = testnd.anova(y, \"side*modality\", pmin=0.05, parc='source', **kwa)\n c1p = resp.find_clusters(source='lateraloccipital-lh')\n c2p = resp.find_clusters(source='cuneus-lh')\n del c1p['p_parc', 'id']\n del c2p['p_parc', 'id']\n res1 = testnd.anova(y1, \"side*modality\", pmin=0.05, **kwa)\n c1 = res1.find_clusters()\n del 
c1['id']\n res2 = testnd.anova(y2, \"side*modality\", pmin=0.05, **kwa)\n c2 = res2.find_clusters()\n del c2['id']\n assert_dataset_equal(c1p, c1)\n assert_dataset_equal(c2p, c2)\n assert_array_equal(c2['p'], [0.85, 0.88, 0.97, 0.75, 0.99, 0.99, 0.98, 0.0,\n 0.12, 0.88, 0.25, 0.97, 0.34, 0.96])\n\n # without multiprocessing\n configure(n_workers=0)\n ress = testnd.anova(y, \"side*modality\", pmin=0.05, parc='source', **kwa)\n c1s = ress.find_clusters(source='lateraloccipital-lh')\n c2s = ress.find_clusters(source='cuneus-lh')\n del c1s['p_parc', 'id']\n del c2s['p_parc', 'id']\n assert_dataset_equal(c1s, c1)\n assert_dataset_equal(c2s, c2)\n configure(n_workers=True)\n\n # parc but single label\n resp2 = testnd.anova(y2, \"side*modality\", pmin=0.05, parc='source', **kwa)\n c2sp = resp2.find_clusters(source='cuneus-lh')\n del c2sp['p_parc', 'id']\n assert_dataset_equal(c2sp, c2)\n\n # not defined\n with pytest.raises(NotImplementedError):\n testnd.anova(y, \"side*modality\", tfce=True, parc='source', **kwa)\n\n\ndef test_clusterdist():\n \"Test NDPermutationDistribution class\"\n shape = (10, 6, 6, 4)\n locs = [[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0]]\n x = np.random.normal(0, 1, shape)\n sensor = Sensor(locs, ['0', '1', '2', '3'])\n sensor.set_connectivity(connect_dist=1.1)\n dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6), 'unit'),\n sensor)\n y = NDVar(x, dims)\n\n # test connecting sensors\n logging.info(\"TEST: connecting sensors\")\n bin_map = np.zeros(shape[1:], dtype=np.bool8)\n bin_map[:3, :3, :2] = True\n pmap = np.random.normal(0, 1, shape[1:])\n np.clip(pmap, -1, 1, pmap)\n pmap[bin_map] = 2\n cdist = NDPermutationDistribution(y, 0, 1.5)\n print(repr(cdist))\n cdist.add_original(pmap)\n print(repr(cdist))\n assert cdist.n_clusters == 1\n assert_array_equal(cdist._original_cluster_map == cdist._cids[0],\n cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))\n assert cdist.parameter_map.dims == y.dims[1:]\n\n # test connecting many sensors\n logging.info(\"TEST: connecting sensors\")\n bin_map = np.zeros(shape[1:], dtype=np.bool8)\n bin_map[:3, :3] = True\n pmap = np.random.normal(0, 1, shape[1:])\n np.clip(pmap, -1, 1, pmap)\n pmap[bin_map] = 2\n cdist = NDPermutationDistribution(y, 0, 1.5)\n cdist.add_original(pmap)\n assert cdist.n_clusters == 1\n assert_array_equal(cdist._original_cluster_map == cdist._cids[0],\n cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))\n\n # test keeping sensors separate\n logging.info(\"TEST: keeping sensors separate\")\n bin_map = np.zeros(shape[1:], dtype=np.bool8)\n bin_map[:3, :3, 0] = True\n bin_map[:3, :3, 2] = True\n pmap = np.random.normal(0, 1, shape[1:])\n np.clip(pmap, -1, 1, pmap)\n pmap[bin_map] = 2\n cdist = NDPermutationDistribution(y, 1, 1.5)\n cdist.add_original(pmap)\n assert cdist.n_clusters == 2\n\n # criteria\n ds = datasets.get_uts(True)\n res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)\n assert res.clusters['duration'].min() < 0.01\n assert res.clusters['n_sensors'].min() == 1\n res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,\n mintime=0.02, minsensor=2)\n assert res.clusters['duration'].min() >= 0.02\n assert res.clusters['n_sensors'].min() == 2\n\n # 1d\n res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,\n samples=0, pmin=0.05)\n assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))\n\n # TFCE\n logging.info(\"TEST: TFCE\")\n sensor = Sensor(locs, ['0', '1', '2', '3'])\n 
sensor.set_connectivity(connect_dist=1.1)\n time = UTS(-0.1, 0.1, 4)\n scalar = Scalar('scalar', range(10), 'unit')\n dims = ('case', time, sensor, scalar)\n rng = np.random.RandomState(0)\n y = NDVar(rng.normal(0, 1, (10, 4, 4, 10)), dims)\n cdist = NDPermutationDistribution(y, 3, None)\n cdist.add_original(y.x[0])\n cdist.finalize()\n assert cdist.dist.shape == (3,)\n # I/O\n string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)\n cdist_ = pickle.loads(string)\n assert repr(cdist_) == repr(cdist)\n # find peaks\n x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],\n [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])\n tgt = np.equal(x, 7)\n peaks = find_peaks(x, cdist._connectivity)\n logging.debug(' detected: \\n%s' % (peaks.astype(int)))\n logging.debug(' target: \\n%s' % (tgt.astype(int)))\n assert_array_equal(peaks, tgt)\n # testnd permutation result\n res = testnd.ttest_1samp(y, tfce=True, samples=3)\n if sys.version_info[0] == 3:\n target = [96.84232967, 205.83207424, 425.65942084]\n else:\n target = [77.5852307, 119.1976153, 217.6270428]\n assert_allclose(np.sort(res._cdist.dist), target)\n\n # parc with TFCE on unconnected dimension\n configure(False)\n x = rng.normal(0, 1, (10, 5, 2, 4))\n time = UTS(-0.1, 0.1, 5)\n categorial = Categorial('categorial', ('a', 'b'))\n y = NDVar(x, ('case', time, categorial, sensor))\n y0 = NDVar(x[:, :, 0], ('case', time, sensor))\n y1 = NDVar(x[:, :, 1], ('case', time, sensor))\n res = testnd.ttest_1samp(y, tfce=True, samples=3)\n res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')\n res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)\n res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)\n # cdist\n assert res._cdist.shape == (4, 2, 5)\n # T-maps don't depend on connectivity\n assert_array_equal(res.t.x[:, 0], res0.t.x)\n assert_array_equal(res.t.x[:, 1], res1.t.x)\n assert_array_equal(res_parc.t.x[:, 0], res0.t.x)\n assert_array_equal(res_parc.t.x[:, 1], res1.t.x)\n # TFCE-maps should always be the same because they're unconnected\n assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)\n assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)\n assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)\n assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)\n # Probability-maps should depend on what is taken into account\n p_a = res0.compute_probability_map().x\n p_b = res1.compute_probability_map().x\n assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)\n assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)\n p_parc = res_parc.compute_probability_map()\n assert_array_equal(p_parc.x, res.compute_probability_map().x)\n assert np.all(p_parc.sub(categorial='a').x >= p_a)\n assert np.all(p_parc.sub(categorial='b').x >= p_b)\n configure(True)\n\n\ndef test_corr():\n \"Test testnd.corr()\"\n ds = datasets.get_uts(True)\n\n # add correlation\n Y = ds['Y']\n utsnd = ds['utsnd']\n utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]\n\n res = testnd.corr('utsnd', 'Y', ds=ds, samples=0)\n assert repr(res) == \"<corr 
'utsnd', 'Y', samples=0>\"\n for s, t in product('01234', (0.1, 0.2, 0.35)):\n target = test.Correlation(utsnd.sub(sensor=s, time=t), Y).r\n assert res.r.sub(sensor=s, time=t) == pytest.approx(target)\n res = testnd.corr('utsnd', 'Y', 'rm', ds=ds, samples=0)\n repr(res)\n res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, pmin=0.05)\n repr(res)\n res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, tfce=True)\n repr(res)\n\n # persistence\n string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == repr(res)\n assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)\n assert_dataobj_equal(res.p, res_.p)\n\n\ndef test_t_contrast():\n ds = datasets.get_uts()\n\n # simple contrast\n res = testnd.t_contrast_rel('uts', 'A', 'a1>a0', 'rm', ds=ds, samples=10, pmin=0.05)\n assert repr(res) == \"<t_contrast_rel 'uts', 'A', 'a1>a0', match='rm', samples=10, pmin=0.05, 7 clusters, p < .001>\"\n res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)\n assert_array_equal(res.t.x, res_.t.x)\n\n # complex contrast\n res = testnd.t_contrast_rel('uts', 'A%B', 'min(a0|b0>a1|b0, a0|b1>a1|b1)', 'rm', ds=ds, samples=10, pmin=0.05)\n res_b0 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b0'), ('a1', 'b0'), 'rm', ds=ds)\n res_b1 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b1'), ('a1', 'b1'), 'rm', ds=ds)\n assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))\n\n # persistence\n string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == repr(res)\n assert_dataobj_equal(res.p, res_.p)\n\n # contrast with \"*\"\n res = testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm', ds=ds, tail=1, samples=0)\n\n # zero variance\n ds['uts'].x[:, 10] = 0.\n with pytest.raises(ZeroVariance):\n testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm', tail=1, ds=ds, samples=0)\n\n\ndef test_labeling():\n \"Test cluster labeling\"\n shape = (4, 20)\n pmap = np.empty(shape, np.float_)\n edges = np.array([(0, 1), (0, 3), (1, 2), (2, 3)], np.uint32)\n conn = Connectivity((\n Scalar('graph', range(4), connectivity=edges),\n UTS(0, 0.01, 20)))\n criteria = None\n\n # some clusters\n pmap[:] = [[3, 3, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 8, 0, 0, 4, 4, 4, 0, 0, 0, 0, 0, 0, 4, 0],\n [0, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 4, 4],\n [0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 4, 4, 4, 0, 0, 0, 0, 0]]\n cmap, cids = label_clusters(pmap, 2, 0, conn, criteria)\n assert len(cids) == 6\n assert_array_equal(cmap > 0, np.abs(pmap) > 2)\n\n # some other clusters\n pmap[:] = [[4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0],\n [0, 4, 0, 0, 0, 0, 0, 4, 0, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 4, 4, 0, 4, 4, 0, 4, 0, 0, 0, 4, 4, 1, 0, 4, 4, 0, 0],\n [0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0]]\n cmap, cids = label_clusters(pmap, 2, 0, conn, criteria)\n assert len(cids) == 6\n assert_array_equal(cmap > 0, np.abs(pmap) > 2)\n\n\ndef test_ttest_1samp():\n \"Test testnd.ttest_1samp()\"\n ds = datasets.get_uts(True)\n\n # no clusters\n res0 = testnd.ttest_1samp('uts', sub=\"A == 'a0'\", ds=ds, samples=0)\n assert res0.p_uncorrected.min() < 0.05\n assert repr(res0) == \"<ttest_1samp 'uts', sub=\\\"A == 'a0'\\\", samples=0>\"\n\n # sub as array\n res1 = testnd.ttest_1samp('uts', sub=ds.eval(\"A == 'a0'\"), ds=ds, samples=0)\n assert repr(res1) == \"<ttest_1samp 'uts', sub=<array>, samples=0>\"\n\n # clusters without 
resampling\n res1 = testnd.ttest_1samp('uts', sub=\"A == 'a0'\", ds=ds, samples=0, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)\n assert res1.clusters.n_cases == 1\n assert 'p' not in res1.clusters\n assert repr(res1) == \"<ttest_1samp 'uts', sub=\\\"A == 'a0'\\\", samples=0, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 1 clusters>\"\n\n # persistence\n string = pickle.dumps(res1, pickle.HIGHEST_PROTOCOL)\n res1_ = pickle.loads(string)\n assert repr(res1_) == repr(res1)\n assert_dataobj_equal(res1.p_uncorrected, res1_.p_uncorrected)\n\n # clusters with resampling\n res2 = testnd.ttest_1samp('uts', sub=\"A == 'a0'\", ds=ds, samples=10, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)\n assert res2.clusters.n_cases == 1\n assert res2.samples == 10\n assert 'p' in res2.clusters\n assert repr(res2) == \"<ttest_1samp 'uts', sub=\\\"A == 'a0'\\\", samples=10, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 1 clusters, p < .001>\"\n\n # clusters with permutations\n dss = ds.sub(\"logical_and(A=='a0', B=='b0')\")[:8]\n res3 = testnd.ttest_1samp('uts', sub=\"A == 'a0'\", ds=dss, samples=10000, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)\n assert repr(res3) == \"<ttest_1samp 'uts', sub=\\\"A == 'a0'\\\", samples=255, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 2 clusters, p = .020>\"\n assert res3.clusters.n_cases == 2\n assert res3.samples == -1\n assert str(res3.clusters) == (\n 'id tstart tstop duration v p sig\\n'\n '--------------------------------------------------------\\n'\n '3 0.08 0.34 0.26 95.692 0.015686 * \\n'\n '4 0.35 0.56 0.21 81.819 0.019608 * ')\n\n # nd\n dss = ds.sub(\"A == 'a0'\")\n res = testnd.ttest_1samp('utsnd', ds=dss, samples=1)\n res = testnd.ttest_1samp('utsnd', ds=dss, pmin=0.05, samples=1)\n res = testnd.ttest_1samp('utsnd', ds=dss, tfce=True, samples=1)\n\n # TFCE properties\n res = testnd.ttest_1samp('utsnd', sub=\"A == 'a0'\", ds=ds, samples=1)\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n res = pickle.loads(string)\n tfce_clusters = res.find_clusters(pmin=0.05)\n peaks = res.find_peaks()\n assert tfce_clusters.eval(\"p.min()\") == peaks.eval(\"p.min()\")\n masked = res.masked_parameter_map(pmin=0.05)\n assert_array_equal(masked.abs().x <= res.t.abs().x, True)\n\n # zero variance\n ds['utsnd'].x[:, 1, 10] = 0.\n ds['utsnd'].x[:, 2, 10] = 0.1\n res = testnd.ttest_1samp('utsnd', ds=ds, samples=0)\n assert res.t.x[1, 10] == 0.\n assert res.t.x[2, 10] > 1e10\n\n # argument length\n with pytest.raises(ValueError):\n testnd.ttest_1samp('utsnd', sub=\"A[:-1] == 'a0'\", ds=ds, samples=0)\n\n\ndef test_ttest_ind():\n \"Test testnd.ttest_ind()\"\n ds = datasets.get_uts(True)\n\n # basic\n res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, samples=0)\n assert repr(res) == \"<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>\"\n assert res.p_uncorrected.min() < 0.05\n # persistence\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == \"<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>\"\n assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)\n # alternate argspec\n res_ = testnd.ttest_ind(\"uts[A == 'a1']\", \"uts[A == 'a0']\", ds=ds, samples=0)\n assert repr(res_) == \"<ttest_ind 'uts' (n=30), 'uts' (n=30), samples=0>\"\n assert_dataobj_equal(res_.t, res.t)\n\n # cluster\n res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)\n # persistence\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == repr(res)\n 
assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)\n\n # nd\n res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)\n assert res._cdist.n_clusters == 10\n\n # zero variance\n ds['utsnd'].x[:, 1, 10] = 0.\n res_zv = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, samples=0)\n assert_array_equal(res_zv.t.x[0], res.t.x[0])\n assert res_zv.t.x[1, 10] == 0.\n # argument mismatch\n with pytest.raises(ValueError):\n testnd.ttest_ind(ds['utsnd'], ds[:-1, 'A'], samples=0)\n\n\ndef test_ttest_rel():\n \"Test testnd.ttest_rel()\"\n ds = datasets.get_uts(True)\n\n # basic\n res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)\n assert repr(res) == \"<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), 'rm' (n=15), samples=100, p < .001>\"\n difference = res.masked_difference()\n assert difference.x.mask.sum() == 84\n c1 = res.masked_c1()\n assert c1.x.mask.sum() == 84\n assert_array_equal(c1.x.data, res.c1_mean.x)\n\n # alternate argspec\n res_ = testnd.ttest_rel(\"uts[A%B == ('a1', 'b1')]\", \"uts[A%B == ('a0', 'b0')]\", ds=ds, samples=100)\n assert repr(res_) == \"<ttest_rel 'uts', 'uts' (n=15), samples=100, p < .001>\"\n assert_dataobj_equal(res_.t, res.t)\n # alternate argspec 2\n ds1 = Dataset()\n ds1['a1b1'] = ds.eval(\"uts[A%B == ('a1', 'b1')]\")\n ds1['a0b0'] = ds.eval(\"uts[A%B == ('a0', 'b0')]\")\n res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)\n assert_dataobj_equal(res1.t, res.t)\n assert repr(res1) == \"<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p < .001>\"\n\n # persistence\n string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)\n res_ = pickle.loads(string)\n assert repr(res_) == repr(res)\n assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)\n\n # collapsing cells\n res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds, samples=0)\n assert res2.p_uncorrected.min() < 0.05\n assert res2.n == res.n\n\n # reproducibility\n res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)\n assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)\n configure(n_workers=0)\n res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)\n assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)\n configure(n_workers=True)\n sds = ds.sub(\"B=='b0'\")\n # thresholded, UTS\n configure(n_workers=0)\n res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)\n tgt = res0.find_clusters()\n configure(n_workers=True)\n res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)\n assert_dataset_equal(res1.find_clusters(), tgt)\n # thresholded, UTSND\n configure(n_workers=0)\n res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)\n tgt = res0.find_clusters()\n configure(n_workers=True)\n res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)\n assert_dataset_equal(res1.find_clusters(), tgt)\n # TFCE, UTS\n configure(n_workers=0)\n res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)\n tgt = res0.compute_probability_map()\n configure(n_workers=True)\n res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)\n assert_dataobj_equal(res1.compute_probability_map(), tgt)\n\n # zero variance\n ds['utsnd'].x[:, 1, 10] = 0.\n res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)\n assert res.t.x[1, 10] == 0\n\n # argument length\n with 
pytest.raises(ValueError):\n testnd.ttest_rel('utsnd', 'A[:-1]', match='rm', ds=ds)\n with pytest.raises(ValueError):\n testnd.ttest_rel('utsnd', 'A', match='rm[:-1]', ds=ds)\n\n\ndef test_vector():\n \"\"\"Test vector tests\"\"\"\n # single vector\n ds = datasets.get_uv(vector=True)\n res = testnd.Vector('v[:40]', ds=ds, samples=10)\n assert res.p == 0.0\n res = testnd.Vector('v[40:]', ds=ds, samples=10)\n assert res.p == 1.0\n\n # single vector with norm stat\n res_t = testnd.Vector('v[:40]', ds=ds, samples=10, norm=True)\n assert res_t.p == 0.0\n res_t = testnd.Vector('v[40:]', ds=ds, samples=10, norm=True)\n assert res_t.p == 1.0\n\n # non-space tests should raise error\n with pytest.raises(WrongDimension):\n testnd.ttest_1samp('v', ds=ds)\n with pytest.raises(WrongDimension):\n testnd.ttest_rel('v', 'A', match='rm', ds=ds)\n with pytest.raises(WrongDimension):\n testnd.ttest_ind('v', 'A', ds=ds)\n with pytest.raises(WrongDimension):\n testnd.t_contrast_rel('v', 'A', 'a0 > a1', 'rm', ds=ds)\n with pytest.raises(WrongDimension):\n testnd.corr('v', 'fltvar', ds=ds)\n with pytest.raises(WrongDimension):\n testnd.anova('v', 'A * B', ds=ds)\n\n # vector in time\n ds = datasets.get_uts(vector3d=True)\n v1 = ds[30:, 'v3d']\n v2 = ds[:30, 'v3d']\n vd = v1 - v2\n res = testnd.Vector(vd, samples=10)\n assert res.p.min() == 0.2\n difference = res.masked_difference(0.5)\n assert difference.x.mask.sum() == 288\n # diff related\n resd = testnd.VectorDifferenceRelated(v1, v2, samples=10)\n assert_dataobj_equal(resd.p, res.p, name=False)\n assert_dataobj_equal(resd.t2, res.t2, name=False)\n # diff independent\n res = VectorDifferenceIndependent(v1, v2, samples=10, norm=True)\n assert_dataobj_equal(res.difference, v1.mean('case') - v2.mean('case'), name=False)\n assert res.p.max() == 1\n assert res.p.min() == 0\n # with mp\n res = testnd.Vector(v1, samples=10)\n assert res.p.min() == 0.4\n # without mp\n configure(n_workers=0)\n res0 = testnd.Vector(v1, samples=10)\n assert_array_equal(np.sort(res0._cdist.dist), np.sort(res._cdist.dist))\n configure(n_workers=True)\n # time window\n res = testnd.Vector(v2, samples=10, tstart=0.1, tstop=0.4)\n assert res.p.min() == 0.3\n difference = res.masked_difference(0.5)\n assert difference.x.mask.sum() == 294\n\n # vector in time with norm stat\n res = testnd.Vector(vd, samples=10, norm=True)\n assert res.p.min() == 0\n difference = res.masked_difference()\n assert difference.x.mask.sum() == 297\n resd = testnd.VectorDifferenceRelated(v1, v2, samples=10, norm=True)\n assert_dataobj_equal(resd.p, res.p, name=False)\n assert_dataobj_equal(resd.difference, res.difference, name=False)\n\n v_small = v2 / 100\n res = testnd.Vector(v_small, tfce=True, samples=10, norm=True)\n assert 'WARNING' in repr(res)\n res = testnd.Vector(v_small, tfce=0.1, samples=10)\n assert res.p.min() == 0.0\n\n\ndef test_cwt():\n \"Test tests with wavelet transform\"\n ds = datasets.get_uts(True)\n ds['cwt'] = cwt_morlet(ds['utsnd'], np.arange(10, 20))\n res = testnd.ttest_rel('cwt', 'A', match='rm', ds=ds, pmin=0.05, samples=10)\n cluster = res.clusters.sub(\"p == 0\")\n assert_array_equal(cluster['frequency_min'], 10)\n assert_array_equal(cluster['frequency_max'], 19)\n\n\ndef test_merged_temporal_cluster_dist():\n \"Test use of _MergedTemporalClusterDist with testnd test results\"\n ds1 = datasets.get_uts()\n ds2 = datasets.get_uts(seed=42)\n\n anova_kw = dict(y='uts', x='A*B*rm', match='rm', pmin=0.05, samples=10)\n ttest_kw = dict(y='uts', x='A', c1='a1', c0='a0', pmin=0.05, 
samples=10)\n contrast_kw = dict(y='uts', x='A', contrast='a1>a0', pmin=0.05, samples=10)\n\n def test_merged(res1, res2):\n merged_dist = _MergedTemporalClusterDist([res1._cdist, res2._cdist])\n if isinstance(res1, testnd.anova):\n assert len(merged_dist.dist) == len(res1.effects)\n for effect, dist in merged_dist.dist.items():\n assert effect in res1.effects\n assert len(dist) == res1.samples\n else:\n assert len(merged_dist.dist) == res1.samples\n res1_clusters = merged_dist.correct_cluster_p(res1)\n res2_clusters = merged_dist.correct_cluster_p(res2)\n for clusters in [res1_clusters, res2_clusters]:\n assert 'p_parc' in clusters\n for cl in clusters.itercases():\n assert cl['p_parc'] >= cl['p']\n\n # multi-effect\n res1 = testnd.anova(ds=ds1, **anova_kw)\n res2 = testnd.anova(ds=ds2, **anova_kw)\n test_merged(res1, res2)\n\n # ttest_rel\n res1 = testnd.ttest_rel(ds=ds1, match='rm', **ttest_kw)\n res2 = testnd.ttest_rel(ds=ds2, match='rm', **ttest_kw)\n test_merged(res1, res2)\n\n # ttest_ind\n res1 = testnd.ttest_ind(ds=ds1, **ttest_kw)\n res2 = testnd.ttest_ind(ds=ds2, **ttest_kw)\n test_merged(res1, res2)\n\n # ttest_1samp\n res1 = testnd.ttest_1samp('uts', ds=ds1, pmin=0.05, samples=10)\n res2 = testnd.ttest_1samp('uts', ds=ds2, pmin=0.05, samples=10)\n test_merged(res1, res2)\n\n # t_contrast_rel\n res1 = testnd.t_contrast_rel(ds=ds1, match='rm', **contrast_kw)\n res2 = testnd.t_contrast_rel(ds=ds2, match='rm', **contrast_kw)\n test_merged(res1, res2)\n",
"import numpy as np\nimport pytest\n\nfrom eelbrain import Dataset, Factor\nfrom eelbrain._design import permute, random_factor, complement\n\n\ndef test_random_factor():\n \"\"\"Test the design module for creating an experiemnt design\"\"\"\n ds = permute((\n ('A', '123456'),\n ('Bin', '01'),\n ('B', 'abcdef'),\n ))\n n = ds.n_cases\n\n rand = random_factor(('1', '2', '3'), n, 'rand')\n nv = (rand == '1').sum()\n assert nv == (rand == '2').sum(), \"overall balancing\"\n assert nv == (rand == '3').sum(), \"overall balancing\"\n\n # test urn kwarg\n randu = random_factor(('1', '2', '3'), n, urn=[rand])\n assert (rand == randu).sum() == 0, \"`urn` arg failed\"\n nv = (randu == '1').sum()\n assert nv == 24, \"random value assignment\"\n assert nv == (randu == '2').sum(), \"overall balancing\"\n assert nv == (randu == '3').sum(), \"overall balancing\"\n\n # test sub kwarg\n sub = ds['Bin'] == '1'\n subrand = random_factor(('1', '2', '3'), n, urn=[rand], sub=sub)\n assert np.all(rand != randu), \"`urn` arg failed with `sub` arg\"\n subc = (sub == False)\n assert np.all(subrand[subc] == ''), \"values outside of sub are not ''\"\n nv = (subrand == '1').sum()\n assert nv == 12, \"random value assignment with `sub` arg\"\n assert nv == (subrand == '2').sum(), \"sub balancing\"\n assert nv == (subrand == '3').sum(), \"sub balancing\"\n\n\ndef test_complement():\n \"\"\"Test design.complement()\"\"\"\n ds = Dataset()\n ds['A'] = Factor('abcabc')\n ds['B'] = Factor('bcabca')\n ds['C'] = Factor('cabcab')\n\n # underspecified\n with pytest.raises(ValueError):\n complement(['A'], ds=ds)\n\n # correct\n comp = complement(['A', 'B'], ds=ds)\n assert np.all(comp == ds['C']), f\"Complement yielded {comp} instead of {ds['C']}\"\n\n # overspecified\n with pytest.raises(ValueError):\n complement(['A', 'B', 'C'], ds=ds)\n",
"# Author: Christian Brodbeck <[email protected]>\n\"\"\"MneExperiment class to manage data from a experiment\n\nFor testing purposed, set up an experiment class without checking for data:\n\nMneExperiment.auto_delete_cache = 'disable'\nMneExperiment.sessions = ('session',)\ne = MneExperiment('.', find_subjects=False)\n\n\"\"\"\nfrom collections import defaultdict, Sequence\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom glob import glob\nimport inspect\nfrom itertools import chain, product\nimport logging\nimport os\nfrom os.path import basename, exists, getmtime, isdir, join, relpath\nfrom pathlib import Path\nimport re\nimport shutil\nimport time\nfrom typing import Union\nimport warnings\n\nimport numpy as np\nfrom tqdm import tqdm\nimport mne\nfrom mne.baseline import rescale\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse, apply_inverse_epochs\n\nfrom .. import _report\nfrom .. import fmtxt\nfrom .. import gui\nfrom .. import load\nfrom .. import plot\nfrom .. import save\nfrom .. import table\nfrom .. import testnd\nfrom .._data_obj import (\n Datalist, Dataset, Factor, Var, SourceSpace, VolumeSourceSpace,\n align1, all_equal, assert_is_legal_dataset_key, combine)\nfrom .._exceptions import DefinitionError, DimensionMismatchError, OldVersionError\nfrom .._info import BAD_CHANNELS\nfrom .._io.pickle import update_subjects_dir\nfrom .._names import INTERPOLATE_CHANNELS\nfrom .._meeg import new_rejection_ds\nfrom .._mne import (\n dissolve_label, labels_from_mni_coords, rename_label, combination_label,\n morph_source_space, shift_mne_epoch_trigger, find_source_subject,\n label_from_annot,\n)\nfrom ..mne_fixes import (\n write_labels_to_annot, _interpolate_bads_eeg, _interpolate_bads_meg)\nfrom ..mne_fixes._trans import hsp_equal, mrk_equal\nfrom ..mne_fixes._source_space import merge_volume_source_space, prune_volume_source_space\nfrom .._ndvar import concatenate, cwt_morlet, neighbor_correlation\nfrom ..fmtxt import List, Report, Image, read_meta\nfrom .._stats.stats import ttest_t\nfrom .._stats.testnd import _MergedTemporalClusterDist\nfrom .._text import enumeration, plural\nfrom .._utils import IS_WINDOWS, ask, subp, keydefaultdict, log_level, ScreenHandler\nfrom .._utils.mne_utils import fix_annot_names, is_fake_mri\nfrom .definitions import FieldCode, find_dependent_epochs, find_epochs_vars, log_dict_change, log_list_change\nfrom .epochs import PrimaryEpoch, SecondaryEpoch, SuperEpoch, EpochCollection, assemble_epochs, decim_param\nfrom .exceptions import FileDeficient, FileMissing\nfrom .experiment import FileTree\nfrom .groups import assemble_groups\nfrom .parc import (\n assemble_parcs,\n FS_PARC, FSA_PARC, SEEDED_PARC_RE,\n CombinationParc, EelbrainParc, FreeSurferParc, FSAverageParc, SeededParc,\n IndividualSeededParc, LabelParc\n)\nfrom .preprocessing import (\n assemble_pipeline, RawSource, RawFilter, RawICA,\n compare_pipelines, ask_to_delete_ica_files)\nfrom .test_def import (\n Test, EvokedTest,\n ROITestResult, ROI2StageResult, TestDims, TwoStageTest,\n assemble_tests, find_test_vars,\n)\nfrom .variable_def import Variables\n\n\n# current cache state version\nCACHE_STATE_VERSION = 12\n# History:\n# 10: input_state: share forward-solutions between sessions\n# 11: add samplingrate to epochs\n# 12: store test-vars as Variables object\n\n# paths\nLOG_FILE = join('{root}', 'eelbrain {name}.log')\nLOG_FILE_OLD = join('{root}', '.eelbrain.log')\n\n# Allowable parameters\nCOV_PARAMS = {'epoch', 'session', 'method', 'reg', 'keep_sample_mean', 
'reg_eval_win_pad'}\nINV_METHODS = ('MNE', 'dSPM', 'sLORETA', 'eLORETA')\nSRC_RE = re.compile(r'^(ico|vol)-(\\d+)(?:-(cortex|brainstem))?$')\ninv_re = re.compile(r\"^\"\n r\"(free|fixed|loose\\.\\d+|vec)-\" # orientation constraint\n r\"(\\d*\\.?\\d+)-\" # SNR\n rf\"({'|'.join(INV_METHODS)})\" # method\n r\"(?:-((?:0\\.)?\\d+))?\" # depth weighting\n r\"(?:-(pick_normal))?\"\n r\"$\") # pick normal\n\n# Eelbrain 0.24 raw/preprocessing pipeline\nLEGACY_RAW = {\n '0-40': RawFilter('raw', None, 40, method='iir'),\n '0.1-40': RawFilter('raw', 0.1, 40, l_trans_bandwidth=0.08, filter_length='60s'),\n '0.2-40': RawFilter('raw', 0.2, 40, l_trans_bandwidth=0.08, filter_length='60s'),\n '1-40': RawFilter('raw', 1, 40, method='iir'),\n}\n\n\nCACHE_HELP = \"A change in the {experiment} class definition (or the input files) means that some {filetype} files no longer reflect the current definition. In order to keep local results consistent with the definition, these files should be deleted. If you want to keep a copy of the results, be sure to move them to a different location before proceding. If you think the change in the definition was a mistake, you can select 'abort', revert the change and try again.\"\n\n################################################################################\n\n\ndef _mask_ndvar(ds, name):\n y = ds[name]\n if y.source.parc is None:\n raise RuntimeError('%r has no parcellation' % (y,))\n mask = y.source.parc.startswith('unknown')\n if mask.any():\n ds[name] = y.sub(source=np.invert(mask))\n\n\ndef _time_str(t):\n \"String for representing a time value\"\n if t is None:\n return ''\n else:\n return '%i' % round(t * 1000)\n\n\ndef _time_window_str(window, delim='-'):\n \"String for representing a time window\"\n return delim.join(map(_time_str, window))\n\n\ndef guess_y(ds, default=None):\n \"Given a dataset, guess the dependent variable\"\n for y in ('srcm', 'src', 'meg', 'eeg'):\n if y in ds:\n return y\n if default is not None:\n return default\n raise RuntimeError(r\"Could not find data in {ds}\")\n\n\nclass DictSet:\n \"\"\"Helper class for list of dicts without duplicates\"\"\"\n def __init__(self):\n self._list = []\n\n def __repr__(self):\n return \"DictSet(%s)\" % self._list\n\n def __iter__(self):\n return self._list.__iter__()\n\n def add(self, item):\n if item not in self._list:\n self._list.append(item)\n\n def update(self, items):\n for item in items:\n self.add(item)\n\n\nclass CacheDict(dict):\n\n def __init__(self, func, key_vars, *args):\n super(CacheDict, self).__init__()\n self._func = func\n self._key_vars = key_vars\n self._args = args\n\n def __getitem__(self, key):\n if key in self:\n return dict.__getitem__(self, key)\n\n if isinstance(key, str):\n out = self._func(*self._args, **{self._key_vars: key})\n else:\n out = self._func(*self._args, **dict(zip(self._key_vars, key)))\n\n self[key] = out\n return out\n\n\ndef cache_valid(mtime, *source_mtimes):\n \"Determine whether mtime is up-to-date\"\n return (\n mtime is not None\n and all(t is not None for t in source_mtimes)\n and mtime > max(source_mtimes))\n\n\ntemp = {\n # MEG\n 'equalize_evoked_count': ('', 'eq'),\n # locations\n 'raw-sdir': join('{root}', 'meg'),\n 'raw-dir': join('{raw-sdir}', '{subject}'),\n\n # raw input files\n 'trans-file': join('{raw-dir}', '{mrisubject_visit}-trans.fif'),\n # log-files (eye-tracker etc.)\n 'log-dir': join('{raw-dir}', 'logs'),\n 'edf-file': join('{log-dir}', '*.edf'),\n\n # created input files\n 'ica-file': join('{raw-dir}', '{subject_visit} 
{raw}-ica.fif'), # hard-coded in RawICA\n 'rej-dir': join('{raw-dir}', 'epoch selection'),\n 'rej-file': join('{rej-dir}', '{session}_{sns_kind}_{epoch_visit}-{rej}.pickled'),\n\n # cache\n 'cache-dir': join('{root}', 'eelbrain-cache'),\n # raw\n 'raw-cache-dir': join('{cache-dir}', 'raw', '{subject}'),\n 'raw-cache-base': join('{raw-cache-dir}', '{recording} {raw}'),\n 'cached-raw-file': '{raw-cache-base}-raw.fif',\n 'event-file': '{raw-cache-base}-evts.pickled',\n 'interp-file': '{raw-cache-base}-interp.pickled',\n 'cached-raw-log-file': '{raw-cache-base}-raw.log',\n\n # forward modeling:\n 'fwd-file': join('{raw-cache-dir}', '{recording}-{mrisubject}-{src}-fwd.fif'),\n # sensor covariance\n 'cov-dir': join('{cache-dir}', 'cov'),\n 'cov-base': join('{cov-dir}', '{subject_visit}', '{sns_kind} {cov}-{rej}'),\n 'cov-file': '{cov-base}-cov.fif',\n 'cov-info-file': '{cov-base}-info.txt',\n # inverse solution\n 'inv-file': join('{raw-cache-dir}', 'inv', '{mrisubject} {src} {recording} {sns_kind} {cov} {rej} {inv-cache}-inv.fif'),\n # evoked\n 'evoked-dir': join('{cache-dir}', 'evoked'),\n 'evoked-file': join('{evoked-dir}', '{subject}', '{sns_kind} {epoch_visit} {model} {evoked_kind}-ave.fif'),\n # test files\n 'test-dir': join('{cache-dir}', 'test'),\n 'test-file': join('{test-dir}', '{analysis} {group}', '{test_desc} {test_dims}.pickled'),\n\n # MRIs\n 'common_brain': 'fsaverage',\n # MRI base files\n 'mri-sdir': join('{root}', 'mri'),\n 'mri-dir': join('{mri-sdir}', '{mrisubject}'),\n 'bem-dir': join('{mri-dir}', 'bem'),\n 'mri-cfg-file': join('{mri-dir}', 'MRI scaling parameters.cfg'),\n 'mri-file': join('{mri-dir}', 'mri', 'orig.mgz'),\n 'bem-file': join('{bem-dir}', '{mrisubject}-inner_skull-bem.fif'),\n 'bem-sol-file': join('{bem-dir}', '{mrisubject}-*-bem-sol.fif'), # removed for 0.24\n 'head-bem-file': join('{bem-dir}', '{mrisubject}-head.fif'),\n 'src-file': join('{bem-dir}', '{mrisubject}-{src}-src.fif'),\n 'fiducials-file': join('{bem-dir}', '{mrisubject}-fiducials.fif'),\n # Labels\n 'hemi': ('lh', 'rh'),\n 'label-dir': join('{mri-dir}', 'label'),\n 'annot-file': join('{label-dir}', '{hemi}.{parc}.annot'),\n\n # (method) plots\n 'methods-dir': join('{root}', 'methods'),\n\n # result output files\n # data processing parameters\n # > group\n # > kind of test\n # > single-subject\n # > kind of test\n # > subject\n 'res-dir': join('{root}', 'results'),\n 'res-file': join('{res-dir}', '{analysis}', '{resname}.{ext}'),\n 'res-deep-file': join('{res-dir}', '{analysis}', '{folder}', '{resname}.{ext}'),\n 'report-file': join('{res-dir}', '{analysis} {group}', '{folder}', '{test_desc}.html'),\n 'group-mov-file': join('{res-dir}', '{analysis} {group}', '{epoch_visit} {test_options} {resname}.mov'),\n 'subject-res-dir': join('{res-dir}', '{analysis} subjects'),\n 'subject-spm-report': join('{subject-res-dir}', '{test} {epoch_visit} {test_options}', '{subject}.html'),\n 'subject-mov-file': join('{subject-res-dir}', '{epoch_visit} {test_options} {resname}', '{subject}.mov'),\n\n # plots\n # plot corresponding to a report (and using same folder structure)\n 'res-plot-root': join('{root}', 'result plots'),\n 'res-plot-dir': join('{res-plot-root}', '{analysis} {group}', '{folder}', '{test_desc}'),\n\n # besa\n 'besa-root': join('{root}', 'besa'),\n 'besa-trig': join('{besa-root}', '{subject}', '{subject}_{recording}_{epoch_visit}_triggers.txt'),\n 'besa-evt': join('{besa-root}', '{subject}', '{subject}_{recording}_{epoch_visit}[{rej}].evt'),\n\n # MRAT\n 'mrat_condition': '',\n 'mrat-root': 
join('{root}', 'mrat'),\n 'mrat-sns-root': join('{mrat-root}', '{sns_kind}', '{epoch_visit} {model} {evoked_kind}'),\n 'mrat-src-root': join('{mrat-root}', '{src_kind}', '{epoch_visit} {model} {evoked_kind}'),\n 'mrat-sns-file': join('{mrat-sns-root}', '{mrat_condition}', '{mrat_condition}_{subject}-ave.fif'),\n 'mrat_info-file': join('{mrat-root}', '{subject} info.txt'),\n 'mrat-src-file': join('{mrat-src-root}', '{mrat_condition}', '{mrat_condition}_{subject}'),\n}\n\n\nclass MneExperiment(FileTree):\n \"\"\"Analyze an MEG experiment (gradiometer only) with MNE\n\n Parameters\n ----------\n root : str | None\n the root directory for the experiment (usually the directory\n containing the 'meg' and 'mri' directories). The experiment can be\n initialized without the root for testing purposes.\n find_subjects : bool\n Automatically look for subjects in the MEG-directory (default\n True). Set ``find_subjects=False`` to initialize the experiment\n without any files.\n ...\n Initial state parameters.\n\n Notes\n -----\n .. seealso::\n Guide on using :ref:`experiment-class-guide`.\n \"\"\"\n _safe_delete = 'cache-dir'\n path_version = 2\n screen_log_level = logging.INFO\n auto_delete_results = False\n auto_delete_cache = True\n # what to do when the experiment class definition changed:\n # True: delete outdated files\n # False: raise an error\n # 'disable': ignore it\n # 'debug': prompt with debug options\n cache_inv = True # Whether to cache inverse solution\n # moderate speed gain for loading source estimates (34 subjects: 20 vs 70 s)\n # hard drive space ~ 100 mb/file\n\n # tuple (if the experiment has multiple sessions)\n sessions = None\n visits = ('',)\n\n # Raw preprocessing pipeline\n raw = {}\n\n # add this value to all trigger times\n trigger_shift = 0\n\n # variables for automatic labeling {name: {trigger: label, triggers: label}}\n variables = {}\n\n # Default values for epoch definitions\n epoch_default = {'decim': 5}\n\n # named epochs\n epochs = {}\n\n # Rejection\n # =========\n # eog_sns: The sensors to plot separately in the rejection GUI. The default\n # is the two MEG sensors closest to the eyes.\n _eog_sns = {None: (),\n 'KIT-157': ('MEG 143', 'MEG 151'),\n 'KIT-208': ('MEG 087', 'MEG 130'),\n 'KIT-UMD-1': ('MEG 042', 'MEG 025'),\n 'KIT-UMD-2': ('MEG 042', 'MEG 025'),\n 'KIT-UMD-3': ('MEG 042', 'MEG 025'),\n 'KIT-BRAINVISION': ('HEOGL', 'HEOGR', 'VEOGb'),\n 'neuromag306mag': ('MEG 0121', 'MEG 1411')}\n #\n # artifact_rejection dict:\n #\n # kind : 'manual' | 'make'\n # How the rejection is derived:\n # 'manual': manually create a rejection file (use the selection GUI\n # through .make_epoch_selection())\n # 'make' a rejection file is created by the user\n # interpolation : bool\n # enable by-epoch channel interpolation\n #\n # For manual rejection\n # ^^^^^^^^^^^^^^^^^^^^\n _artifact_rejection = {\n '': {'kind': None},\n 'man': {'kind': 'manual', 'interpolation': True},\n }\n artifact_rejection = {}\n\n exclude = {} # field_values to exclude (e.g. subjects)\n\n # groups can be defined as subject lists: {'group': ('member1', 'member2', ...)}\n # or by exclusion: {'group': {'base': 'all', 'exclude': ('member1', 'member2')}}\n groups = {}\n\n # whether to look for and load eye tracker data when loading raw files\n has_edf = defaultdict(lambda: False)\n\n # Pattern for subject names. 
The first group is used to determine what\n # MEG-system the data was recorded from\n subject_re = r'(R|S|A|Y|AD|QP)(\\d{3,})$'\n # MEG-system (legacy variable).\n meg_system = None\n\n # kwargs for regularization of the covariance matrix (see .make_cov())\n _covs = {'auto': {'epoch': 'cov', 'method': 'auto'},\n 'bestreg': {'epoch': 'cov', 'reg': 'best'},\n 'reg': {'epoch': 'cov', 'reg': True},\n 'noreg': {'epoch': 'cov', 'reg': None},\n 'emptyroom': {'session': 'emptyroom', 'reg': None}}\n\n # MRI subject names: {subject: mrisubject} mappings\n # selected with e.set(mri=dict_name)\n # default is identity (mrisubject = subject)\n _mri_subjects = {'': keydefaultdict(lambda s: s)}\n\n # Where to search for subjects (defined as a template name). If the\n # experiment searches for subjects automatically, it scans this directory\n # for subfolders matching subject_re.\n _subject_loc = 'raw-sdir'\n\n # Parcellations\n __parcs = {\n 'aparc.a2005s': FS_PARC,\n 'aparc.a2009s': FS_PARC,\n 'aparc': FS_PARC,\n 'aparc.DKTatlas': FS_PARC,\n 'cortex': LabelParc(('cortex',), ('lateral', 'medial')),\n 'PALS_B12_Brodmann': FSA_PARC,\n 'PALS_B12_Lobes': FSA_PARC,\n 'PALS_B12_OrbitoFrontal': FSA_PARC,\n 'PALS_B12_Visuotopic': FSA_PARC,\n 'lobes': EelbrainParc(True, ('lateral', 'medial')),\n 'lobes-op': CombinationParc('lobes', {'occipitoparietal': \"occipital + parietal\"}, ('lateral', 'medial')),\n 'lobes-ot': CombinationParc('lobes', {'occipitotemporal': \"occipital + temporal\"}, ('lateral', 'medial')),\n }\n parcs = {}\n\n # Frequencies: lowbound, highbound, step\n _freqs = {'gamma': {'frequencies': np.arange(25, 50, 2),\n 'n_cycles': 5}}\n freqs = {}\n\n # basic templates to use. Can be a string referring to a templates\n # dictionary in the module level _temp dictionary, or a templates\n # dictionary\n _templates = temp\n # specify additional templates\n _values = {}\n # specify defaults for specific fields (e.g. specify the initial subject\n # name)\n defaults = {}\n\n # model order: list of factors in the order in which models should be built\n # (default for factors not in this list is alphabetic)\n _model_order = []\n\n # Backup\n # ------\n # basic state for a backup\n _backup_state = {'subject': '*', 'mrisubject': '*', 'session': '*', 'raw': 'raw'}\n # files to back up, together with state modifications on the basic state\n _backup_files = (('rej-file', {'raw': '*', 'epoch': '*', 'rej': '*'}),\n ('trans-file', {}),\n ('mri-cfg-file', {}),\n ('log-dir', {}),)\n\n # Tests\n # -----\n # specify tests as (test_type, model, test_parameter) tuple. For example,\n # (\"anova\", \"condition\", \"condition*subject\")\n # (\"t_contrast_rel\", \"ref%loc\", \"+min(ref|left>nref|*, ref|right>nref|*)\")\n # Make sure dictionary keys (test names) are appropriate for filenames.\n # tests imply a model which is set automatically\n tests = {}\n _empty_test = False # for TRFExperiment\n _cluster_criteria = {\n '': {'time': 0.025, 'sensor': 4, 'source': 10},\n 'all': {},\n '10ms': {'time': 0.01, 'sensor': 4, 'source': 10},\n 'large': {'time': 0.025, 'sensor': 8, 'source': 20},\n }\n\n # plotting\n # --------\n _brain_plot_defaults = {'surf': 'inflated'}\n brain_plot_defaults = {}\n\n def __init__(self, root=None, find_subjects=True, **state):\n # checks\n if hasattr(self, 'cluster_criteria'):\n raise AttributeError(\"MneExperiment subclasses can not have a .cluster_criteria attribute anymore. 
Please remove the attribute, delete the eelbrain-cache folder and use the select_clusters analysis parameter.\")\n\n        # create attributes (overwrite class attributes)\n        self._mri_subjects = self._mri_subjects.copy()\n        self._templates = self._templates.copy()\n        # templates version\n        if self.path_version == 0:\n            self._templates['raw-dir'] = join('{raw-sdir}', 'meg', 'raw')\n            raw_def = {**LEGACY_RAW, 'raw': RawSource('{subject}_{recording}_clm-raw.fif'), **self.raw}\n        elif self.path_version == 1:\n            raw_def = {**LEGACY_RAW, 'raw': RawSource(), **self.raw}\n        elif self.path_version == 2:\n            raw_def = {'raw': RawSource(), **self.raw}\n        else:\n            raise ValueError(f\"MneExperiment.path_version={self.path_version}; needs to be 0, 1 or 2\")\n        # update templates with _values\n        for cls in reversed(inspect.getmro(self.__class__)):\n            if hasattr(cls, '_values'):\n                self._templates.update(cls._values)\n\n        FileTree.__init__(self)\n        self._log = log = logging.Logger(self.__class__.__name__, logging.DEBUG)\n\n        ########################################################################\n        # sessions\n        if not self.sessions:\n            raise TypeError(\"The MneExperiment.sessions parameter needs to be specified. The session name is contained in your raw data files. For example if your file is named `R0026_mysession-raw.fif` your session name is 'mysession' and you should set MneExperiment.sessions to 'mysession'.\")\n        elif isinstance(self.sessions, str):\n            self._sessions = (self.sessions,)\n        elif isinstance(self.sessions, Sequence):\n            self._sessions = tuple(self.sessions)\n        else:\n            raise TypeError(f\"MneExperiment.sessions={self.sessions!r}; needs to be a string or a tuple\")\n        self._visits = (self.visits,) if isinstance(self.visits, str) else tuple(self.visits)\n\n        ########################################################################\n        # subjects\n        if root is None:\n            find_subjects = False\n        else:\n            root = self.get('root', root=root)\n\n        if find_subjects:\n            subject_re = re.compile(self.subject_re)\n            sub_dir = self.get(self._subject_loc)\n            if not exists(sub_dir):\n                raise IOError(f\"Subjects directory {sub_dir}: does not exist. 
To initialize {self.__class__.__name__} without data, initialize with root=None or find_subjects=False\")\n            subjects = [s for s in os.listdir(sub_dir) if subject_re.match(s) and isdir(join(sub_dir, s))]\n            if len(subjects) == 0:\n                log.warning(f\"No subjects found in {sub_dir}\")\n            subjects.sort()\n            subjects = tuple(subjects)\n        else:\n            subjects = ()\n\n        ########################################################################\n        # groups\n        self._groups = assemble_groups(self.groups, set(subjects))\n\n        ########################################################################\n        # Preprocessing\n        skip = {'root', 'subject', 'recording', 'raw'}\n        raw_dir = self._partial('raw-dir', skip)\n        cache_path = self._partial('cached-raw-file', skip)\n        self._raw = assemble_pipeline(raw_def, raw_dir, cache_path, root, self._sessions, log)\n\n        raw_pipe = self._raw['raw']\n        # legacy connectivity determination\n        if raw_pipe.sysname is None:\n            if self.meg_system is not None:\n                raw_pipe.sysname = self.meg_system\n        # update templates\n        self._register_constant('raw-file', raw_pipe.path)\n\n        ########################################################################\n        # variables\n        self._variables = Variables(self.variables)\n        self._variables._check_trigger_vars()\n\n        ########################################################################\n        # epochs\n        epoch_default = {'session': self._sessions[0], **self.epoch_default}\n        self._epochs = assemble_epochs(self.epochs, epoch_default)\n\n        ########################################################################\n        # epoch rejection\n        artifact_rejection = {}\n        for name, params in chain(self._artifact_rejection.items(), self.artifact_rejection.items()):\n            if params['kind'] in ('manual', 'make', None):\n                artifact_rejection[name] = params.copy()\n            elif params['kind'] == 'ica':\n                raise ValueError(f\"kind={params['kind']!r} in artifact_rejection {name!r}; The ICA option has been removed, use the RawICA raw pipe instead.\")\n            else:\n                raise ValueError(f\"kind={params['kind']!r} in artifact_rejection {name!r}\")\n        self._artifact_rejection = artifact_rejection\n\n        ########################################################################\n        # noise covariance\n        for k, params in self._covs.items():\n            params = set(params)\n            n_datasource = ('epoch' in params) + ('session' in params)\n            if n_datasource != 1:\n                if n_datasource == 0:\n                    raise ValueError(\"Cov %s has neither epoch nor session \"\n                                     \"entry\" % k)\n                raise ValueError(\"Cov %s has both epoch and session entry\" % k)\n            if params.difference(COV_PARAMS):\n                raise ValueError(\"Cov %s has unused entries: %s\" %\n                                 (k, ', '.join(params.difference(COV_PARAMS))))\n\n        ########################################################################\n        # parcellations\n        ###############\n        # make : can be made if non-existent\n        # morph_from_fsaverage : can be morphed from fsaverage to other subjects\n        self._parcs = assemble_parcs(chain(self.__parcs.items(), self.parcs.items()))\n        parc_values = list(self._parcs.keys())\n        parc_values.append('')\n\n        ########################################################################\n        # frequency\n        freqs = {}\n        for name, f in chain(self._freqs.items(), self.freqs.items()):\n            if name in freqs:\n                raise ValueError(\"Frequency %s defined twice\" % name)\n            elif 'frequencies' not in f:\n                raise KeyError(\"Frequency values missing for %s\" % name)\n            elif 'n_cycles' not in f:\n                raise KeyError(\"Number of cycles not defined for %s\" % name)\n            freqs[name] = f\n\n        self._freqs = freqs\n\n        
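# NOTE (editorial sketch, not from the original source): the loop above merges the class-level\n        # frequency defaults with user-supplied `freqs` entries and validates them. A subclass could\n        # define, for example:\n        #     freqs = {'alpha': {'frequencies': np.arange(8, 13), 'n_cycles': 3}}\n        # (band name and values here are hypothetical); each entry must provide the 'frequencies'\n        # and 'n_cycles' keys checked above.\n        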
########################################################################\n # tests\n self._tests = assemble_tests(self.tests)\n test_values = sorted(self._tests)\n if self._empty_test:\n test_values.insert(0, '')\n\n ########################################################################\n # Experiment class setup\n ########################\n self._register_field('mri', sorted(self._mri_subjects), allow_empty=True)\n self._register_field('subject', subjects or None, repr=True)\n self._register_field('group', self._groups.keys(), 'all', post_set_handler=self._post_set_group)\n\n raw_default = sorted(self.raw)[0] if self.raw else None\n self._register_field('raw', sorted(self._raw), default=raw_default, repr=True)\n self._register_field('rej', self._artifact_rejection.keys(), 'man', allow_empty=True)\n\n # epoch\n epoch_keys = sorted(self._epochs)\n for default_epoch in epoch_keys:\n if isinstance(self._epochs[default_epoch], PrimaryEpoch):\n break\n else:\n default_epoch = None\n self._register_field('epoch', epoch_keys, default_epoch, repr=True)\n self._register_field('session', self._sessions, depends_on=('epoch',), slave_handler=self._update_session, repr=True)\n self._register_field('visit', self._visits, allow_empty=True, repr=True)\n\n # cov\n if 'bestreg' in self._covs:\n default_cov = 'bestreg'\n else:\n default_cov = None\n self._register_field('cov', sorted(self._covs), default_cov)\n self._register_field('inv', default='free-3-dSPM', eval_handler=self._eval_inv, post_set_handler=self._post_set_inv)\n self._register_field('model', eval_handler=self._eval_model)\n self._register_field('test', test_values, post_set_handler=self._post_set_test, allow_empty=self._empty_test, repr=False)\n self._register_field('parc', parc_values, 'aparc', eval_handler=self._eval_parc, allow_empty=True)\n self._register_field('freq', self._freqs.keys())\n self._register_field('src', default='ico-4', eval_handler=self._eval_src)\n self._register_field('connectivity', ('', 'link-midline'), allow_empty=True)\n self._register_field('select_clusters', self._cluster_criteria.keys(), allow_empty=True)\n\n # slave fields\n self._register_field('mrisubject', depends_on=('mri', 'subject'), slave_handler=self._update_mrisubject, repr=False)\n self._register_field('src-name', depends_on=('src',), slave_handler=self._update_src_name, repr=False)\n self._register_field('inv-cache', depends_on='inv', slave_handler=self._update_inv_cache, repr=False)\n\n # fields used internally\n self._register_field('analysis', repr=False)\n self._register_field('test_options', repr=False)\n self._register_field('name', repr=False)\n self._register_field('folder', repr=False)\n self._register_field('resname', repr=False)\n self._register_field('ext', repr=False)\n self._register_field('test_dims', repr=False)\n\n # compounds\n self._register_compound('sns_kind', ('raw',))\n self._register_compound('src_kind', ('sns_kind', 'cov', 'mri', 'src-name', 'inv'))\n self._register_compound('recording', ('session', 'visit'))\n self._register_compound('subject_visit', ('subject', 'visit'))\n self._register_compound('mrisubject_visit', ('mrisubject', 'visit'))\n self._register_compound('epoch_visit', ('epoch', 'visit'))\n self._register_compound('evoked_kind', ('rej', 'equalize_evoked_count'))\n self._register_compound('test_desc', ('epoch', 'visit', 'test', 'test_options'))\n\n # Define make handlers\n self._bind_cache('cov-file', self.make_cov)\n self._bind_cache('src-file', self.make_src)\n self._bind_cache('fwd-file', self.make_fwd)\n\n # 
currently only used for .rm()\n self._secondary_cache['cached-raw-file'] = ('event-file', 'interp-file', 'cached-raw-log-file')\n\n ########################################################################\n # logger\n ########\n # log-file\n if root:\n log_file = LOG_FILE.format(root=root, name=self.__class__.__name__)\n log_file_old = LOG_FILE_OLD.format(root=root)\n if exists(log_file_old):\n os.rename(log_file_old, log_file)\n handler = logging.FileHandler(log_file)\n formatter = logging.Formatter(\"%(levelname)-8s %(asctime)s %(message)s\",\n \"%m-%d %H:%M\") # %(name)-12s\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n log.addHandler(handler)\n # Terminal log\n handler = ScreenHandler()\n self._screen_log_level = log_level(self.screen_log_level)\n handler.setLevel(self._screen_log_level)\n log.addHandler(handler)\n self._screen_log_handler = handler\n\n # log package versions\n from .. import __version__\n log.info(\"*** %s initialized with root %s on %s ***\", self.__class__.__name__, \n root, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n msg = \"Using eelbrain %s, mne %s.\" % (__version__, mne.__version__)\n if any('dev' in v for v in (__version__, mne.__version__)):\n log.warning(f\"{msg} Development versions are more likely to contain errors.\")\n else:\n log.info(msg)\n\n if self.auto_delete_cache == 'disable':\n log.warning(\"Cache-management disabled\")\n return\n\n ########################################################################\n # Finalize\n ##########\n # register experimental features\n self._subclass_init()\n\n # Check that the template model is complete\n self._find_missing_fields()\n\n # set initial values\n self.set(**state)\n self._store_state()\n\n ########################################################################\n # Cache\n #######\n if not root:\n return\n\n # loading events will create cache-dir\n cache_dir = self.get('cache-dir')\n cache_dir_existed = exists(cache_dir)\n\n # collect input file information\n # ==============================\n raw_missing = [] # [(subject, recording), ...]\n subjects_with_raw_changes = set() # {subject, ...}\n events = {} # {(subject, recording): event_dataset}\n\n # saved mtimes\n input_state_file = join(cache_dir, 'input-state.pickle')\n if exists(input_state_file):\n input_state = load.unpickle(input_state_file)\n if input_state['version'] < 10:\n input_state = None\n elif input_state['version'] > CACHE_STATE_VERSION:\n raise RuntimeError(\n \"You are trying to initialize an experiment with an older \"\n \"version of Eelbrain than that which wrote the cache. 
If \"\n \"you really need this, delete the eelbrain-cache folder \"\n \"and try again.\")\n else:\n input_state = None\n\n if input_state is None:\n input_state = {\n 'version': CACHE_STATE_VERSION,\n 'raw-mtimes': {},\n 'fwd-sessions': {s: {} for s in subjects},\n }\n\n # collect current events and mtime\n raw_mtimes = input_state['raw-mtimes']\n pipe = self._raw['raw']\n with self._temporary_state:\n for subject, visit, recording in self.iter(('subject', 'visit', 'recording'), group='all', raw='raw'):\n key = subject, recording\n mtime = pipe.mtime(subject, recording, bad_chs=False)\n if mtime is None:\n raw_missing.append(key)\n continue\n # events\n events[key] = self.load_events(add_bads=False, data_raw=False)\n if key not in raw_mtimes or mtime != raw_mtimes[key]:\n subjects_with_raw_changes.add((subject, visit))\n raw_mtimes[key] = mtime\n # log missing raw files\n if raw_missing:\n log.debug(\"Raw files missing:\")\n missing = defaultdict(list)\n for subject, recording in raw_missing:\n missing[subject].append(recording)\n for subject, recordings in missing.items():\n log.debug(f\" {subject}: {', '.join(recordings)}\")\n\n # check for digitizer data differences\n # ====================================\n # Coordinate frames:\n # MEG (markers) ==raw-file==> head shape ==trans-file==> MRI\n #\n # - raw files with identical head shapes can share trans-file (head-mri)\n # - raw files with identical MEG markers (and head shape) can share\n # forward solutions\n # - SuperEpochs currently need to have a single forward solution,\n # hence marker positions need to be the same between sub-epochs\n if subjects_with_raw_changes:\n log.info(\"Raw input files changed, checking digitizer data\")\n super_epochs = [epoch for epoch in self._epochs.values() if isinstance(epoch, SuperEpoch)]\n for subject, visit in subjects_with_raw_changes:\n # find unique digitizer datasets\n head_shape = None\n markers = [] # unique MEG marker measurements\n marker_ids = {} # {recording: index in markers}\n dig_missing = [] # raw files without dig\n for recording in self.iter('recording', subject=subject, visit=visit):\n if (subject, recording) in raw_missing:\n continue\n raw = self.load_raw(False)\n dig = raw.info['dig']\n if dig is None:\n dig_missing.append(recording)\n continue\n elif head_shape is None:\n head_shape = dig\n elif not hsp_equal(dig, head_shape):\n raise FileDeficient(f\"Raw file {recording} for {subject} has head shape that is different from {enumeration(marker_ids)}; consider defining different visits.\")\n\n # find if marker pos already exists\n for i, dig_i in enumerate(markers):\n if mrk_equal(dig, dig_i):\n marker_ids[recording] = i\n break\n else:\n marker_ids[recording] = len(markers)\n markers.append(dig)\n\n # checks for missing digitizer data\n if len(markers) > 1:\n if dig_missing:\n n = len(dig_missing)\n raise FileDeficient(f\"The raw {plural('file', n)} for {subject}, {plural('recording', n)} {enumeration(dig_missing)} {plural('is', n)} missing digitizer information\")\n for epoch in super_epochs:\n if len(set(marker_ids[s] for s in epoch.sessions)) > 1:\n groups = defaultdict(list)\n for s in epoch.sessions:\n groups[marker_ids[s]].append(s)\n group_desc = ' vs '.join('/'.join(group) for group in groups.values())\n raise NotImplementedError(f\"SuperEpoch {epoch.name} has sessions with incompatible marker positions ({group_desc}); SuperEpochs with different forward solutions are not implemented.\")\n\n # determine which sessions to use for forward solutions\n # -> {for_session: 
use_session}\n use_for_session = input_state['fwd-sessions'].setdefault(subject, {})\n # -> {marker_id: use_session}, initialize with previously used sessions\n use_for_id = {marker_ids[s]: s for s in use_for_session.values() if s in marker_ids}\n for recording in sorted(marker_ids):\n mrk_id = marker_ids[recording]\n if recording in use_for_session:\n assert mrk_id == marker_ids[use_for_session[recording]]\n continue\n elif mrk_id not in use_for_id:\n use_for_id[mrk_id] = recording\n use_for_session[recording] = use_for_id[mrk_id]\n # for files missing digitizer, use singe available fwd-recording\n for recording in dig_missing:\n if use_for_id:\n assert len(use_for_id) == 1\n use_for_session[recording] = use_for_id[0]\n\n # save input-state\n if not cache_dir_existed:\n os.makedirs(cache_dir, exist_ok=True)\n save.pickle(input_state, input_state_file)\n self._dig_sessions = pipe._dig_sessions = input_state['fwd-sessions'] # {subject: {for_recording: use_recording}}\n\n # Check the cache, delete invalid files\n # =====================================\n save_state = new_state = {\n 'version': CACHE_STATE_VERSION,\n 'raw': {k: v.as_dict() for k, v in self._raw.items()},\n 'groups': self._groups,\n 'epochs': {k: v.as_dict() for k, v in self._epochs.items()},\n 'tests': {k: v.as_dict() for k, v in self._tests.items()},\n 'parcs': {k: v.as_dict() for k, v in self._parcs.items()},\n 'events': events,\n }\n cache_state_path = join(cache_dir, 'cache-state.pickle')\n if exists(cache_state_path):\n # check time stamp\n # ================\n state_mtime = getmtime(cache_state_path)\n now = time.time() + IS_WINDOWS # Windows seems to have rounding issue\n if state_mtime > now:\n raise RuntimeError(f\"The cache's time stamp is in the future ({time.ctime(state_mtime)}). If the system time ({time.ctime(now)}) is wrong, adjust the system clock; if not, delete the eelbrain-cache folder.\")\n cache_state = load.unpickle(cache_state_path)\n cache_state_v = cache_state.setdefault('version', 0)\n if cache_state_v < CACHE_STATE_VERSION:\n log.debug(\"Updating cache-state %i -> %i\", cache_state_v, CACHE_STATE_VERSION)\n save_state = deepcopy(save_state)\n self._state_backwards_compat(cache_state_v, new_state, cache_state)\n elif cache_state_v > CACHE_STATE_VERSION:\n raise RuntimeError(f\"The cache is from a newer version of Eelbrain than you are currently using. 
Either upgrade Eelbrain or delete the cache folder.\")\n\n # Find modified definitions\n # =========================\n invalid_cache = self._check_cache(new_state, cache_state, root)\n\n # Collect invalid files\n # =====================\n if invalid_cache or cache_state_v < 2:\n rm = self._collect_invalid_files(invalid_cache, new_state, cache_state)\n\n # find actual files to delete\n log.debug(\"Outdated cache files:\")\n files = set()\n result_files = []\n for temp, arg_dicts in rm.items():\n for args in arg_dicts:\n pattern = self._glob_pattern(temp, True, vmatch=False, **args)\n filenames = glob(pattern)\n files.update(filenames)\n # log\n rel_pattern = relpath(pattern, root)\n rel_filenames = sorted(' ' + relpath(f, root) for f in filenames)\n log.debug(' >%s', rel_pattern)\n for filename in rel_filenames:\n log.debug(filename)\n # message to the screen unless log is already displayed\n if rel_pattern.startswith('results'):\n result_files.extend(rel_filenames)\n\n # handle invalid files\n n_result_files = len(result_files)\n if n_result_files and self.auto_delete_cache is True and not self.auto_delete_results:\n if self._screen_log_level > logging.DEBUG:\n msg = result_files[:]\n msg.insert(0, \"Outdated result files detected:\")\n else:\n msg = []\n msg.append(\"Delete %i outdated results?\" % (n_result_files,))\n command = ask(\n '\\n'.join(msg),\n options=(\n ('delete', 'delete invalid result files'),\n ('abort', 'raise an error')),\n help=CACHE_HELP.format(\n experiment=self.__class__.__name__,\n filetype='result'),\n )\n if command == 'abort':\n raise RuntimeError(\"User aborted invalid result deletion\")\n elif command != 'delete':\n raise RuntimeError(\"command=%r\" % (command,))\n\n if files:\n if self.auto_delete_cache is False:\n raise RuntimeError(\n \"Automatic cache management disabled. Either \"\n \"revert changes, or set e.auto_delete_cache=True\")\n elif isinstance(self.auto_delete_cache, str):\n if self.auto_delete_cache != 'debug':\n raise ValueError(f\"MneExperiment.auto_delete_cache={self.auto_delete_cache!r}\")\n command = ask(\n \"Outdated cache files. Choose 'delete' to proceed. 
\"\n \"WARNING: only choose 'ignore' or 'revalidate' if \"\n \"you know what you are doing.\",\n options=(\n ('delete', 'delete invalid files'),\n ('abort', 'raise an error'),\n ('ignore', 'proceed without doing anything'),\n ('revalidate', \"don't delete any cache files but write a new cache-state file\")),\n help=CACHE_HELP.format(\n experiment=self.__class__.__name__,\n filetype='cache and/or result'),\n )\n if command == 'delete':\n pass\n elif command == 'abort':\n raise RuntimeError(\"User aborted invalid cache deletion\")\n elif command == 'ignore':\n log.warning(\"Ignoring invalid cache\")\n return\n elif command == 'revalidate':\n log.warning(\"Revalidating invalid cache\")\n files.clear()\n else:\n raise RuntimeError(\"command=%s\" % repr(command))\n elif self.auto_delete_cache is not True:\n raise TypeError(f\"MneExperiment.auto_delete_cache={self.auto_delete_cache!r}\")\n\n # delete invalid files\n n_cache_files = len(files) - n_result_files\n descs = []\n if n_result_files:\n descs.append(\"%i invalid result files\" % n_result_files)\n if n_cache_files:\n descs.append(\"%i invalid cache files\" % n_cache_files)\n log.info(\"Deleting \" + (' and '.join(descs)) + '...')\n for path in files:\n os.remove(path)\n else:\n log.debug(\"No existing cache files affected.\")\n else:\n log.debug(\"Cache up to date.\")\n elif cache_dir_existed: # cache-dir but no history\n if self.auto_delete_cache is True:\n log.info(\"Deleting cache-dir without history\")\n shutil.rmtree(cache_dir)\n os.mkdir(cache_dir)\n elif self.auto_delete_cache == 'disable':\n log.warning(\"Ignoring cache-dir without history\")\n elif self.auto_delete_cache == 'debug':\n command = ask(\"Cache directory without history\",\n (('validate', 'write a history file treating cache as valid'),\n ('abort', 'raise an error')))\n if command == 'abort':\n raise RuntimeError(\"User aborted\")\n elif command == 'validate':\n log.warning(\"Validating cache-dir without history\")\n else:\n raise RuntimeError(\"command=%r\" % (command,))\n else:\n raise IOError(\"Cache directory without history, but auto_delete_cache is not True\")\n elif not exists(cache_dir):\n os.mkdir(cache_dir)\n\n save.pickle(save_state, cache_state_path)\n\n def _state_backwards_compat(self, cache_state_v, new_state, cache_state):\n \"Update state dicts for backwards-compatible comparison\"\n # epochs\n if cache_state_v < 3:\n # Epochs represented as dict up to Eelbrain 0.24\n new_state['epochs'] = {k: v.as_dict_24() for k, v in self._epochs.items()}\n for e in cache_state['epochs'].values():\n e.pop('base', None)\n if 'sel_epoch' in e:\n e.pop('n_cases', None)\n elif cache_state_v < 11:\n # remove samplingrate parameter\n new_state['epochs'] = {k: {ki: vi for ki, vi in v.items() if ki != 'samplingrate'} for k, v in new_state['epochs'].items()}\n\n # events did not include session\n if cache_state_v < 4:\n session = self._sessions[0]\n cache_state['events'] = {(subject, session): v for subject, v in cache_state['events'].items()}\n\n # raw pipeline\n if cache_state_v < 5:\n legacy_raw = assemble_pipeline(LEGACY_RAW, '', '', '', '', self._sessions, self._log)\n cache_state['raw'] = {k: v.as_dict() for k, v in legacy_raw.items()}\n\n # parcellations represented as dicts\n if cache_state_v < 6:\n for params in cache_state['parcs'].values():\n for key in ('morph_from_fsaverage', 'make'):\n if key in params:\n del params[key]\n\n # tests represented as dicts\n if cache_state_v < 7:\n for params in cache_state['tests'].values():\n if 'desc' in params:\n del 
params['desc']\n            cache_state['tests'] = {k: v.as_dict() for k, v in assemble_tests(cache_state['tests']).items()}\n        elif cache_state_v == 7: # 'kind' key missing\n            for name, params in cache_state['tests'].items():\n                if name in new_state['tests']:\n                    params['kind'] = new_state['tests'][name]['kind']\n        if cache_state_v < 12: # 'vars' entry added to all\n            for test, params in cache_state['tests'].items():\n                if 'vars' in params:\n                    try:\n                        params['vars'] = Variables(params['vars'])\n                    except Exception as error:\n                        self._log.warning(\" Test %s: Defective vardef %r\", test, params['vars'])\n                        params['vars'] = None\n                else:\n                    params['vars'] = None\n\n    def _check_cache(self, new_state, cache_state, root):\n        invalid_cache = defaultdict(set)\n        # events (subject, recording): overall change in events\n        # variables: event change restricted to certain variables\n        # raw: preprocessing definition changed\n        # groups: change in group members\n        # epochs: change in epoch parameters\n        # parcs: parc def change\n        # tests: test def change\n\n        # check events\n        # 'events' -> number or timing of triggers (includes trigger_shift)\n        # 'variables' -> only variable change\n        for key, old_events in cache_state['events'].items():\n            new_events = new_state['events'].get(key)\n            if new_events is None:\n                invalid_cache['events'].add(key)\n                self._log.warning(\" raw file removed: %s\", '/'.join(key))\n            elif new_events.n_cases != old_events.n_cases:\n                invalid_cache['events'].add(key)\n                self._log.warning(\" event length: %s %i->%i\", '/'.join(key), old_events.n_cases, new_events.n_cases)\n            elif not np.all(new_events['i_start'] == old_events['i_start']):\n                invalid_cache['events'].add(key)\n                self._log.warning(\" trigger times changed: %s\", '/'.join(key))\n            else:\n                for var in old_events:\n                    if var == 'i_start':\n                        continue\n                    elif var not in new_events:\n                        invalid_cache['variables'].add(var)\n                        self._log.warning(\" var removed: %s (%s)\", var, '/'.join(key))\n                        continue\n                    old = old_events[var]\n                    new = new_events[var]\n                    if old.name != new.name:\n                        invalid_cache['variables'].add(var)\n                        self._log.warning(\" var name changed: %s (%s) %s->%s\", var, '/'.join(key), old.name, new.name)\n                    elif new.__class__ is not old.__class__:\n                        invalid_cache['variables'].add(var)\n                        self._log.warning(\" var type changed: %s (%s) %s->%s\", var, '/'.join(key), old.__class__, new.__class__)\n                    elif not all_equal(old, new, True):\n                        invalid_cache['variables'].add(var)\n                        self._log.warning(\" var changed: %s (%s) %i values\", var, '/'.join(key), np.sum(new != old))\n\n        # groups\n        for group, members in cache_state['groups'].items():\n            if group not in self._groups:\n                invalid_cache['groups'].add(group)\n                self._log.warning(\" Group removed: %s\", group)\n            elif members != self._groups[group]:\n                invalid_cache['groups'].add(group)\n                log_list_change(self._log, \"Group\", group, members, self._groups[group])\n\n        # raw\n        changed, changed_ica = compare_pipelines(cache_state['raw'], new_state['raw'], self._log)\n        if changed:\n            invalid_cache['raw'].update(changed)\n        for raw, status in changed_ica.items():\n            filenames = self.glob('ica-file', raw=raw, subject='*', visit='*', match=False)\n            if filenames:\n                rel_paths = '\\n'.join(relpath(path, root) for path in filenames)\n                print(f\"Outdated ICA files:\\n{rel_paths}\")\n                ask_to_delete_ica_files(raw, status, filenames)\n\n        # epochs\n        for epoch, old_params in cache_state['epochs'].items():\n            new_params = new_state['epochs'].get(epoch, None)\n            if old_params != new_params:\n                invalid_cache['epochs'].add(epoch)\n                log_dict_change(self._log, 'Epoch', 
epoch, old_params, new_params)\n\n # parcs\n for parc, old_params in cache_state['parcs'].items():\n new_params = new_state['parcs'].get(parc, None)\n if old_params == new_params:\n continue\n elif new_params is None:\n # Don't automatically remove because they could be user-created\n continue\n new_parc = self._parcs[parc]\n if isinstance(new_parc, (FreeSurferParc, FSAverageParc)):\n # FreeSurferParc: Parcellations that are provided by the user\n # should not be automatically removed.\n # FSAverageParc: for other mrisubjects, the parcellation\n # should automatically update if the user changes the\n # fsaverage file.\n continue\n log_dict_change(self._log, \"Parc\", parc, old_params, new_params)\n invalid_cache['parcs'].add(parc)\n if any(p['kind'].endswith('seeded') for p in (new_params, old_params)):\n invalid_cache['parcs'].add(f'{parc}-?')\n invalid_cache['parcs'].add(f'{parc}-??')\n invalid_cache['parcs'].add(f'{parc}-???')\n\n # tests\n for test, old_params in cache_state['tests'].items():\n new_params = new_state['tests'].get(test, None)\n if old_params != new_params:\n invalid_cache['tests'].add(test)\n log_dict_change(self._log, \"Test\", test, old_params, new_params)\n\n # Secondary invalidations\n # ========================\n # changed events -> group result involving those subjects is also bad\n if 'events' in invalid_cache:\n subjects = {subject for subject, _ in invalid_cache['events']}\n for group, members in cache_state['groups'].items():\n if subjects.intersection(members):\n invalid_cache['groups'].add(group)\n\n # tests/epochs based on variables\n if 'variables' in invalid_cache:\n bad_vars = invalid_cache['variables']\n # tests using bad variable\n for test in cache_state['tests']:\n if test in invalid_cache['tests']:\n continue\n params = new_state['tests'][test]\n bad = bad_vars.intersection(find_test_vars(params))\n if bad:\n invalid_cache['tests'].add(test)\n self._log.debug(\" Test %s depends on changed variables %s\", test, ', '.join(bad))\n # epochs using bad variable\n epochs_vars = find_epochs_vars(cache_state['epochs'])\n for epoch, evars in epochs_vars.items():\n bad = bad_vars.intersection(evars)\n if bad:\n invalid_cache['epochs'].add(epoch)\n self._log.debug(\" Epoch %s depends on changed variables %s\", epoch, ', '.join(bad))\n\n # secondary epochs\n if 'epochs' in invalid_cache:\n for e in tuple(invalid_cache['epochs']):\n invalid_cache['epochs'].update(find_dependent_epochs(e, cache_state['epochs']))\n\n # epochs -> cov\n for cov, cov_params in self._covs.items():\n if cov_params.get('epoch') in invalid_cache['epochs']:\n invalid_cache['cov'].add(cov)\n\n return invalid_cache\n\n def _collect_invalid_files(self, invalid_cache, new_state, cache_state):\n rm = defaultdict(DictSet)\n\n # version\n if cache_state['version'] < 2:\n bad_parcs = []\n for parc, params in self._parcs.items():\n if params['kind'] == 'seeded':\n bad_parcs.append(parc + '-?')\n bad_parcs.append(parc + '-??')\n bad_parcs.append(parc + '-???')\n else:\n bad_parcs.append(parc)\n bad_tests = []\n for test, params in new_state['tests'].items():\n if params['kind'] == 'anova' and params['x'].count('*') > 1:\n bad_tests.append(test)\n if bad_tests and bad_parcs:\n self._log.warning(\" Invalid ANOVA tests: %s for %s\", bad_tests, bad_parcs)\n for test, parc in product(bad_tests, bad_parcs):\n rm['test-file'].add({'test': test, 'test_dims': parc})\n rm['report-file'].add({'test': test, 'folder': parc})\n\n # evoked files are based on old events\n for subject, recording in 
invalid_cache['events']:\n for epoch, params in self._epochs.items():\n if recording not in params.sessions:\n continue\n rm['evoked-file'].add({'subject': subject, 'epoch': epoch})\n\n # variables\n for var in invalid_cache['variables']:\n rm['evoked-file'].add({'model': f'*{var}*'})\n\n # groups\n for group in invalid_cache['groups']:\n rm['test-file'].add({'group': group})\n rm['group-mov-file'].add({'group': group})\n rm['report-file'].add({'group': group})\n\n # raw\n for raw in invalid_cache['raw']:\n rm['cached-raw-file'].add({'raw': raw})\n rm['evoked-file'].add({'raw': raw})\n rm['cov-file'].add({'raw': raw})\n analysis = {'analysis': f'{raw} *'}\n rm['test-file'].add(analysis)\n rm['report-file'].add(analysis)\n rm['group-mov-file'].add(analysis)\n rm['subject-mov-file'].add(analysis)\n\n # epochs\n for epoch in invalid_cache['epochs']:\n rm['evoked-file'].add({'epoch': epoch})\n rm['test-file'].add({'epoch': epoch})\n rm['report-file'].add({'epoch': epoch})\n rm['group-mov-file'].add({'epoch': epoch})\n rm['subject-mov-file'].add({'epoch': epoch})\n\n # cov\n for cov in invalid_cache['cov']:\n rm['cov-file'].add({'cov': cov})\n rm['inv-file'].add({'cov': cov})\n analysis = f'* {cov} *'\n rm['test-file'].add({'analysis': analysis})\n rm['report-file'].add({'analysis': analysis})\n rm['group-mov-file'].add({'analysis': analysis})\n rm['subject-mov-file'].add({'analysis': analysis})\n\n # parcs\n for parc in invalid_cache['parc']:\n rm['annot-file'].add({'parc': parc})\n rm['test-file'].add({'test_dims': parc})\n rm['test-file'].add({'test_dims': f'{parc}.*'})\n rm['report-file'].add({'folder': parc})\n rm['report-file'].add({'folder': f'{parc} *'})\n rm['report-file'].add({'folder': f'{parc.capitalize()} *'}) # pre 0.26\n rm['res-file'].add({'analysis': 'Source Annot',\n 'resname': f'{parc} * *',\n 'ext': 'p*'})\n\n # tests\n for test in invalid_cache['tests']:\n rm['test-file'].add({'test': test})\n rm['report-file'].add({'test': test})\n\n if not self.cache_inv:\n rm['inv-file'].add({})\n\n # secondary cache files\n for temp in tuple(rm):\n for stemp in self._secondary_cache[temp]:\n rm[stemp].update(rm[temp])\n\n return rm\n\n def _subclass_init(self):\n \"Allow subclass to register experimental features\"\n\n def __iter__(self):\n \"Iterate state through subjects and yield each subject name.\"\n for subject in self.iter():\n yield subject\n\n # mtime methods\n # -------------\n # _mtime() functions return the time at which any input files affecting the\n # given file changed, and None if inputs are missing. 
They don't check\n # whether the file actually exists (usually there is no need to recompute an\n # intermediate file if it is not needed).\n # _file_mtime() functions directly return the file's mtime, or None if it\n # does not exists or is outdated\n def _annot_file_mtime(self, make_for=None):\n \"\"\"Return max mtime of annot files or None if they do not exist.\n\n Can be user input, so we need to check the actual file.\n \"\"\"\n if make_for:\n with self._temporary_state:\n self.make_annot(mrisubject=make_for)\n return self._annot_file_mtime()\n\n mtime = 0\n for _ in self.iter('hemi'):\n fpath = self.get('annot-file')\n if exists(fpath):\n mtime = max(mtime, getmtime(fpath))\n else:\n return\n return mtime\n\n def _cov_mtime(self):\n params = self._covs[self.get('cov')]\n with self._temporary_state:\n if 'epoch' in params:\n self.set(epoch=params['epoch'])\n return self._epochs_mtime()\n else:\n self.set(session=params['session'])\n return self._raw_mtime()\n\n def _epochs_mtime(self):\n raw_mtime = self._raw_mtime()\n if raw_mtime:\n epoch = self._epochs[self.get('epoch')]\n rej_mtime = self._rej_mtime(epoch)\n if rej_mtime:\n return max(raw_mtime, rej_mtime)\n\n def _epochs_stc_mtime(self):\n \"Mtime affecting source estimates; does not check annot\"\n epochs_mtime = self._epochs_mtime()\n if epochs_mtime:\n inv_mtime = self._inv_mtime()\n if inv_mtime:\n return max(epochs_mtime, inv_mtime)\n\n def _evoked_mtime(self):\n return self._epochs_mtime()\n\n def _evoked_stc_mtime(self):\n \"Mtime if up-to-date, else None; do not check annot\"\n evoked_mtime = self._evoked_mtime()\n if evoked_mtime:\n inv_mtime = self._inv_mtime()\n if inv_mtime:\n return max(evoked_mtime, inv_mtime)\n\n def _fwd_mtime(self, subject=None, recording=None, fwd_recording=None):\n \"The last time at which input files affecting fwd-file changed\"\n trans = self.get('trans-file')\n if exists(trans):\n src = self.get('src-file')\n if exists(src):\n if fwd_recording is None:\n fwd_recording = self._get_fwd_recording(subject, recording)\n raw_mtime = self._raw_mtime('raw', False, subject, fwd_recording)\n if raw_mtime:\n trans_mtime = getmtime(trans)\n src_mtime = getmtime(src)\n return max(raw_mtime, trans_mtime, src_mtime)\n\n def _inv_mtime(self, fwd_recording=None):\n fwd_mtime = self._fwd_mtime(fwd_recording=fwd_recording)\n if fwd_mtime:\n cov_mtime = self._cov_mtime()\n if cov_mtime:\n return max(cov_mtime, fwd_mtime)\n\n def _raw_mtime(self, raw=None, bad_chs=True, subject=None, recording=None):\n if raw is None:\n raw = self.get('raw')\n elif raw not in self._raw:\n raise RuntimeError(f\"raw-mtime with raw={raw!r}\")\n pipe = self._raw[raw]\n if subject is None:\n subject = self.get('subject')\n if recording is None:\n recording = self.get('recording')\n return pipe.mtime(subject, recording, bad_chs)\n\n def _rej_mtime(self, epoch):\n \"\"\"rej-file mtime for secondary epoch definition\n\n Parameters\n ----------\n epoch : dict\n Epoch definition.\n \"\"\"\n rej = self._artifact_rejection[self.get('rej')]\n if rej['kind'] is None:\n return 1 # no rejection\n with self._temporary_state:\n paths = [self.get('rej-file', epoch=e) for e in epoch.rej_file_epochs]\n if all(exists(path) for path in paths):\n mtime = max(getmtime(path) for path in paths)\n return mtime\n\n def _result_file_mtime(self, dst, data, single_subject=False):\n \"\"\"MTime if up-to-date, else None (for reports and movies)\n\n Parameters\n ----------\n dst : str\n Filename.\n data : TestDims\n Data type.\n single_subject : bool\n Whether 
the corresponding test is performed for a single subject\n (as opposed to the current group).\n \"\"\"\n if exists(dst):\n mtime = self._result_mtime(data, single_subject)\n if mtime:\n dst_mtime = getmtime(dst)\n if dst_mtime > mtime:\n return dst_mtime\n\n def _result_mtime(self, data, single_subject):\n \"See ._result_file_mtime() above\"\n if data.source:\n if data.parc_level:\n if single_subject:\n out = self._annot_file_mtime(self.get('mrisubject'))\n elif data.parc_level == 'common':\n out = self._annot_file_mtime(self.get('common_brain'))\n elif data.parc_level == 'individual':\n out = 0\n for _ in self:\n mtime = self._annot_file_mtime()\n if mtime is None:\n return\n else:\n out = max(out, mtime)\n else:\n raise RuntimeError(f\"data={data.string!r}, parc_level={data.parc_level!r}\")\n else:\n out = 1\n\n if not out:\n return\n mtime_func = self._epochs_stc_mtime\n else:\n out = 1\n mtime_func = self._epochs_mtime\n\n if single_subject:\n mtime_iterator = (mtime_func(),)\n else:\n mtime_iterator = (mtime_func() for _ in self)\n\n for mtime in mtime_iterator:\n if not mtime:\n return\n out = max(out, mtime)\n return out\n\n def _process_subject_arg(self, subjects, kwargs):\n \"\"\"Process subject arg for methods that work on groups and subjects\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n kwargs : dict\n Additional state parameters to set.\n\n Returns\n -------\n subject : None | str\n Subject name if the value specifies a subject, None otherwise.\n group : None | str\n Group name if the value specifies a group, None otherwise.\n \"\"\"\n if subjects is None: # default:\n subjects = -1 if 'group' in kwargs else 1\n elif subjects is True: # legacy value:\n subjects = -1\n\n if isinstance(subjects, int):\n if subjects == 1:\n return self.get('subject', **kwargs), None\n elif subjects == -1:\n return None, self.get('group', **kwargs)\n else:\n raise ValueError(f\"subjects={subjects}\")\n elif isinstance(subjects, str):\n if subjects in self.get_field_values('group'):\n if 'group' in kwargs:\n if kwargs['group'] != subjects:\n raise ValueError(f\"group={kwargs['group']!r} inconsistent with subject={subjects!r}\")\n self.set(**kwargs)\n else:\n self.set(group=subjects, **kwargs)\n return None, subjects\n else:\n return self.get('subject', subject=subjects, **kwargs), None\n else:\n raise TypeError(f\"subjects={subjects!r}\")\n\n def _cluster_criteria_kwargs(self, data):\n criteria = self._cluster_criteria[self.get('select_clusters')]\n return {'min' + dim: criteria[dim] for dim in data.dims if dim in criteria}\n\n def _add_vars(self, ds, vardef: Union[None, str, Variables], groupvars=False):\n \"\"\"Add vars to the dataset\n\n Parameters\n ----------\n ds : Dataset\n Event dataset.\n vardef : dict | tuple\n Variable definition.\n groupvars : bool\n Apply GroupVars in ``self.variables`` (when adding variables to a\n dataset that does not originate from events, such as TRFs).\n \"\"\"\n if groupvars:\n self._variables.apply(ds, self, group_only=True)\n\n if vardef is None:\n return\n elif isinstance(vardef, str):\n try:\n vardef = self._tests[vardef].vars\n except KeyError:\n raise ValueError(f\"vardef={vardef!r}\")\n elif not isinstance(vardef, Variables):\n vardef = Variables(vardef)\n vardef.apply(ds, self)\n\n def _backup(self, dst_root, 
v=False):\n \"\"\"Backup all essential files to ``dst_root``.\n\n .. warning::\n Method is out of data and probably does not work as expected.\n\n Parameters\n ----------\n dst_root : str\n Directory to use as root for the backup.\n v : bool\n Verbose mode: list all files that will be copied and ask for\n confirmation.\n\n Notes\n -----\n For repeated backups ``dst_root`` can be the same. If a file has been\n previously backed up, it is only copied if the local copy has been\n modified more recently than the previous backup. If the backup has been\n modified more recently than the local copy, a warning is displayed.\n\n Currently, the following files are included in the backup::\n\n * All rejection files\n * The trans-file\n * All files in the ``meg/{subject}/logs`` directory\n * For scaled MRIs, the file specifying the scale parameters\n\n MRIs are currently not backed up.\n \"\"\"\n self._log.debug(\"Initiating backup to %s\" % dst_root)\n root = self.get('root')\n root_len = len(root) + 1\n\n dirs = [] # directories to create\n pairs = [] # (src, dst) pairs to copy\n for temp, state_mod in self._backup_files:\n # determine state\n if state_mod:\n state = self._backup_state.copy()\n state.update(state_mod)\n else:\n state = self._backup_state\n\n # find files to back up\n if temp.endswith('dir'):\n paths = []\n for dirpath in self.glob(temp, **state):\n for root_, _, filenames in os.walk(dirpath):\n paths.extend(join(root_, fn) for fn in filenames)\n else:\n paths = self.glob(temp, **state)\n\n # convert to (src, dst) pairs\n for src in paths:\n if not src.startswith(root):\n raise ValueError(\"Can only backup files in root directory\")\n tail = src[root_len:]\n dst = join(dst_root, tail)\n if exists(dst):\n src_m = getmtime(src)\n dst_m = getmtime(dst)\n if dst_m == src_m:\n continue\n elif dst_m > src_m:\n self._log.warning(\"Backup more recent than original: %s\", tail)\n continue\n else:\n i = 0\n while True:\n i = tail.find(os.sep, i + 1)\n if i == -1:\n break\n path = tail[:i]\n if path not in dirs:\n dirs.append(path)\n\n pairs.append((src, dst))\n\n if len(pairs) == 0:\n if v:\n print(\"All files backed up.\")\n else:\n self._log.info(\"All files backed up.\")\n return\n\n # verbose file list\n if v:\n paths = [relpath(src, root) for src, _ in pairs]\n print('\\n'.join(paths))\n cmd = 'x'\n while cmd not in 'yn':\n cmd = input(\"Proceed ([y]/n)? \")\n if cmd == 'n':\n print(\"Abort.\")\n return\n else:\n print(\"Backing up %i files ...\" % len(pairs))\n\n self._log.info(\"Backing up %i files ...\" % len(pairs))\n # create directories\n for dirname in dirs:\n dirpath = join(dst_root, dirname)\n if not exists(dirpath):\n os.mkdir(dirpath)\n # copy files\n for src, dst in pairs:\n shutil.copy2(src, dst)\n\n def clear_cache(self, level=1):\n \"\"\"Remove cached files.\n\n Parameters\n ----------\n level : int\n Level up to which to clear the cache (see notes below). The default\n is 1, which deletes all cached files.\n\n Notes\n -----\n Each lower level subsumes the higher levels:\n\n ``1``\n Delete all cached files.\n ``2``\n Epoched files - these need to be cleared when anything about the\n epoch definition changes (tmin, tmax, event inclusion, ...). 
Note\n that you might also have to manually update epoch rejection files\n with the :meth:`MneExperiment.make_epoch_selection` method.\n ``5``\n tests - these need to be cleared when the members of the relevant\n subject groups change.\n\n Examples\n --------\n To delete only test files, after adding raw data for a new subject to\n the experiment::\n\n >>> e.clear_cache(5)\n\n To delete cached data files after changing the selection criteria for\n a secondary epoch::\n\n >>> e.clear_cache(2)\n\n If criteria on a primary epoch are changed, the trial rejection has to\n be re-done in addition to clearing the cache.\n\n To delete all cached files and clear up hard drive space::\n\n >>> e.clear_cache(1)\n \"\"\"\n if level <= 1:\n self.rm('cache-dir', confirm=True)\n print(\"All cached data cleared.\")\n else:\n if level <= 2:\n self.rm('evoked-dir', confirm=True)\n self.rm('cov-dir', confirm=True)\n print(\"Cached epoch data cleared\")\n if level <= 5:\n self.rm('test-dir', confirm=True)\n print(\"Cached tests cleared.\")\n\n def get_field_values(self, field, exclude=(), **state):\n \"\"\"Find values for a field taking into account exclusion\n\n Parameters\n ----------\n field : str\n Field for which to find values.\n exclude : list of str\n Exclude these values.\n ...\n State parameters.\n \"\"\"\n if state:\n self.set(**state)\n if isinstance(exclude, str):\n exclude = (exclude,)\n\n if field == 'mrisubject':\n subjects = FileTree.get_field_values(self, 'subject')\n mri_subjects = self._mri_subjects[self.get('mri')]\n mrisubjects = sorted(mri_subjects[s] for s in subjects)\n if exclude:\n mrisubjects = [s for s in mrisubjects if s not in exclude]\n common_brain = self.get('common_brain')\n if common_brain and (not exclude or common_brain not in exclude):\n mrisubjects.insert(0, common_brain)\n return mrisubjects\n else:\n return FileTree.get_field_values(self, field, exclude)\n\n def _get_fwd_recording(self, subject: str = None, recording: str = None) -> str:\n if subject is None:\n subject = self.get('subject')\n if recording is None:\n recording = self.get('recording')\n try:\n return self._dig_sessions[subject][recording]\n except KeyError:\n raise FileMissing(f\"Raw data missing for {subject}, session {recording}\")\n\n def iter(self, fields='subject', exclude=None, values=None, group=None, progress_bar=None, **kwargs):\n \"\"\"\n Cycle the experiment's state through all values on the given fields\n\n Parameters\n ----------\n fields : sequence | str\n Field(s) over which should be iterated.\n exclude : dict {str: iterator over str}\n Exclude values from iteration (``{field: values_to_exclude}``).\n values : dict {str: iterator over str}\n Fields with custom values to iterate over (instead of the\n corresponding field values) with {name: (sequence of values)}\n entries.\n group : None | str\n If iterating over subjects, use this group ('all' for all except\n excluded subjects, 'all!' for all including excluded subjects, or\n a name defined in experiment.groups).\n progress_bar : str\n Message to show in the progress bar.\n ...\n Fields with constant values throughout the iteration.\n \"\"\"\n if group is not None:\n kwargs['group'] = group\n return FileTree.iter(self, fields, exclude, values, progress_bar, **kwargs)\n\n def iter_range(self, start=None, stop=None, field='subject'):\n \"\"\"Iterate through a range on a field with ordered values.\n\n Parameters\n ----------\n start : None | str\n Start value (inclusive). 
With ``None``, begin at the first value.\n stop : None | str\n Stop value (inclusive). With ``None``, end with the last value.\n field : str\n Name of the field.\n\n Returns\n -------\n iterator over value : str\n Current field value.\n \"\"\"\n values = self.get_field_values(field)\n if start is not None:\n start = values.index(start)\n if stop is not None:\n stop = values.index(stop) + 1\n values = values[start:stop]\n\n with self._temporary_state:\n for value in values:\n self._restore_state(discard_tip=False)\n self.set(**{field: value})\n yield value\n\n def _label_events(self, ds):\n # add standard variables\n ds['T'] = ds['i_start'] / ds.info['sfreq']\n ds['SOA'] = ds['T'].diff(0)\n ds['subject'] = Factor([ds.info['subject']], repeat=ds.n_cases, random=True)\n if len(self._sessions) > 1:\n ds[:, 'session'] = ds.info['session']\n if len(self._visits) > 1:\n ds[:, 'visit'] = ds.info['visit']\n self._variables.apply(ds, self)\n\n # subclass label_events\n info = ds.info\n ds = self.label_events(ds)\n if not isinstance(ds, Dataset):\n raise DefinitionError(f\"{self.__class__.__name__}.label_events() needs to return the events Dataset. Got {ds!r}.\")\n elif 'i_start' not in ds:\n raise DefinitionError(f\"The Dataset returned by {self.__class__.__name__}.label_events() does not contain a variable called `i_start`. This variable is required to ascribe events to data samples.\")\n elif 'trigger' not in ds:\n raise DefinitionError(f\"The Dataset returned by {self.__class__.__name__}.label_events() does not contain a variable called `trigger`. This variable is required to check rejection files.\")\n elif ds.info is not info:\n ds.info.update(info)\n return ds\n\n def label_events(self, ds):\n \"\"\"Add event labels to events loaded from raw files\n\n Parameters\n ----------\n ds : Dataset\n A Dataset containing events (with variables as returned by\n :func:`load.fiff.events`).\n\n Notes\n -----\n Override this method in MneExperiment subclasses to add event labels.\n The session that the events are from can be determined with\n ``ds.info['session']``.\n Calling the original (super-class) method is not necessary.\n \"\"\"\n return ds\n\n def label_subjects(self, ds):\n \"\"\"Label the subjects in ds\n\n Creates a boolean :class:`Var` in ``ds`` for each group marking group\n membership.\n\n Parameters\n ----------\n ds : Dataset\n A Dataset with 'subject' entry.\n \"\"\"\n subject = ds['subject']\n for name, subjects in self._groups.items():\n ds[name] = Var(subject.isin(subjects))\n\n def label_groups(self, subject, groups):\n \"\"\"Generate Factor for group membership\n\n Parameters\n ----------\n subject : Factor\n A Factor with subjects.\n groups : list of str | {str: str} dict\n Groups which to label (raises an error if group membership is not\n unique). 
To use labels other than the group names themselves, use\n a ``{group: label}`` dict.\n\n Returns\n -------\n group : Factor\n A :class:`Factor` that labels the group for each subject.\n \"\"\"\n if not isinstance(groups, dict):\n groups = {g: g for g in groups}\n labels = {s: [l for g, l in groups.items() if s in self._groups[g]] for s in subject.cells}\n problems = [s for s, g in labels.items() if len(g) != 1]\n if problems:\n desc = (', '.join(labels[s]) if labels[s] else 'no group' for s in problems)\n msg = ', '.join('%s (%s)' % pair for pair in zip(problems, desc))\n raise ValueError(f\"Groups {groups} are not unique for subjects: {msg}\")\n labels = {s: g[0] for s, g in labels.items()}\n return Factor(subject, labels=labels)\n\n def load_annot(self, **state):\n \"\"\"Load a parcellation (from an annot file)\n\n Returns\n -------\n labels : list of Label\n Labels in the parcellation (output of\n :func:`mne.read_labels_from_annot`).\n ...\n State parameters.\n \"\"\"\n self.make_annot(**state)\n return mne.read_labels_from_annot(self.get('mrisubject'),\n self.get('parc'), 'both',\n subjects_dir=self.get('mri-sdir'))\n\n def load_bad_channels(self, **kwargs):\n \"\"\"Load bad channels\n \n Parameters\n ----------\n ...\n State parameters.\n\n Returns\n -------\n bad_chs : list of str\n Bad chnnels.\n \"\"\"\n pipe = self._raw[self.get('raw', **kwargs)]\n return pipe.load_bad_channels(self.get('subject'), self.get('recording'))\n\n def _load_bem(self):\n subject = self.get('mrisubject')\n if subject == 'fsaverage' or is_fake_mri(self.get('mri-dir')):\n return mne.read_bem_surfaces(self.get('bem-file'))\n else:\n bem_dir = self.get('bem-dir')\n surfs = ('inner_skull', 'outer_skull', 'outer_skin')\n paths = {s: join(bem_dir, s + '.surf') for s in surfs}\n missing = [s for s in surfs if not exists(paths[s])]\n if missing:\n bem_dir = self.get('bem-dir')\n temp = join(\".*\", \"bem\", \"(.*)\")\n for surf in missing[:]:\n path = paths[surf]\n if os.path.islink(path):\n # try to fix broken symlinks\n old_target = os.readlink(path)\n m = re.match(temp, old_target)\n if m:\n new_target = m.group(1)\n if exists(join(bem_dir, new_target)):\n self._log.info(\"Fixing broken symlink for %s \"\n \"%s surface file\", subject, surf)\n os.unlink(path)\n os.symlink(new_target, path)\n missing.remove(surf)\n # continue\n # self._log.info(\"Deleting broken symlink \" + path)\n # os.unlink(path)\n if missing:\n self._log.info(\"%s %s missing for %s. 
Running \"\n \"mne.make_watershed_bem()...\",\n enumeration(missing).capitalize(),\n plural('surface', len(missing)), subject)\n # re-run watershed_bem\n # mne-python expects the environment variable\n os.environ['FREESURFER_HOME'] = subp.get_fs_home()\n mne.bem.make_watershed_bem(subject, self.get('mri-sdir'),\n overwrite=True)\n\n return mne.make_bem_model(subject, conductivity=(0.3,),\n subjects_dir=self.get('mri-sdir'))\n\n def load_cov(self, **kwargs):\n \"\"\"Load the covariance matrix\n\n Parameters\n ----------\n ...\n State parameters.\n \"\"\"\n return mne.read_cov(self.get('cov-file', make=True, **kwargs))\n\n def load_edf(self, **kwargs):\n \"\"\"Load the edf file (\"edf-file\" template)\n \n Parameters\n ----------\n ...\n State parameters.\n \"\"\"\n path = self.get('edf-file', fmatch=False, **kwargs)\n return load.eyelink.Edf(path)\n\n def load_epochs(self, subjects=None, baseline=False, ndvar=True,\n add_bads=True, reject=True, cat=None,\n decim=None, pad=0, data_raw=False, vardef=None, data='sensor',\n trigger_shift=True, tmin=None,\n tmax=None, tstop=None, interpolate_bads=False, **kwargs):\n \"\"\"\n Load a Dataset with epochs for a given epoch definition\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period. True to use the\n epoch's baseline specification. The default is to not apply baseline\n correction.\n ndvar : bool | 'both'\n Convert epochs to an NDVar (named 'meg' for MEG data and 'eeg' for\n EEG data). Use 'both' to include NDVar and MNE Epochs.\n add_bads : bool | list\n Add bad channel information to the Raw. If True, bad channel\n information is retrieved from the bad channels file. Alternatively,\n a list of bad channels can be specified.\n reject : bool | 'keep'\n Reject bad trials. If ``True`` (default), bad trials are removed\n from the Dataset. Set to ``False`` to ignore the trial rejection.\n Set ``reject='keep'`` to load the rejection (added it to the events\n as ``'accept'`` variable), but keep bad trails.\n cat : sequence of cell-names\n Only load data for these cells (cells of model).\n decim : int\n Data decimation factor (the default is the factor specified in the\n epoch definition).\n pad : scalar\n Pad the epochs with this much time (in seconds; e.g. for spectral\n analysis).\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n vardef : str\n Name of a test defining additional variables.\n data : str\n Data to load; 'sensor' to load all sensor data (default);\n 'sensor.rms' to return RMS over sensors. 
Only applies to NDVar\n output.\n trigger_shift : bool\n Apply post-baseline trigger-shift if it applies to the epoch\n (default True).\n tmin : scalar\n Override the epoch's ``tmin`` parameter.\n tmax : scalar\n Override the epoch's ``tmax`` parameter.\n tstop : scalar\n Override the epoch's ``tmax`` parameter as exclusive ``tstop``.\n interpolate_bads : bool\n Interpolate channels marked as bad for the whole recording (useful\n when comparing topographies across subjects; default False).\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-epoch`: which events to use and time window\n - :ref:`state-rej`: which trials to use\n\n \"\"\"\n data = TestDims.coerce(data)\n if not data.sensor:\n raise ValueError(f\"data={data.string!r}; load_evoked is for loading sensor data\")\n elif data.sensor is not True:\n if not ndvar:\n raise ValueError(f\"data={data.string!r} with ndvar=False\")\n elif interpolate_bads:\n raise ValueError(f\"interpolate_bads={interpolate_bads!r} with data={data.string}\")\n if ndvar:\n if isinstance(ndvar, str):\n if ndvar != 'both':\n raise ValueError(\"ndvar=%s\" % repr(ndvar))\n subject, group = self._process_subject_arg(subjects, kwargs)\n epoch_name = self.get('epoch')\n\n if group is not None:\n dss = []\n for _ in self.iter(group=group, progress=f\"Load {epoch_name}\"):\n ds = self.load_epochs(None, baseline, ndvar, add_bads, reject, cat, decim, pad, data_raw, vardef, data, True, tmin, tmax, tstop, interpolate_bads)\n dss.append(ds)\n\n return combine(dss)\n\n # single subject\n epoch = self._epochs[epoch_name]\n if isinstance(epoch, EpochCollection):\n dss = []\n with self._temporary_state:\n for sub_epoch in epoch.collect:\n ds = self.load_epochs(subject, baseline, ndvar, add_bads, reject, cat, decim, pad, data_raw, vardef, data, trigger_shift, tmin, tmax, tstop, interpolate_bads, epoch=sub_epoch)\n ds[:, 'epoch'] = sub_epoch\n dss.append(ds)\n return combine(dss)\n\n if isinstance(add_bads, str):\n if add_bads == 'info':\n add_bads_to_info = True\n add_bads = True\n else:\n raise ValueError(f\"add_bads={add_bads!r}\")\n else:\n add_bads_to_info = False\n\n with self._temporary_state:\n ds = self.load_selected_events(add_bads=add_bads, reject=reject, data_raw=True, vardef=vardef, cat=cat)\n if ds.n_cases == 0:\n err = f\"No events left for epoch={epoch.name!r}, subject={subject!r}\"\n if cat:\n err += f\", cat={cat!r}\"\n raise RuntimeError(err)\n\n # load sensor space data\n if tmin is None:\n tmin = epoch.tmin\n if tmax is None and tstop is None:\n tmax = epoch.tmax\n if baseline is True:\n baseline = epoch.baseline\n if pad:\n tmin -= pad\n tmax += pad\n decim = decim_param(epoch, decim, ds.info['raw'].info)\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'The events passed to the Epochs constructor', RuntimeWarning)\n ds = load.fiff.add_mne_epochs(ds, tmin, tmax, baseline, decim=decim, drop_bad_chs=False, tstop=tstop)\n\n # post baseline-correction trigger shift\n if trigger_shift and epoch.post_baseline_trigger_shift:\n ds['epochs'] = shift_mne_epoch_trigger(ds['epochs'],\n ds[epoch.post_baseline_trigger_shift],\n epoch.post_baseline_trigger_shift_min,\n epoch.post_baseline_trigger_shift_max)\n info = ds['epochs'].info\n data_to_ndvar = data.data_to_ndvar(info)\n\n # determine channels to interpolate\n bads_all = None\n bads_individual = None\n if interpolate_bads:\n bads_all = info['bads']\n if ds.info[INTERPOLATE_CHANNELS] and any(ds[INTERPOLATE_CHANNELS]):\n bads_individual = 
ds[INTERPOLATE_CHANNELS]\n if bads_all:\n bads_all = set(bads_all)\n bads_individual = [sorted(bads_all.union(bads)) for bads in bads_individual]\n\n # interpolate bad channels\n if bads_all:\n if isinstance(interpolate_bads, str):\n if interpolate_bads == 'keep':\n reset_bads = False\n else:\n raise ValueError(f\"interpolate_bads={interpolate_bads}\")\n else:\n reset_bads = True\n ds['epochs'].interpolate_bads(reset_bads=reset_bads)\n\n # interpolate channels\n if reject and bads_individual:\n if 'mag' in data_to_ndvar:\n interp_path = self.get('interp-file')\n if exists(interp_path):\n interp_cache = load.unpickle(interp_path)\n else:\n interp_cache = {}\n n_in_cache = len(interp_cache)\n _interpolate_bads_meg(ds['epochs'], bads_individual, interp_cache)\n if len(interp_cache) > n_in_cache:\n save.pickle(interp_cache, interp_path)\n if 'eeg' in data_to_ndvar:\n _interpolate_bads_eeg(ds['epochs'], bads_individual)\n\n if ndvar:\n pipe = self._raw[self.get('raw')]\n exclude = () if add_bads_to_info else 'bads'\n for data_kind in data_to_ndvar:\n sysname = pipe.get_sysname(info, ds.info['subject'], data_kind)\n connectivity = pipe.get_connectivity(data_kind)\n name = 'meg' if data_kind == 'mag' else data_kind\n ds[name] = load.fiff.epochs_ndvar(ds['epochs'], data=data_kind, sysname=sysname, connectivity=connectivity, exclude=exclude)\n if add_bads_to_info:\n ds[name].info[BAD_CHANNELS] = ds['epochs'].info['bads']\n if isinstance(data.sensor, str):\n ds[name] = getattr(ds[name], data.sensor)('sensor')\n\n if ndvar != 'both':\n del ds['epochs']\n\n if not data_raw:\n del ds.info['raw']\n\n return ds\n\n def load_epochs_stc(self, subjects=None, baseline=True, src_baseline=False, cat=None, keep_epochs=False, morph=False, mask=False, data_raw=False, vardef=None, decim=None, ndvar=True, reject=True, **state):\n \"\"\"Load a Dataset with stcs for single epochs\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n Warning: loading single trial data for multiple subjects at once\n uses a lot of memory, which can lead to a periodically unresponsive\n terminal).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. 
The default is to\n not apply baseline correction.\n cat : sequence of cell-names\n Only load data for these cells (cells of model).\n keep_epochs : bool | 'ndvar' | 'both'\n Keep the sensor space data in the Dataset that is returned (default\n False; True to keep :class:`mne.Epochs` object; ``'ndvar'`` to keep\n :class:`NDVar`; ``'both'`` to keep both).\n morph : bool\n Morph the source estimates to the common_brain (default False).\n mask : bool | str\n Discard data that is labelled 'unknown' by the parcellation (only\n applies to NDVars, default False).\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n vardef : str\n Name of a test defining additional variables.\n decim : int\n Override the epoch decim factor.\n ndvar : bool\n Add the source estimates as :class:`NDVar` named \"src\" instead of a list of\n :class:`mne.SourceEstimate` objects named \"stc\" (default True).\n reject : bool | 'keep'\n Reject bad trials. If ``True`` (default), bad trials are removed\n from the Dataset. Set to ``False`` to ignore the trial rejection.\n Set ``reject='keep'`` to load the rejection (added it to the events\n as ``'accept'`` variable), but keep bad trails.\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-epoch`: which events to use and time window\n - :ref:`state-rej`: which trials to use\n - :ref:`state-cov`: covariance matrix for inverse solution\n - :ref:`state-src`: source space\n - :ref:`state-inv`: inverse solution\n\n Returns\n -------\n epochs_dataset : Dataset\n Dataset containing single trial data (epochs).\n \"\"\"\n epoch_name = self.get('epoch')\n epoch = self._epochs[epoch_name]\n if not baseline and src_baseline and epoch.post_baseline_trigger_shift:\n raise NotImplementedError(\"src_baseline with post_baseline_trigger_shift\")\n subject, group = self._process_subject_arg(subjects, state)\n if group is not None:\n if data_raw:\n raise ValueError(f\"data_raw={data_raw!r} with group: Can not combine raw data from multiple subjects.\")\n elif keep_epochs:\n raise ValueError(f\"keep_epochs={keep_epochs!r} with group: Can not combine Epochs objects for different subjects. Set keep_epochs=False (default).\")\n elif not morph:\n raise ValueError(f\"morph={morph!r} with group: Source estimates can only be combined after morphing data to common brain model. 
Set morph=True.\")\n dss = []\n for _ in self.iter(group=group, progress_bar=f\"Load {epoch_name} STC\"):\n ds = self.load_epochs_stc(None, baseline, src_baseline, cat, keep_epochs, morph, mask, False, vardef, decim, ndvar, reject)\n dss.append(ds)\n return combine(dss)\n\n if keep_epochs is True:\n sns_ndvar = False\n del_epochs = False\n elif keep_epochs is False:\n sns_ndvar = False\n del_epochs = True\n elif keep_epochs == 'ndvar':\n sns_ndvar = 'both'\n del_epochs = True\n elif keep_epochs == 'both':\n sns_ndvar = 'both'\n del_epochs = False\n else:\n raise ValueError(f'keep_epochs={keep_epochs!r}')\n\n ds = self.load_epochs(subject, baseline, sns_ndvar, reject=reject, cat=cat, decim=decim, data_raw=data_raw, vardef=vardef)\n\n # load inv\n if src_baseline is True:\n src_baseline = epoch.baseline\n parc = self.get('parc') or None\n if isinstance(mask, str) and parc != mask:\n parc = mask\n self.set(parc=mask)\n # make sure annotation exists\n if parc:\n self.make_annot()\n epochs = ds['epochs']\n inv = self.load_inv(epochs)\n\n # determine whether initial source-space can be restricted\n mri_sdir = self.get('mri-sdir')\n mrisubject = self.get('mrisubject')\n is_scaled = find_source_subject(mrisubject, mri_sdir)\n if mask and (is_scaled or not morph):\n label = label_from_annot(inv['src'], mrisubject, mri_sdir, parc)\n else:\n label = None\n stc = apply_inverse_epochs(epochs, inv, label=label, **self._params['apply_inv_kw'])\n\n if ndvar:\n src = self.get('src')\n src = load.fiff.stc_ndvar(\n stc, mrisubject, src, mri_sdir, self._params['apply_inv_kw']['method'],\n self._params['make_inv_kw'].get('fixed', False), parc=parc,\n connectivity=self.get('connectivity'))\n if src_baseline:\n src -= src.summary(time=src_baseline)\n\n if morph:\n common_brain = self.get('common_brain')\n with self._temporary_state:\n self.make_annot(mrisubject=common_brain)\n ds['srcm'] = morph_source_space(src, common_brain)\n if mask and not is_scaled:\n _mask_ndvar(ds, 'srcm')\n else:\n ds['src'] = src\n else:\n if src_baseline:\n raise NotImplementedError(\"Baseline for SourceEstimate\")\n if morph:\n raise NotImplementedError(\"Morphing for SourceEstimate\")\n ds['stc'] = stc\n\n if del_epochs:\n del ds['epochs']\n return ds\n\n def load_events(self, subject=None, add_bads=True, data_raw=True, **kwargs):\n \"\"\"\n Load events from a raw file.\n\n Loads events from the corresponding raw file, adds the raw to the info\n dict.\n\n Parameters\n ----------\n subject : str\n Subject for which to load events (default is the current subject\n in the experiment's state).\n add_bads : False | True | list\n Add bad channel information to the Raw. If True, bad channel\n information is retrieved from the bad channels file. 
Alternatively,\n a list of bad channels can be specified.\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-epoch`: which events to use and time window\n\n \"\"\"\n evt_file = self.get('event-file', mkdir=True, subject=subject, **kwargs)\n subject = self.get('subject')\n\n # search for and check cached version\n raw_mtime = self._raw_mtime(bad_chs=False, subject=subject)\n if exists(evt_file):\n ds = load.unpickle(evt_file)\n if ds.info['raw-mtime'] != raw_mtime:\n ds = None\n else:\n ds = None\n\n # refresh cache\n if ds is None:\n self._log.debug(\"Extracting events for %s %s %s\", self.get('raw'), subject, self.get('recording'))\n raw = self.load_raw(add_bads)\n ds = load.fiff.events(raw)\n del ds.info['raw']\n ds.info['sfreq'] = raw.info['sfreq']\n ds.info['raw-mtime'] = raw_mtime\n\n # add edf\n if self.has_edf[subject]:\n edf = self.load_edf()\n edf.add_t_to(ds)\n ds.info['edf'] = edf\n\n save.pickle(ds, evt_file)\n if data_raw:\n ds.info['raw'] = raw\n elif data_raw:\n ds.info['raw'] = self.load_raw(add_bads)\n\n ds.info['subject'] = subject\n ds.info['session'] = self.get('session')\n if len(self._visits) > 1:\n ds.info['visit'] = self.get('visit')\n\n if self.trigger_shift:\n if isinstance(self.trigger_shift, dict):\n trigger_shift = self.trigger_shift[subject]\n else:\n trigger_shift = self.trigger_shift\n\n if trigger_shift:\n ds['i_start'] += int(round(trigger_shift * ds.info['sfreq']))\n\n return self._label_events(ds)\n\n def load_evoked(self, subjects=None, baseline=False, ndvar=True, cat=None,\n decim=None, data_raw=False, vardef=None, data='sensor',\n **kwargs):\n \"\"\"\n Load a Dataset with the evoked responses for each subject.\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period. True to use the\n epoch's baseline specification. The default is to not apply baseline\n correction.\n ndvar : bool\n Convert the mne Evoked objects to an NDVar (the name in the\n Dataset is 'meg' or 'eeg').\n cat : sequence of cell-names\n Only load data for these cells (cells of model).\n decim : int\n Data decimation factor (the default is the factor specified in the\n epoch definition).\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n vardef : str\n Name of a test defining additional variables.\n data : str\n Data to load; 'sensor' to load all sensor data (default);\n 'sensor.rms' to return RMS over sensors. 
Only applies to NDVar\n output.\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-epoch`: which events to use and time window\n - :ref:`state-rej`: which trials to use\n - :ref:`state-model`: how to group trials into conditions\n - :ref:`state-equalize_evoked_count`: control number of trials per cell\n\n \"\"\"\n subject, group = self._process_subject_arg(subjects, kwargs)\n epoch_name = self.get('epoch')\n epoch = self._epochs[epoch_name]\n data = TestDims.coerce(data)\n if not data.sensor:\n raise ValueError(f\"data={data.string!r}; load_evoked is for loading sensor data\")\n elif data.sensor is not True and not ndvar:\n raise ValueError(f\"data={data.string!r} with ndvar=False\")\n if baseline is True:\n baseline = epoch.baseline\n model = self.get('model')\n\n if group is not None:\n # when aggregating across sensors, do it before combining subjects\n # to avoid losing sensors that are not shared\n individual_ndvar = isinstance(data.sensor, str)\n desc = f'by {model}' if model else 'average'\n dss = [self.load_evoked(None, baseline, individual_ndvar, cat, decim, data_raw, vardef, data)\n for _ in self.iter(group=group, progress_bar=f\"Load {epoch_name} {desc}\")]\n if individual_ndvar:\n ndvar = False\n elif ndvar:\n # set interpolated channels to good\n for ds in dss:\n for e in ds['evoked']:\n if e.info['description'] is None:\n continue\n m = re.match(r\"Eelbrain (\\d+)\", e.info['description'])\n if not m:\n continue\n v = int(m.group(1))\n if v >= 11:\n e.info['bads'] = []\n ds = combine(dss, incomplete='drop')\n\n # check consistency in MNE objects' number of time points\n lens = [len(e.times) for e in ds['evoked']]\n ulens = set(lens)\n if len(ulens) > 1:\n err = [\"Unequal time axis sampling (len):\"]\n alens = np.array(lens)\n for l in ulens:\n err.append('%i: %r' % (l, ds['subject', alens == l].cells))\n raise DimensionMismatchError('\\n'.join(err))\n else: # single subject\n ds = self._make_evoked(decim, data_raw)\n\n if cat:\n if not model:\n raise TypeError(f\"cat={cat!r}: Can't set cat when model is ''\")\n model = ds.eval(model)\n idx = model.isin(cat)\n ds = ds.sub(idx)\n if ds.n_cases == 0:\n raise RuntimeError(f\"Selection with cat={cat!r} resulted in empty Dataset\")\n\n self._add_vars(ds, vardef)\n\n # baseline correction\n if isinstance(baseline, str):\n raise NotImplementedError\n elif baseline and not epoch.post_baseline_trigger_shift:\n for e in ds['evoked']:\n rescale(e.data, e.times, baseline, 'mean', copy=False)\n\n # convert to NDVar\n if ndvar:\n pipe = self._raw[self.get('raw')]\n info = ds[0, 'evoked'].info\n for data_kind in data.data_to_ndvar(info):\n sysname = pipe.get_sysname(info, subject, data_kind)\n connectivity = pipe.get_connectivity(data_kind)\n name = 'meg' if data_kind == 'mag' else data_kind\n ds[name] = load.fiff.evoked_ndvar(ds['evoked'], data=data_kind, sysname=sysname, connectivity=connectivity)\n if data_kind != 'eog' and isinstance(data.sensor, str):\n ds[name] = getattr(ds[name], data.sensor)('sensor')\n # if ndvar != 'both':\n # del ds['evoked']\n\n return ds\n\n def load_epochs_stf(self, subjects=None, baseline=True, mask=True, morph=False, keep_stc=False, **state):\n \"\"\"Load frequency space single trial data\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. 
Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification. The default is True.\n mask : bool | str\n Discard data that is labelled 'unknown' by the parcellation (only\n applies to NDVars, default True).\n morph : bool\n Morph the source estimates to the common_brain (default False).\n keep_stc : bool\n Keep the source timecourse data in the Dataset that is returned\n (default False).\n ...\n State parameters.\n \"\"\"\n ds = self.load_epochs_stc(subjects, baseline, ndvar=True, morph=morph, mask=mask, **state)\n name = 'srcm' if morph else 'src'\n\n # apply morlet transformation\n freq_params = self.freqs[self.get('freq')]\n freq_range = freq_params['frequencies']\n ds['stf'] = cwt_morlet(ds[name], freq_range, use_fft=True,\n n_cycles=freq_params['n_cycles'],\n zero_mean=False, out='magnitude')\n\n if not keep_stc:\n del ds[name]\n\n return ds\n\n def load_evoked_stf(self, subjects=None, baseline=True, mask=True, morph=False, keep_stc=False, **state):\n \"\"\"Load frequency space evoked data\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification. The default is True.\n mask : bool | str\n Whether to just load the sources from the parcellation that are not\n defined as \"unknown\". Default is True.\n morph : bool\n Morph the source estimates to the common_brain (default False).\n keep_stc : bool\n Keep the source timecourse data in the Dataset that is returned\n (default False).\n ...\n State parameters.\n \"\"\"\n ds = self.load_evoked_stc(subjects, baseline, morph=morph, mask=mask, **state)\n name = 'srcm' if morph else 'src'\n\n # apply morlet transformation\n freq_params = self.freqs[self.get('freq')]\n freq_range = freq_params['frequencies']\n ds['stf'] = cwt_morlet(ds[name], freq_range, use_fft=True,\n n_cycles=freq_params['n_cycles'],\n zero_mean=False, out='magnitude')\n\n if not keep_stc:\n del ds[name]\n\n return ds\n\n def load_evoked_stc(self, subjects=None, baseline=True, src_baseline=False,\n cat=None, keep_evoked=False, morph=False, mask=False,\n data_raw=False, vardef=None, decim=None, ndvar=True,\n **state):\n \"\"\"Load evoked source estimates.\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification. The default is True.\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. 
The default is to\n not apply baseline correction.\n cat : sequence of cell-names\n Only load data for these cells (cells of model).\n keep_evoked : bool\n Keep the sensor space data in the Dataset that is returned (default\n False).\n morph : bool\n Morph the source estimates to the common_brain (default False).\n mask : bool | str\n Discard data that is labelled 'unknown' by the parcellation (only\n applies to NDVars, default False). Can be set to a parcellation\n name or ``True`` to use the current parcellation.\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n vardef : str\n Name of a test defining additional variables.\n decim : int\n Override the epoch decim factor.\n ndvar : bool\n Add the source estimates as NDVar named \"src\" instead of a list of\n :class:`mne.SourceEstimate` objects named \"stc\" (default True).\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-epoch`: which events to use and time window\n - :ref:`state-rej`: which trials to use\n - :ref:`state-model`: how to group trials into conditions\n - :ref:`state-equalize_evoked_count`: control number of trials per cell\n - :ref:`state-cov`: covariance matrix for inverse solution\n - :ref:`state-src`: source space\n - :ref:`state-inv`: inverse solution\n\n \"\"\"\n if isinstance(mask, str):\n state['parc'] = mask\n # load sensor data (needs state in case it has 'group' entry)\n sns_ndvar = keep_evoked and ndvar\n ds = self.load_evoked(subjects, baseline, sns_ndvar, cat, decim, data_raw, vardef, **state)\n\n # check baseline\n epoch = self._epochs[self.get('epoch')]\n if src_baseline and epoch.post_baseline_trigger_shift:\n raise NotImplementedError(f\"src_baseline={src_baseline!r}: post_baseline_trigger_shift is not implemented for baseline correction in source space\")\n elif src_baseline is True:\n src_baseline = epoch.baseline\n\n # MRI subjects\n common_brain = self.get('common_brain')\n meg_subjects = ds['subject'].cells\n from_subjects = {} # for the purpose of morphing\n mri_subjects = {} # for representing\n for subject in meg_subjects:\n mri_subjects[subject] = self.get('mrisubject', subject=subject)\n if is_fake_mri(self.get('mri-dir')):\n from_subjects[subject] = common_brain\n else:\n from_subjects[subject] = mri_subjects[subject]\n\n # make sure annot files are available (needed only for NDVar)\n if ndvar:\n if morph:\n self.make_annot(mrisubject=common_brain)\n elif len(meg_subjects) > 1:\n raise ValueError(f\"ndvar=True, morph=False with multiple subjects: Can't create ndvars with data from different brains\")\n else:\n self.make_annot(mrisubject=mri_subjects[meg_subjects[0]])\n\n # convert evoked objects\n stcs = []\n invs = {}\n mm_cache = CacheDict(self.load_morph_matrix, 'mrisubject')\n for subject, evoked in tqdm(ds.zip('subject', 'evoked'), \"Localize\", ds.n_cases):\n # get inv\n if subject in invs:\n inv = invs[subject]\n else:\n inv = invs[subject] = self.load_inv(evoked, subject=subject)\n\n # apply inv\n stc = apply_inverse(evoked, inv, **self._params['apply_inv_kw'])\n\n # baseline correction\n if src_baseline:\n rescale(stc._data, stc.times, src_baseline, 'mean', copy=False)\n\n if morph:\n subject_from = from_subjects[subject]\n if subject_from == common_brain:\n stc.subject = common_brain\n else:\n mm, v_to = mm_cache[subject_from]\n stc = mne.morph_data_precomputed(subject_from, common_brain, stc, v_to, mm)\n stcs.append(stc)\n\n # add to Dataset\n if ndvar:\n if morph:\n key, subject 
= 'srcm', common_brain\n else:\n key, subject = 'src', mri_subjects[meg_subjects[0]]\n src = self.get('src')\n mri_sdir = self.get('mri-sdir')\n method = self._params['apply_inv_kw']['method']\n fixed = self._params['make_inv_kw'].get('fixed', False)\n parc = self.get('parc') or None\n ds[key] = load.fiff.stc_ndvar(stcs, subject, src, mri_sdir, method, fixed, parc=parc, connectivity=self.get('connectivity'))\n if mask:\n _mask_ndvar(ds, key)\n else:\n key = 'stcm' if morph else 'stc'\n ds[key] = stcs\n\n if not keep_evoked:\n del ds['evoked']\n\n return ds\n\n def load_fwd(self, surf_ori=True, ndvar=False, mask=None, **state):\n \"\"\"Load the forward solution\n\n Parameters\n ----------\n surf_ori : bool\n Force surface orientation (default True; only applies if\n ``ndvar=False``, :class:`NDVar` forward operators are always\n surface based).\n ndvar : bool\n Return forward solution as :class:`NDVar` (default is\n :class:`mne.forward.Forward`).\n mask : str | bool\n Remove sources labelled \"unknown\". Can be parcellation name or True,\n in which case the current parcellation is used.\n ...\n State parameters.\n\n Returns\n -------\n forward_operator : mne.forward.Forward | NDVar\n Forward operator.\n \"\"\"\n if mask and not ndvar:\n raise NotImplementedError(\"mask is only implemented for ndvar=True\")\n elif isinstance(mask, str):\n state['parc'] = mask\n mask = True\n fwd_file = self.get('fwd-file', make=True, **state)\n src = self.get('src')\n if ndvar:\n if src.startswith('vol'):\n parc = None\n assert mask is None\n else:\n self.make_annot()\n parc = self.get('parc')\n fwd = load.fiff.forward_operator(fwd_file, src, self.get('mri-sdir'), parc)\n if mask:\n fwd = fwd.sub(source=np.invert(\n fwd.source.parc.startswith('unknown')))\n return fwd\n else:\n fwd = mne.read_forward_solution(fwd_file)\n if surf_ori:\n mne.convert_forward_solution(fwd, surf_ori, copy=False)\n return fwd\n\n def load_ica(self, **state):\n \"\"\"Load the mne-python ICA object\n\n Returns\n -------\n ica : mne.preprocessing.ICA\n ICA object for the current raw/rej setting.\n ...\n State parameters.\n \"\"\"\n path = self.make_ica(**state)\n return mne.preprocessing.read_ica(path)\n\n def load_inv(self, fiff=None, ndvar=False, mask=None, **state):\n \"\"\"Load the inverse operator\n\n Parameters\n ----------\n fiff : Raw | Epochs | Evoked | ...\n Object which provides the mne info dictionary (default: load the\n raw file).\n ndvar : bool\n Return the inverse operator as NDVar (default is \n :class:`mne.minimum_norm.InverseOperator`). The NDVar representation \n does not take into account any direction selectivity (loose/free \n orientation) or noise normalization properties.\n mask : str | bool\n Remove sources labelled \"unknown\". 
Can be parcellation name or True,\n in which case the current parcellation is used.\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-raw`: preprocessing pipeline\n - :ref:`state-rej`: which trials to use\n - :ref:`state-cov`: covariance matrix for inverse solution\n - :ref:`state-src`: source space\n - :ref:`state-inv`: inverse solution\n\n \"\"\"\n if mask and not ndvar:\n raise NotImplementedError(\"mask is only implemented for ndvar=True\")\n elif isinstance(mask, str):\n state['parc'] = mask\n mask = True\n\n if state:\n self.set(**state)\n\n inv = dst = None\n if self.cache_inv:\n subject = self.get('subject')\n fwd_recording = self._get_fwd_recording(subject)\n with self._temporary_state:\n dst = self.get('inv-file', mkdir=True, recording=fwd_recording)\n if exists(dst) and cache_valid(getmtime(dst), self._inv_mtime(fwd_recording)):\n inv = mne.minimum_norm.read_inverse_operator(dst)\n\n if inv is None:\n src = self.get('src')\n if src[:3] == 'vol':\n inv = self.get('inv')\n if not (inv.startswith('vec') or inv.startswith('free')):\n raise ValueError(f'inv={inv!r} with src={src!r}: volume source space requires free or vector inverse')\n\n if fiff is None:\n fiff = self.load_raw()\n\n inv = make_inverse_operator(fiff.info, self.load_fwd(), self.load_cov(), use_cps=True, **self._params['make_inv_kw'])\n if dst:\n mne.minimum_norm.write_inverse_operator(dst, inv)\n\n if ndvar:\n inv = load.fiff.inverse_operator(inv, self.get('src'), self.get('mri-sdir'), self.get('parc'))\n if mask:\n inv = inv.sub(source=~inv.source.parc.startswith('unknown'))\n return inv\n\n def load_label(self, label, **kwargs):\n \"\"\"Retrieve a label as mne Label object\n\n Parameters\n ----------\n label : str\n Name of the label. If the label name does not end in '-lh' or '-rh',\n the combination of the labels ``label + '-lh'`` and\n ``label + '-rh'`` is returned.\n ...\n State parameters.\n \"\"\"\n labels = self._load_labels(label, **kwargs)\n if label in labels:\n return labels[label]\n elif not label.endswith(('-lh', '-rh')):\n return labels[label + '-lh'] + labels[label + '-rh']\n else:\n raise ValueError(\"Label %r could not be found in parc %r.\"\n % (label, self.get('parc')))\n\n def _load_labels(self, regexp=None, **kwargs):\n \"\"\"Load labels from an annotation file.\"\"\"\n self.make_annot(**kwargs)\n mri_sdir = self.get('mri-sdir')\n labels = mne.read_labels_from_annot(self.get('mrisubject'),\n self.get('parc'), regexp=regexp,\n subjects_dir=mri_sdir)\n return {l.name: l for l in labels}\n\n def load_morph_matrix(self, **state):\n \"\"\"Load the morph matrix from mrisubject to common_brain\n\n Parameters\n ----------\n ...\n State parameters.\n\n Returns\n -------\n mm : sparse matrix\n Morph matrix.\n vertices_to : list of 2 array\n Vertices of the morphed data.\n \"\"\"\n subjects_dir = self.get('mri-sdir', **state)\n subject_to = self.get('common_brain')\n subject_from = self.get('mrisubject')\n\n src_to = self.load_src(mrisubject=subject_to, match=False)\n src_from = self.load_src(mrisubject=subject_from, match=False)\n\n vertices_to = [src_to[0]['vertno'], src_to[1]['vertno']]\n vertices_from = [src_from[0]['vertno'], src_from[1]['vertno']]\n\n mm = mne.compute_morph_matrix(subject_from, subject_to, vertices_from,\n vertices_to, None, subjects_dir)\n return mm, vertices_to\n\n def load_neighbor_correlation(self, subjects=None, epoch=None, **state):\n \"\"\"Load sensor neighbor correlation\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. 
Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n epoch : str\n Epoch to use for computing neighbor-correlation (by default, the\n whole session is used).\n\n Returns\n -------\n nc : NDVar | Dataset\n Sensor neighbor-correlation as :class:`NDVar` for a single subject\n or as :class:`Dataset` for multiple subjects.\n \"\"\"\n subject, group = self._process_subject_arg(subjects, state)\n if group is not None:\n if state:\n self.set(**state)\n lines = [(subject, self.load_neighbor_correlation(1, epoch)) for subject in self]\n return Dataset.from_caselist(['subject', 'nc'], lines)\n if epoch:\n if epoch is True:\n epoch = self.get('epoch')\n epoch_params = self._epochs[epoch]\n if len(epoch_params.sessions) != 1:\n raise ValueError(f\"epoch={epoch!r}: epoch has multiple session\")\n ds = self.load_epochs(epoch=epoch, reject=False, decim=1, **state)\n data = concatenate(ds['meg'])\n else:\n data = self.load_raw(ndvar=True, **state)\n return neighbor_correlation(data)\n\n def load_raw(self, add_bads=True, preload=False, ndvar=False, decim=1, **kwargs):\n \"\"\"\n Load a raw file as mne Raw object.\n\n Parameters\n ----------\n add_bads : bool | list\n Add bad channel information to the bad channels text file (default\n True).\n preload : bool\n Load raw data into memory (default False; see\n :func:`mne.io.read_raw_fif` parameter).\n ndvar : bool\n Load as NDVar instead of mne Raw object (default False).\n decim : int\n Decimate data (default 1, i.e. no decimation; value other than 1\n implies ``preload=True``)\n ...\n Applicable :ref:`state-parameters`:\n\n - :ref:`state-session`: from which session to load raw data\n - :ref:`state-raw`: preprocessing pipeline\n\n Notes\n -----\n Bad channels defined in the raw file itself are ignored in favor of the\n bad channels in the bad channels file.\n \"\"\"\n pipe = self._raw[self.get('raw', **kwargs)]\n if decim > 1:\n preload = True\n raw = pipe.load(self.get('subject'), self.get('recording'), add_bads, preload)\n if decim > 1:\n if ndvar:\n # avoid warning for downsampling event channel\n stim_picks = np.empty(0)\n events = np.empty((0, 3))\n else:\n stim_picks = events = None\n sfreq = int(round(raw.info['sfreq'] / decim))\n raw.resample(sfreq, stim_picks=stim_picks, events=events)\n\n if ndvar:\n data = TestDims('sensor')\n data_kind = data.data_to_ndvar(raw.info)[0]\n sysname = pipe.get_sysname(raw.info, self.get('subject'), data_kind)\n connectivity = pipe.get_connectivity(data_kind)\n raw = load.fiff.raw_ndvar(raw, sysname=sysname, connectivity=connectivity)\n\n return raw\n\n def _load_result_plotter(self, test, tstart, tstop, pmin, parc=None,\n mask=None, samples=10000, data='source',\n baseline=True, src_baseline=None,\n colors=None, labels=None, h=1.2, rc=None,\n dst=None, vec_fmt='svg', pix_fmt='png', **kwargs):\n \"\"\"Load cluster-based test result plotter\n\n Parameters\n ----------\n test : str\n Name of the test.\n tstart, tstop, pmin, parc, mask, samples, data, baseline, src_baseline\n Test parameters.\n colors : dict\n Colors for data cells as ``{cell: matplotlib_color}`` dictionary.\n labels : dict\n Labels for data in a ``{cell: label}`` dictionary (the default is to\n use cell names).\n h : scalar\n Plot height in inches (default 1.1).\n rc : dict\n Matplotlib rc-parameters dictionary (the default is optimized for\n the default plot size ``h=1.1``).\n dst : str\n Directory in which 
to place results (default is the ``result plots``\n directory).\n vec_fmt : str\n Format for vector graphics (default 'pdf').\n pix_fmt : str\n Format for pixel graphics (default 'png').\n ...\n State parameters.\n \"\"\"\n if not isinstance(self._tests[test], EvokedTest):\n raise NotImplementedError(\"Result-plots for %s\" %\n self._tests[test].__class__.__name__)\n elif data != 'source':\n raise NotImplementedError(\"data=%s\" % repr(data))\n elif not isinstance(pmin, float):\n raise NotImplementedError(\"Threshold-free tests\")\n\n from .._result_plots import ClusterPlotter\n\n # calls _set_analysis_options():\n ds, res = self.load_test(test, tstart, tstop, pmin, parc, mask, samples,\n data, baseline, src_baseline, True,\n **kwargs)\n if dst is None:\n dst = self.get('res-plot-dir', mkdir=True)\n\n return ClusterPlotter(ds, res, colors, dst, vec_fmt, pix_fmt, labels, h,\n rc)\n\n def load_selected_events(self, subjects=None, reject=True, add_bads=True,\n index=True, data_raw=False, vardef=None, cat=None,\n **kwargs):\n \"\"\"\n Load events and return a subset based on epoch and rejection\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n reject : bool | 'keep'\n Reject bad trials. If ``True`` (default), bad trials are removed\n from the Dataset. Set to ``False`` to ignore the trial rejection.\n Set ``reject='keep'`` to load the rejection (added it to the events\n as ``'accept'`` variable), but keep bad trails.\n add_bads : False | True | list\n Add bad channel information to the Raw. If True, bad channel\n information is retrieved from the bad channels file. 
Alternatively,\n a list of bad channels can be specified.\n index : bool | str\n Index the Dataset before rejection (provide index name as str).\n data_raw : bool\n Keep the :class:`mne.io.Raw` instance in ``ds.info['raw']``\n (default False).\n vardef : str\n Name of a test defining additional variables.\n cat : sequence of cell-names\n Only load data for these cells (cells of model).\n ...\n State parameters.\n\n Notes\n -----\n When trial rejection is set to automatic, not rejection is performed\n because no epochs are loaded.\n \"\"\"\n # process arguments\n if reject not in (True, False, 'keep'):\n raise ValueError(f\"reject={reject!r}\")\n\n if index is True:\n index = 'index'\n elif index and not isinstance(index, str):\n raise TypeError(f\"index={index!r}\")\n\n # case of loading events for a group\n subject, group = self._process_subject_arg(subjects, kwargs)\n if group is not None:\n if data_raw:\n raise ValueError(f\"data_var={data_raw!r}: can't keep raw when combining subjects\")\n dss = [self.load_selected_events(reject=reject, add_bads=add_bads, index=index, vardef=vardef) for _ in self.iter(group=group)]\n ds = combine(dss)\n return ds\n\n epoch = self._epochs[self.get('epoch')]\n if isinstance(epoch, EpochCollection):\n raise ValueError(f\"epoch={self.get('epoch')!r}; can't load events for collection epoch\")\n\n # rejection comes from somewhere else\n if isinstance(epoch, SuperEpoch):\n with self._temporary_state:\n dss = []\n raw = None\n # find bad channels\n if isinstance(add_bads, Sequence):\n bad_channels = list(add_bads)\n elif add_bads:\n bad_channels = sorted(set.union(*(\n set(self.load_bad_channels(session=session)) for\n session in epoch.sessions)))\n else:\n bad_channels = []\n # load events\n for session in epoch.sessions:\n self.set(session=session)\n # load events for this session\n session_dss = []\n for sub_epoch in epoch.sub_epochs:\n if self._epochs[sub_epoch].session != session:\n continue\n ds = self.load_selected_events(subject, reject, add_bads, index, True, epoch=sub_epoch)\n ds[:, 'epoch'] = sub_epoch\n session_dss.append(ds)\n ds = combine(session_dss)\n dss.append(ds)\n # combine raw\n raw_ = session_dss[0].info['raw']\n raw_.info['bads'] = bad_channels\n if raw is None:\n raw = raw_\n else:\n ds['i_start'] += raw.last_samp + 1 - raw_.first_samp\n raw.append(raw_)\n\n # combine bad channels\n ds = combine(dss)\n if data_raw:\n ds.info['raw'] = raw\n ds.info[BAD_CHANNELS] = bad_channels\n elif isinstance(epoch, SecondaryEpoch):\n with self._temporary_state:\n ds = self.load_selected_events(None, 'keep' if reject else False, add_bads, index, data_raw, epoch=epoch.sel_epoch)\n\n if epoch.sel:\n ds = ds.sub(epoch.sel)\n if index:\n ds.index(index)\n\n if reject is True:\n if self._artifact_rejection[self.get('rej')]['kind'] is not None:\n ds = ds.sub('accept')\n else:\n rej_params = self._artifact_rejection[self.get('rej')]\n # load files\n with self._temporary_state:\n ds = self.load_events(add_bads=add_bads, data_raw=data_raw, session=epoch.session)\n if reject and rej_params['kind'] is not None:\n rej_file = self.get('rej-file')\n if exists(rej_file):\n ds_sel = load.unpickle(rej_file)\n else:\n rej_file = self._get_rel('rej-file', 'root')\n raise FileMissing(f\"The rejection file at {rej_file} does not exist. 
Run .make_epoch_selection() first.\")\n else:\n ds_sel = None\n\n # primary event selection\n if epoch.sel:\n ds = ds.sub(epoch.sel)\n if index:\n ds.index(index)\n if epoch.n_cases is not None and ds.n_cases != epoch.n_cases:\n raise RuntimeError(f\"Number of epochs {ds.n_cases}, expected {epoch.n_cases}\")\n\n # rejection\n if ds_sel is not None:\n # check file\n if not np.all(ds['trigger'] == ds_sel['trigger']):\n # TODO: this warning should be given in make_epoch_selection already\n if np.all(ds[:-1, 'trigger'] == ds_sel['trigger']):\n ds = ds[:-1]\n self._log.warning(self.format(\"Last epoch for {subject} is missing\"))\n elif np.all(ds[1:, 'trigger'] == ds_sel['trigger']):\n ds = ds[1:]\n self._log.warning(self.format(\"First epoch for {subject} is missing\"))\n else:\n raise RuntimeError(f\"The epoch selection file contains different events (trigger IDs) from the epoch data loaded from the raw file. If the events included in the epoch were changed intentionally, delete the corresponding epoch rejection file and redo epoch rejection: {rej_file}\")\n\n if rej_params['interpolation']:\n ds.info[INTERPOLATE_CHANNELS] = True\n if INTERPOLATE_CHANNELS in ds_sel:\n ds[INTERPOLATE_CHANNELS] = ds_sel[INTERPOLATE_CHANNELS]\n else:\n ds[INTERPOLATE_CHANNELS] = Datalist([[]] * ds.n_cases,\n INTERPOLATE_CHANNELS,\n 'strlist')\n else:\n ds.info[INTERPOLATE_CHANNELS] = False\n\n # subset events\n if reject == 'keep':\n ds['accept'] = ds_sel['accept']\n elif reject is True:\n ds = ds.sub(ds_sel['accept'])\n else:\n raise RuntimeError(\"reject=%s\" % repr(reject))\n\n # bad channels\n if add_bads:\n if BAD_CHANNELS in ds_sel.info:\n ds.info[BAD_CHANNELS] = ds_sel.info[BAD_CHANNELS]\n else:\n ds.info[BAD_CHANNELS] = []\n else: # no artifact rejection\n ds.info[INTERPOLATE_CHANNELS] = False\n ds.info[BAD_CHANNELS] = []\n\n # apply trigger-shift\n if epoch.trigger_shift:\n shift = epoch.trigger_shift\n if isinstance(shift, str):\n shift = ds.eval(shift)\n if isinstance(shift, Var):\n shift = shift.x\n\n if np.isscalar(shift):\n ds['i_start'] += int(round(shift * ds.info['sfreq']))\n else:\n ds['i_start'] += np.round(shift * ds.info['sfreq']).astype(int)\n\n # Additional variables\n self._add_vars(ds, epoch.vars)\n self._add_vars(ds, vardef)\n\n # apply cat subset\n if cat:\n model = ds.eval(self.get('model'))\n idx = model.isin(cat)\n ds = ds.sub(idx)\n\n return ds\n\n def _load_spm(self, baseline=True, src_baseline=False):\n \"Load LM\"\n subject = self.get('subject')\n test = self.get('test')\n test_obj = self._tests[test]\n if not isinstance(test_obj, TwoStageTest):\n raise NotImplementedError(\"Test kind %r\" % test_obj.__class__.__name__)\n ds = self.load_epochs_stc(subject, baseline, src_baseline, mask=True, vardef=test_obj.vars)\n return testnd.LM('src', test_obj.stage_1, ds, subject=subject)\n\n def load_src(self, add_geom=False, ndvar=False, **state):\n \"\"\"Load the current source space\n \n Parameters\n ----------\n add_geom : bool\n Parameter for :func:`mne.read_source_spaces`.\n ndvar : bool\n Return as NDVar Dimension object (default False).\n ...\n State parameters.\n \"\"\"\n fpath = self.get('src-file', make=True, **state)\n if ndvar:\n src = self.get('src')\n if src.startswith('vol'):\n return VolumeSourceSpace.from_file(\n self.get('mri-sdir'), self.get('mrisubject'), src)\n return SourceSpace.from_file(\n self.get('mri-sdir'), self.get('mrisubject'), src, self.get('parc'))\n return mne.read_source_spaces(fpath, add_geom)\n\n def load_test(self, test, tstart=None, tstop=None, 
pmin=None, parc=None,\n mask=None, samples=10000, data='source', baseline=True,\n src_baseline=None, return_data=False, make=False, **state):\n \"\"\"Create and load spatio-temporal cluster test results\n\n Parameters\n ----------\n test : None | str\n Test for which to create a report (entry in MneExperiment.tests;\n None to use the test that was specified most recently).\n tstart : scalar\n Beginning of the time window for the test in seconds\n (default is the beginning of the epoch).\n tstop : scalar\n End of the time window for the test in seconds\n (default is the end of the epoch).\n pmin : float | 'tfce' | None\n Kind of test.\n parc : None | str\n Parcellation for which to collect distribution.\n mask : None | str\n Mask whole brain.\n samples : int\n Number of random permutations of the data used to determine cluster\n p values (default 10'000).\n data : str\n Data to test, for example:\n\n - ``'sensor'`` spatio-temporal test in sensor space.\n - ``'source'`` spatio-temporal test in source space.\n - ``'source.mean'`` ROI mean time course.\n - ``'sensor.rms'`` RMS across sensors.\n\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. The default is to\n not apply baseline correction.\n return_data : bool\n Return the data along with the test result (see below).\n\n .. Warning::\n Single trial data (i.e., two-stage tests) take up a lot of\n memory and it might not be possible to load all data at once.\n Instead, loop through subjects and collect summary statistics.\n\n make : bool\n If the target file does not exist, create it (could take a long\n time depending on the test; if False, raise an IOError).\n ...\n State parameters (Use the ``group`` state parameter to select the \n subject group for which to perform the test).\n\n Returns\n -------\n ds : Dataset (if return_data==True)\n Data that forms the basis of the test.\n res : NDTest | ROITestResult\n Test result for the specified test (when performing tests in ROIs,\n an :class:`~_experiment.ROITestResult` object is returned).\n \"\"\"\n self.set(test=test, **state)\n data = TestDims.coerce(data, morph=True)\n self._set_analysis_options(data, baseline, src_baseline, pmin, tstart, tstop, parc, mask)\n return self._load_test(test, tstart, tstop, pmin, parc, mask, samples, data, baseline, src_baseline, return_data, make)\n\n def _load_test(self, test, tstart, tstop, pmin, parc, mask, samples, data,\n baseline, src_baseline, return_data, make):\n \"Load a cached test after _set_analysis_options() has been called\"\n test_obj = self._tests[test]\n\n dst = self.get('test-file', mkdir=True)\n\n # try to load cached test\n res = None\n desc = self._get_rel('test-file', 'test-dir')\n if self._result_file_mtime(dst, data):\n try:\n res = load.unpickle(dst)\n if data.source is True:\n update_subjects_dir(res, self.get('mri-sdir'), 2)\n except OldVersionError:\n res = None\n else:\n if res.samples >= samples or res.samples == -1:\n self._log.info(\"Load cached test: %s\", desc)\n if not return_data:\n return res\n elif not make:\n raise IOError(\"The requested test %s is cached with \"\n \"samples=%i, but you request samples=%i; Set \"\n \"make=True to perform the test.\" %\n (desc, res.samples, samples))\n else:\n res = None\n elif not make and exists(dst):\n raise IOError(\"The requested test is 
outdated: %s. Set make=True \"\n \"to perform the test.\" % desc)\n\n if res is None and not make:\n raise IOError(\"The requested test is not cached: %s. Set make=True \"\n \"to perform the test.\" % desc)\n\n # parc/mask\n parc_dim = None\n if data.source is True:\n if parc:\n mask = True\n parc_dim = 'source'\n elif mask:\n if pmin is None: # can as well collect dist for parc\n parc_dim = 'source'\n elif isinstance(data.source, str):\n if not isinstance(parc, str):\n raise TypeError(f\"parc needs to be set for ROI test (data={data.string!r})\")\n elif mask is not None:\n raise TypeError(f\"mask={mask!r}: invalid for data={data.string!r}\")\n elif parc is not None:\n raise TypeError(f\"parc={parc!r}: invalid for data={data.string!r}\")\n elif mask is not None:\n raise TypeError(f\"mask={mask!r}: invalid for data={data.string!r}\")\n\n do_test = res is None\n if do_test:\n test_kwargs = self._test_kwargs(samples, pmin, tstart, tstop, data, parc_dim)\n else:\n test_kwargs = None\n\n if isinstance(test_obj, TwoStageTest):\n if isinstance(data.source, str):\n res_data, res = self._make_test_rois_2stage(baseline, src_baseline, test_obj, samples, test_kwargs, res, data, return_data)\n elif data.source is True:\n res_data, res = self._make_test_2stage(baseline, src_baseline, mask, test_obj, test_kwargs, res, data, return_data)\n else:\n raise NotImplementedError(f\"Two-stage test with data={data.string!r}\")\n elif isinstance(data.source, str):\n res_data, res = self._make_test_rois(baseline, src_baseline, test_obj, samples, pmin, test_kwargs, res, data)\n else:\n if data.sensor:\n res_data = self.load_evoked(True, baseline, True, test_obj._within_cat, data=data, vardef=test_obj.vars)\n elif data.source:\n res_data = self.load_evoked_stc(True, baseline, src_baseline, morph=True, cat=test_obj._within_cat, mask=mask, vardef=test_obj.vars)\n else:\n raise ValueError(f\"data={data.string!r}\")\n\n if do_test:\n self._log.info(\"Make test: %s\", desc)\n res = self._make_test(data.y_name, res_data, test_obj, test_kwargs)\n\n if do_test:\n save.pickle(res, dst)\n\n if return_data:\n return res_data, res\n else:\n return res\n\n @staticmethod\n def _src_to_label_tc(ds, func):\n src = ds.pop('src')\n out = {}\n for label in src.source.parc.cells:\n if label.startswith('unknown-'):\n continue\n label_ds = ds.copy()\n label_ds['label_tc'] = getattr(src, func)(source=label)\n out[label] = label_ds\n return out\n\n def _make_test_rois(self, baseline, src_baseline, test_obj, samples, pmin, test_kwargs, res, data):\n # load data\n dss_list = []\n n_trials_dss = []\n labels = set()\n subjects = self.get_field_values('subject')\n for _ in self.iter(progress_bar=\"Loading data\"):\n ds = self.load_evoked_stc(1, baseline, src_baseline, vardef=test_obj.vars)\n dss = self._src_to_label_tc(ds, data.source)\n n_trials_dss.append(ds)\n dss_list.append(dss)\n labels.update(dss.keys())\n\n label_dss = {label: [dss[label] for dss in dss_list if label in dss] for label in labels}\n label_data = {label: combine(dss, incomplete='drop') for label, dss in label_dss.items()}\n if res is not None:\n return label_data, res\n\n n_trials_ds = combine(n_trials_dss, incomplete='drop')\n\n # n subjects per label\n n_per_label = {label: len(dss) for label, dss in label_dss.items()}\n\n # compute results\n do_mcc = (\n len(labels) > 1 and # more than one ROI\n pmin not in (None, 'tfce') and # not implemented\n len(set(n_per_label.values())) == 1 # equal n permutations\n )\n label_results = {\n label: self._make_test('label_tc', ds, 
test_obj, test_kwargs, do_mcc)\n for label, ds in label_data.items()\n }\n\n if do_mcc:\n cdists = [res._cdist for res in label_results.values()]\n merged_dist = _MergedTemporalClusterDist(cdists)\n else:\n merged_dist = None\n\n res = ROITestResult(subjects, samples, n_trials_ds, merged_dist, label_results)\n return label_data, res\n\n def _make_test_rois_2stage(self, baseline, src_baseline, test_obj, samples, test_kwargs, res, data, return_data):\n # stage 1\n lms = []\n res_data = []\n n_trials_dss = []\n subjects = self.get_field_values('subject')\n for subject in self.iter(progress_bar=\"Loading stage 1 models\"):\n if test_obj.model is None:\n ds = self.load_epochs_stc(1, baseline, src_baseline, mask=True, vardef=test_obj.vars)\n else:\n ds = self.load_evoked_stc(1, baseline, src_baseline, mask=True, vardef=test_obj.vars, model=test_obj._within_model)\n\n dss = self._src_to_label_tc(ds, data.source)\n if res is None:\n lms.append({label: test_obj.make_stage_1('label_tc', ds, subject) for label, ds in dss.items()})\n n_trials_dss.append(ds)\n if return_data:\n res_data.append(dss)\n\n # stage 2\n if res is None:\n labels = set(chain.from_iterable(lms))\n ress = {}\n for label in sorted(labels):\n label_lms = [subject_lms[label] for subject_lms in lms if label in subject_lms]\n if len(label_lms) <= 2:\n continue\n ress[label] = test_obj.make_stage_2(label_lms, test_kwargs)\n n_trials_ds = combine(n_trials_dss, incomplete='drop')\n res = ROI2StageResult(subjects, samples, n_trials_ds, None, ress)\n\n if return_data:\n data_out = {}\n for label in res.keys():\n label_data = [subject_data[label] for subject_data in res_data if label in subject_data]\n data_out[label] = combine(label_data)\n else:\n data_out = None\n return data_out, res\n\n def _make_test_2stage(self, baseline, src_baseline, mask, test_obj, test_kwargs, res, data, return_data):\n # stage 1\n lms = []\n res_data = []\n for subject in self.iter(progress_bar=\"Loading stage 1 models\"):\n if test_obj.model is None:\n ds = self.load_epochs_stc(1, baseline, src_baseline, morph=True, mask=mask, vardef=test_obj.vars)\n else:\n ds = self.load_evoked_stc(1, baseline, src_baseline, morph=True, mask=mask, vardef=test_obj.vars, model=test_obj._within_model)\n\n if res is None:\n lms.append(test_obj.make_stage_1(data.y_name, ds, subject))\n if return_data:\n res_data.append(ds)\n\n # stage 2\n if res is None:\n res = test_obj.make_stage_2(lms, test_kwargs)\n if return_data:\n res_data = combine(res_data)\n return res_data, res\n\n def make_annot(self, redo=False, **state):\n \"\"\"Make sure the annot files for both hemispheres exist\n\n Parameters\n ----------\n redo : bool\n Even if the file exists, recreate it (default False).\n ...\n State parameters.\n\n Returns\n -------\n mtime : float | None\n Modification time of the existing files, or None if they were newly\n created.\n \"\"\"\n self.set(**state)\n\n # variables\n parc, p = self._get_parc()\n if p is None:\n return\n\n mrisubject = self.get('mrisubject')\n common_brain = self.get('common_brain')\n mtime = self._annot_file_mtime()\n if mrisubject != common_brain:\n is_fake = is_fake_mri(self.get('mri-dir'))\n if p.morph_from_fsaverage or is_fake:\n # make sure annot exists for common brain\n self.set(mrisubject=common_brain, match=False)\n common_brain_mtime = self.make_annot()\n self.set(mrisubject=mrisubject, match=False)\n if not redo and cache_valid(mtime, common_brain_mtime):\n return mtime\n elif is_fake:\n for _ in self.iter('hemi'):\n self.make_copy('annot-file', 
'mrisubject', common_brain,\n mrisubject)\n else:\n self.get('label-dir', make=True)\n subjects_dir = self.get('mri-sdir')\n for hemi in ('lh', 'rh'):\n cmd = [\"mri_surf2surf\", \"--srcsubject\", common_brain,\n \"--trgsubject\", mrisubject, \"--sval-annot\", parc,\n \"--tval\", parc, \"--hemi\", hemi]\n subp.run_freesurfer_command(cmd, subjects_dir)\n fix_annot_names(mrisubject, parc, common_brain,\n subjects_dir=subjects_dir)\n return\n\n if not redo and mtime:\n return mtime\n elif not p.make:\n if redo and mtime:\n raise RuntimeError(\n f\"The {parc} parcellation cannot be created automatically \"\n f\"for {mrisubject}. Please update the corresponding \"\n f\"*.annot files manually.\")\n else:\n raise RuntimeError(\n f\"The {parc} parcellation cannot be created automatically \"\n f\"and is missing for {mrisubject}. Please add the \"\n f\"corresponding *.annot files to the subject's label \"\n f\"directory.\")\n\n # make parcs: common_brain | non-morphed\n labels = self._make_annot(parc, p, mrisubject)\n write_labels_to_annot(labels, mrisubject, parc, True,\n self.get('mri-sdir'))\n\n def _make_annot(self, parc, p, subject):\n \"\"\"Return labels\n\n Notes\n -----\n Only called to make custom annotation files for the common_brain\n \"\"\"\n subjects_dir = self.get('mri-sdir')\n if isinstance(p, CombinationParc):\n with self._temporary_state:\n base = {l.name: l for l in self.load_annot(parc=p.base)}\n labels = []\n for name, exp in p.labels.items():\n labels += combination_label(name, exp, base, subjects_dir)\n elif isinstance(p, SeededParc):\n if p.mask:\n with self._temporary_state:\n self.make_annot(parc=p.mask)\n name, extent = SEEDED_PARC_RE.match(parc).groups()\n labels = labels_from_mni_coords(\n p.seeds_for_subject(subject), float(extent), subject, p.surface,\n p.mask, subjects_dir, parc)\n elif isinstance(p, EelbrainParc) and p.name == 'lobes':\n if subject != 'fsaverage':\n raise RuntimeError(\"lobes parcellation can only be created for \"\n \"fsaverage, not for %s\" % subject)\n\n # load source annot\n with self._temporary_state:\n labels = self.load_annot(parc='PALS_B12_Lobes')\n\n # sort labels\n labels = [l for l in labels if l.name[:-3] != 'MEDIAL.WALL']\n\n # rename good labels\n rename_label(labels, 'LOBE.FRONTAL', 'frontal')\n rename_label(labels, 'LOBE.OCCIPITAL', 'occipital')\n rename_label(labels, 'LOBE.PARIETAL', 'parietal')\n rename_label(labels, 'LOBE.TEMPORAL', 'temporal')\n\n # reassign unwanted labels\n targets = ('frontal', 'occipital', 'parietal', 'temporal')\n dissolve_label(labels, 'LOBE.LIMBIC', targets, subjects_dir)\n dissolve_label(labels, 'GYRUS', targets, subjects_dir, 'rh')\n dissolve_label(labels, '???', targets, subjects_dir)\n dissolve_label(labels, '????', targets, subjects_dir, 'rh')\n dissolve_label(labels, '???????', targets, subjects_dir, 'rh')\n elif isinstance(p, LabelParc):\n labels = []\n hemis = ('lh.', 'rh.')\n path = join(subjects_dir, subject, 'label', '%s.label')\n for label in p.labels:\n if label.startswith(hemis):\n labels.append(mne.read_label(path % label))\n else:\n labels.extend(mne.read_label(path % (hemi + label)) for hemi in hemis)\n else:\n raise NotImplementedError(\n \"At least one of the annot files for the custom parcellation \"\n \"%r is missing for %r, and a make function is not \"\n \"implemented.\" % (parc, subject))\n return labels\n\n def make_bad_channels(self, bad_chs=(), redo=False, **kwargs):\n \"\"\"Write the bad channel definition file for a raw file\n\n If the file already exists, new bad channels are 
added to the old ones.\n In order to replace the old file with only the new values, set\n ``redo=True``.\n\n Parameters\n ----------\n bad_chs : iterator of str\n Names of the channels to set as bad. Numerical entries are\n interpreted as \"MEG XXX\". If bad_chs contains entries not present\n in the raw data, a ValueError is raised.\n redo : bool\n If the file already exists, replace it (instead of adding).\n ...\n State parameters.\n\n See Also\n --------\n make_bad_channels_auto : find bad channels automatically\n load_bad_channels : load the current bad_channels file\n merge_bad_channels : merge bad channel definitions for all sessions\n \"\"\"\n pipe = self._raw[self.get('raw', **kwargs)]\n pipe.make_bad_channels(self.get('subject'), self.get('recording'), bad_chs, redo)\n\n def make_bad_channels_auto(self, flat=1e-14, redo=False, **state):\n \"\"\"Automatically detect bad channels\n\n Works on ``raw='raw'``\n\n Parameters\n ----------\n flat : scalar\n Threshold for detecting flat channels: channels with ``std < flat``\n are considered bad (default 1e-14).\n redo : bool\n If the file already exists, replace it (instead of adding).\n ...\n State parameters.\n \"\"\"\n if state:\n self.set(**state)\n pipe = self._raw['raw']\n pipe.make_bad_channels_auto(self.get('subject'), self.get('recording'), flat, redo)\n\n def make_bad_channels_neighbor_correlation(self, r, epoch=None, **state):\n \"\"\"Exclude bad channels based on low average neighbor-correlation\n\n Parameters\n ----------\n r : scalar\n Minimum admissible neighbor correlation. Any channel whose average\n correlation with its neighbors is below this value is added to the\n list of bad channels (e.g., 0.3).\n epoch : str\n Epoch to use for computing neighbor-correlation (by default, the\n whole session is used).\n ...\n State parameters.\n\n Notes\n -----\n Data is loaded for the currently specified ``raw`` setting, but bad\n channels apply to all ``raw`` settings equally. 
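# Illustrative sketch (not the library code) of the two automatic bad-channel
# criteria described in the surrounding docstrings: a flat-signal threshold
# (std < flat) and a minimum average neighbor correlation. The channel names,
# adjacency map, and synthetic data below are made-up assumptions.
import numpy as np

rng = np.random.RandomState(0)
signal = rng.randn(1000)
data = signal + 0.3 * rng.randn(4, 1000)   # four channels sharing one signal
data *= 1e-12                              # tesla-like magnitudes
data[1] *= 1e-4                            # channel 1: (nearly) flat
data[3] = 1e-12 * rng.randn(1000)          # channel 3: uncorrelated noise
names = ['MEG 001', 'MEG 002', 'MEG 003', 'MEG 004']
neighbors = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}

flat_chs = [names[i] for i in range(len(names)) if data[i].std() < 1e-14]
corr = np.corrcoef(data)
low_corr_chs = [names[i] for i, nbrs in neighbors.items()
                if np.mean([corr[i, j] for j in nbrs]) < 0.3]
print(flat_chs)      # ['MEG 002'] -- caught by the flat criterion
print(low_corr_chs)  # ['MEG 004'] -- caught by the neighbor-correlation criterion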
Hence, when using this\n method with multiple subjects, it is important to set ``raw`` to the\n same value.\n \"\"\"\n nc = self.load_neighbor_correlation(1, epoch, **state)\n bad_chs = nc.sensor.names[nc < r]\n if bad_chs:\n self.make_bad_channels(bad_chs)\n\n def make_besa_evt(self, redo=False, **state):\n \"\"\"Make the trigger and event files needed for besa\n\n Parameters\n ----------\n redo : bool\n If besa files already exist, overwrite them.\n ...\n State parameters.\n\n Notes\n -----\n Ignores the *decim* epoch parameter.\n\n Target files are saved relative to the *besa-root* location.\n \"\"\"\n self.set(**state)\n rej = self.get('rej')\n trig_dest = self.get('besa-trig', rej='', mkdir=True)\n evt_dest = self.get('besa-evt', rej=rej, mkdir=True)\n if not redo and exists(evt_dest) and exists(trig_dest):\n return\n\n # load events\n ds = self.load_selected_events(reject='keep')\n\n # save triggers\n if redo or not exists(trig_dest):\n save.meg160_triggers(ds, trig_dest, pad=1)\n if not redo and exists(evt_dest):\n return\n else:\n ds.index('besa_index', 1)\n\n # reject bad trials\n ds = ds.sub('accept')\n\n # save evt\n epoch = self._epochs[self.get('epoch')]\n save.besa_evt(ds, tstart=epoch.tmin, tstop=epoch.tmax, dest=evt_dest)\n\n def make_copy(self, temp, field, src, dst, redo=False):\n \"\"\"Make a copy of a file to a new path by substituting one field value\n\n Parameters\n ----------\n temp : str\n Template of the file which to copy.\n field : str\n Field in which the source and target of the link are distinguished.\n src : str\n Value for field on the source file.\n dst : str\n Value for field on the destination filename.\n redo : bool\n If the target file already exists, overwrite it.\n\n See Also\n --------\n copy : Copy muliple files to a different root directory\n \"\"\"\n dst_path = self.get(temp, mkdir=True, **{field: dst})\n if not redo and exists(dst_path):\n return\n\n src_path = self.get(temp, **{field: src})\n if isdir(src_path):\n raise ValueError(\"Can only copy files, not directories.\")\n shutil.copyfile(src_path, dst_path)\n\n def make_cov(self):\n \"Make a noise covariance (cov) file\"\n dest = self.get('cov-file', mkdir=True)\n if exists(dest):\n mtime = self._cov_mtime()\n if mtime and getmtime(dest) > mtime:\n return\n\n self._log.debug(\"Make cov-file %s\", dest)\n params = self._covs[self.get('cov')]\n method = params.get('method', 'empirical')\n keep_sample_mean = params.get('keep_sample_mean', True)\n reg = params.get('reg', None)\n\n if 'epoch' in params:\n with self._temporary_state:\n ds = self.load_epochs(None, True, False, decim=1, epoch=params['epoch'])\n epochs = ds['epochs']\n cov = mne.compute_covariance(epochs, keep_sample_mean, method=method)\n info = epochs.info\n else:\n with self._temporary_state:\n raw = self.load_raw(session=params['session'])\n cov = mne.compute_raw_covariance(raw, method=method)\n info = raw.info\n epochs = None\n\n if reg is True:\n cov = mne.cov.regularize(cov, info, rank=None)\n elif isinstance(reg, dict):\n cov = mne.cov.regularize(cov, info, **reg)\n elif reg == 'best':\n if mne.pick_types(epochs.info, meg='grad', eeg=True, ref_meg=False).size:\n raise NotImplementedError(\"EEG or gradiometer sensors\")\n elif epochs is None:\n raise NotImplementedError(\"reg='best' for raw covariance\")\n reg_vs = np.arange(0, 0.21, 0.01)\n covs = [mne.cov.regularize(cov, epochs.info, mag=v, rank=None) for v in reg_vs]\n\n # compute whitened global field power\n evoked = epochs.average()\n picks = mne.pick_types(evoked.info, 
meg='mag', ref_meg=False)\n gfps = [mne.whiten_evoked(evoked, cov, picks).data.std(0)\n for cov in covs]\n\n # apply padding\n t_pad = params.get('reg_eval_win_pad', 0)\n if t_pad:\n n_pad = int(t_pad * epochs.info['sfreq'])\n if len(gfps[0]) <= 2 * n_pad:\n msg = \"Covariance padding (%s) is bigger than epoch\" % t_pad\n raise ValueError(msg)\n padding = slice(n_pad, -n_pad)\n gfps = [gfp[padding] for gfp in gfps]\n\n vs = [gfp.mean() for gfp in gfps]\n i = np.argmin(np.abs(1 - np.array(vs)))\n cov = covs[i]\n\n # save cov value\n with open(self.get('cov-info-file', mkdir=True), 'w') as fid:\n fid.write('%s\\n' % reg_vs[i])\n elif reg is not None:\n raise RuntimeError(f\"reg={reg!r} in {params}\")\n\n cov.save(dest)\n\n def _make_evoked(self, decim, data_raw):\n \"\"\"Make files with evoked sensor data.\n\n Parameters\n ----------\n decim : int\n Data decimation factor (the default is the factor specified in the\n epoch definition).\n \"\"\"\n dst = self.get('evoked-file', mkdir=True)\n epoch = self._epochs[self.get('epoch')]\n # determine whether using default decimation\n if decim:\n if epoch.decim:\n default_decim = decim == epoch.decim\n else:\n raw = self.load_raw(False)\n default_decim = decim == raw.info['sfreq'] / epoch.samplingrate\n else:\n default_decim = True\n use_cache = default_decim\n model = self.get('model')\n equal_count = self.get('equalize_evoked_count') == 'eq'\n if use_cache and exists(dst) and cache_valid(getmtime(dst), self._evoked_mtime()):\n ds = self.load_selected_events(data_raw=data_raw)\n ds = ds.aggregate(model, drop_bad=True, equal_count=equal_count,\n drop=('i_start', 't_edf', 'T', 'index', 'trigger'))\n ds['evoked'] = mne.read_evokeds(dst, proj=False)\n return ds\n\n self._log.debug(\"Make evoked %s\", dst)\n # load the epochs (post baseline-correction trigger shift requires\n # baseline corrected evoked\n if epoch.post_baseline_trigger_shift:\n ds = self.load_epochs(ndvar=False, baseline=True, decim=decim, data_raw=data_raw, interpolate_bads='keep')\n else:\n ds = self.load_epochs(ndvar=False, decim=decim, data_raw=data_raw, interpolate_bads='keep')\n\n # aggregate\n ds_agg = ds.aggregate(model, drop_bad=True, equal_count=equal_count,\n drop=('i_start', 't_edf', 'T', 'index', 'trigger'),\n never_drop=('epochs',))\n ds_agg.rename('epochs', 'evoked')\n\n # save\n for e in ds_agg['evoked']:\n e.info['description'] = f\"Eelbrain {CACHE_STATE_VERSION}\"\n if use_cache:\n mne.write_evokeds(dst, ds_agg['evoked'])\n\n return ds_agg\n\n def make_fwd(self):\n \"\"\"Make the forward model\"\"\"\n subject = self.get('subject')\n fwd_recording = self._get_fwd_recording(subject)\n with self._temporary_state:\n dst = self.get('fwd-file', recording=fwd_recording)\n if exists(dst):\n if cache_valid(getmtime(dst), self._fwd_mtime(subject, fwd_recording=fwd_recording)):\n return dst\n # get trans for correct visit for fwd_session\n trans = self.get('trans-file')\n\n src = self.get('src-file', make=True)\n pipe = self._raw[self.get('raw')]\n raw = pipe.load(subject, fwd_recording)\n bem = self._load_bem()\n src = mne.read_source_spaces(src)\n\n self._log.debug(f\"make_fwd {basename(dst)}...\")\n bemsol = mne.make_bem_solution(bem)\n fwd = mne.make_forward_solution(raw.info, trans, src, bemsol, ignore_ref=True)\n for s, s0 in zip(fwd['src'], src):\n if s['nuse'] != s0['nuse']:\n raise RuntimeError(f\"The forward solution {basename(dst)} contains fewer sources than the source space. 
This could be due to a corrupted bem file with sources outside of the inner skull surface.\")\n mne.write_forward_solution(dst, fwd, True)\n return dst\n\n def make_ica_selection(self, epoch=None, decim=None, session=None, **state):\n \"\"\"Select ICA components to remove through a GUI\n\n Parameters\n ----------\n epoch : str\n Epoch to use for visualization in the GUI (default is to use the\n raw data).\n decim : int\n Downsample data for visualization (to improve GUI performance;\n for raw data, the default is ~100 Hz, for epochs the default is the\n epoch setting).\n session : str | list of str\n One or more sessions for which to plot the raw data (this parameter\n can not be used together with ``epoch``; default is the session in\n the current state).\n ...\n State parameters.\n\n Notes\n -----\n Computing ICA decomposition can take a while. In order to precompute\n the decomposition for all subjects before doing the selection use\n :meth:`.make_ica()` in a loop as in::\n\n >>> for subject in e:\n ... e.make_ica()\n ...\n \"\"\"\n # ICA\n path = self.make_ica(**state)\n # display data\n subject = self.get('subject')\n pipe = self._raw[self.get('raw')]\n bads = pipe.load_bad_channels(subject, self.get('recording'))\n with self._temporary_state, warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'The measurement information indicates a low-pass', RuntimeWarning)\n if epoch is None:\n if session is None:\n session = self.get('session')\n raw = pipe.load_concatenated_source_raw(subject, session, self.get('visit'))\n events = mne.make_fixed_length_events(raw)\n ds = Dataset()\n decim = int(raw.info['sfreq'] // 100) if decim is None else decim\n ds['epochs'] = mne.Epochs(raw, events, 1, 0, 1, baseline=None, proj=False, decim=decim, preload=True)\n elif session is not None:\n raise TypeError(f\"session={session!r} with epoch={epoch!r}\")\n else:\n ds = self.load_epochs(ndvar=False, epoch=epoch, reject=False, raw=pipe.source.name, decim=decim, add_bads=bads)\n info = ds['epochs'].info\n data = TestDims('sensor')\n data_kind = data.data_to_ndvar(info)[0]\n sysname = pipe.get_sysname(info, subject, data_kind)\n connectivity = pipe.get_connectivity(data_kind)\n gui.select_components(path, ds, sysname, connectivity)\n\n def make_ica(self, **state):\n \"\"\"Compute ICA decomposition for a :class:`pipeline.RawICA` preprocessing step\n\n If a corresponding file exists, a basic check is done as to whether the\n bad channels have changed, and if so the ICA is recomputed.\n\n Parameters\n ----------\n ...\n State parameters.\n\n Returns\n -------\n path : str\n Path to the ICA file.\n\n Notes\n -----\n ICA decomposition can take some time. This function can be used to\n precompute ICA decompositions for all subjects after trial pre-rejection\n has been completed::\n\n >>> for subject in e:\n ... 
e.make_ica()\n\n \"\"\"\n if state:\n self.set(**state)\n pipe = self._raw[self.get('raw')]\n if not isinstance(pipe, RawICA):\n ica_raws = [key for key, pipe in self._raw.items() if isinstance(pipe, RawICA)]\n if len(ica_raws) > 1:\n raise ValueError(f\"raw={pipe.name!r} does not involve ICA; set raw to an ICA processing step ({enumeration(ica_raws)})\")\n elif len(ica_raws) == 1:\n print(f\"raw: {pipe.name} -> {ica_raws[0]}\")\n return self.make_ica(raw=ica_raws[0])\n else:\n raise RuntimeError(\"Experiment has no RawICA processing step\")\n return pipe.make_ica(self.get('subject'), self.get('visit'))\n\n def make_link(self, temp, field, src, dst, redo=False):\n \"\"\"Make a hard link\n\n Make a hard link at the file with the ``dst`` value on ``field``,\n linking to the file with the ``src`` value of ``field``.\n\n Parameters\n ----------\n temp : str\n Template of the file for which to make a link.\n field : str\n Field in which the source and target of the link are distinguished.\n src : str\n Value for field on the source file.\n dst : str\n Value for field on the destination filename.\n redo : bool\n If the target file already exists, overwrite it.\n \"\"\"\n dst_path = self.get(temp, **{field: dst})\n if not redo and exists(dst_path):\n return\n\n src_path = self.get(temp, **{field: src})\n os.link(src_path, dst_path)\n\n def make_mov_ga_dspm(self, subjects=None, baseline=True, src_baseline=False,\n fmin=2, surf=None, views=None, hemi=None, time_dilation=4.,\n foreground=None, background=None, smoothing_steps=None,\n dst=None, redo=False, **state):\n \"\"\"Make a grand average movie from dSPM values (requires PySurfer 0.6)\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. The default is to\n not apply baseline correction.\n fmin : scalar\n Minimum dSPM value to draw (default 2). fmax is 3 * fmin.\n surf : str\n Surface on which to plot data.\n views : str | tuple of str\n View(s) of the brain to include in the movie.\n hemi : 'lh' | 'rh' | 'both' | 'split'\n Which hemispheres to plot.\n time_dilation : scalar\n Factor by which to slow the passage of time. For example, with\n ``time_dilation=4`` (the default) a segment of data for 500 ms will\n last 2 s.\n foreground : mayavi color\n Figure foreground color (i.e., the text color).\n background : mayavi color\n Figure background color.\n smoothing_steps : None | int\n Number of smoothing steps if data is spatially undersampled (pysurfer\n ``Brain.add_data()`` argument).\n dst : str (optional)\n Path to save the movie. The default is a file in the results\n folder with a name determined based on the input data. Plotting\n parameters (``view`` and all subsequent parameters) are not\n included in the filename. 
\"~\" is expanded to the user's home\n folder.\n redo : bool\n Make the movie even if the target file exists already.\n ...\n State parameters.\n \"\"\"\n state['model'] = ''\n subject, group = self._process_subject_arg(subjects, state)\n data = TestDims(\"source\", morph=bool(group))\n brain_kwargs = self._surfer_plot_kwargs(surf, views, foreground, background,\n smoothing_steps, hemi)\n self._set_analysis_options(data, baseline, src_baseline, None, None, None)\n self.set(equalize_evoked_count='',\n resname=\"GA dSPM %s %s\" % (brain_kwargs['surf'], fmin))\n\n if dst is None:\n if group is None:\n dst = self.get('subject-mov-file', mkdir=True)\n else:\n dst = self.get('group-mov-file', mkdir=True)\n else:\n dst = os.path.expanduser(dst)\n\n if not redo and self._result_file_mtime(dst, data, group is None):\n return\n\n plot._brain.assert_can_save_movies()\n if group is None:\n ds = self.load_evoked_stc(subject, baseline, src_baseline)\n y = ds['src']\n else:\n ds = self.load_evoked_stc(group, baseline, src_baseline, morph=True)\n y = ds['srcm']\n\n brain = plot.brain.dspm(y, fmin, fmin * 3, colorbar=False, **brain_kwargs)\n brain.save_movie(dst, time_dilation)\n brain.close()\n\n def make_mov_ttest(self, subjects=None, model='', c1=None, c0=None, p=0.05,\n baseline=True, src_baseline=False,\n surf=None, views=None, hemi=None, time_dilation=4.,\n foreground=None, background=None, smoothing_steps=None,\n dst=None, redo=False, **state):\n \"\"\"Make a t-test movie (requires PySurfer 0.6)\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n model : None | str\n Model on which the conditions c1 and c0 are defined. The default\n (``''``) is the grand average.\n c1 : None | str | tuple\n Test condition (cell in model). If None, the grand average is\n used and c0 has to be a scalar.\n c0 : str | scalar\n Control condition (cell on model) or scalar against which to\n compare c1.\n p : 0.1 | 0.05 | 0.01 | .001\n Maximum p value to draw.\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. The default is to\n not apply baseline correction.\n surf : str\n Surface on which to plot data.\n views : str | tuple of str\n View(s) of the brain to include in the movie.\n hemi : 'lh' | 'rh' | 'both' | 'split'\n Which hemispheres to plot.\n time_dilation : scalar\n Factor by which to slow the passage of time. For example, with\n ``time_dilation=4`` (the default) a segment of data for 500 ms will\n last 2 s.\n foreground : mayavi color\n Figure foreground color (i.e., the text color).\n background : mayavi color\n Figure background color.\n smoothing_steps : None | int\n Number of smoothing steps if data is spatially undersampled (pysurfer\n ``Brain.add_data()`` argument).\n dst : str (optional)\n Path to save the movie. The default is a file in the results\n folder with a name determined based on the input data. Plotting\n parameters (``view`` and all subsequent parameters) are not\n included in the filename. 
\"~\" is expanded to the user's home\n folder.\n redo : bool\n Make the movie even if the target file exists already.\n ...\n State parameters.\n \"\"\"\n if p == 0.1:\n pmid = 0.05\n pmin = 0.01\n elif p == 0.05:\n pmid = 0.01\n pmin = 0.001\n elif p == 0.01:\n pmid = 0.001\n pmin = 0.001\n elif p == 0.001:\n pmid = 0.0001\n pmin = 0.00001\n else:\n raise ValueError(\"p=%s\" % p)\n\n data = TestDims(\"source\", morph=True)\n brain_kwargs = self._surfer_plot_kwargs(surf, views, foreground, background,\n smoothing_steps, hemi)\n surf = brain_kwargs['surf']\n if model:\n if not c1:\n raise ValueError(\"If x is specified, c1 needs to be specified; \"\n \"got c1=%s\" % repr(c1))\n elif c0:\n resname = \"t-test %s-%s {test_options} %s\" % (c1, c0, surf)\n cat = (c1, c0)\n else:\n resname = \"t-test %s {test_options} %s\" % (c1, surf)\n cat = (c1,)\n elif c1 or c0:\n raise ValueError(\"If x is not specified, c1 and c0 should not be \"\n \"specified either; got c1=%s, c0=%s\"\n % (repr(c1), repr(c0)))\n else:\n resname = \"t-test GA {test_options} %s\" % surf\n cat = None\n\n state.update(resname=resname, model=model)\n with self._temporary_state:\n subject, group = self._process_subject_arg(subjects, state)\n self._set_analysis_options(data, baseline, src_baseline, p, None, None)\n\n if dst is None:\n if group is None:\n dst = self.get('subject-mov-file', mkdir=True)\n else:\n dst = self.get('group-mov-file', mkdir=True)\n else:\n dst = os.path.expanduser(dst)\n\n if not redo and self._result_file_mtime(dst, data, group is None):\n return\n\n plot._brain.assert_can_save_movies()\n if group is None:\n ds = self.load_epochs_stc(subject, baseline, src_baseline, cat=cat)\n y = 'src'\n else:\n ds = self.load_evoked_stc(group, baseline, src_baseline, morph=True, cat=cat)\n y = 'srcm'\n\n # find/apply cluster criteria\n state = self._cluster_criteria_kwargs(data)\n if state:\n state.update(samples=0, pmin=p)\n\n # compute t-maps\n if c0:\n if group:\n res = testnd.ttest_rel(y, model, c1, c0, match='subject', ds=ds, **state)\n else:\n res = testnd.ttest_ind(y, model, c1, c0, ds=ds, **state)\n else:\n res = testnd.ttest_1samp(y, ds=ds, **state)\n\n # select cluster-corrected t-map\n if state:\n tmap = res.masked_parameter_map(None)\n else:\n tmap = res.t\n\n # make movie\n brain = plot.brain.dspm(tmap, ttest_t(p, res.df), ttest_t(pmin, res.df),\n ttest_t(pmid, res.df), surf=surf)\n brain.save_movie(dst, time_dilation)\n brain.close()\n\n def make_mrat_evoked(self, **kwargs):\n \"\"\"Produce the sensor data fiff files needed for MRAT sensor analysis\n\n Parameters\n ----------\n ...\n State parameters.\n\n Examples\n --------\n To produce evoked files for all subjects in the experiment:\n\n >>> experiment.set(model='factor1%factor2')\n >>> for _ in experiment:\n >>> experiment.make_mrat_evoked()\n ...\n \"\"\"\n ds = self.load_evoked(ndvar=False, **kwargs)\n\n # create fiffs\n model = self.get('model')\n factors = [f.strip() for f in model.split('%')]\n for case in ds.itercases():\n condition = '_'.join(case[f] for f in factors)\n path = self.get('mrat-sns-file', mkdir=True,\n mrat_condition=condition)\n evoked = case['evoked']\n evoked.save(path)\n\n def make_mrat_stcs(self, **kwargs):\n \"\"\"Produce the STC files needed for the MRAT analysis tool\n\n Parameters\n ----------\n ...\n State parameters.\n\n Examples\n --------\n To produce stc files for all subjects in the experiment:\n\n >>> experiment.set_inv('free')\n >>> experiment.set(model='factor1%factor2')\n >>> for _ in experiment:\n >>> 
experiment.make_mrat_stcs()\n ...\n \"\"\"\n ds = self.load_evoked_stc(morph=True, ndvar=False, **kwargs)\n\n # save condition info\n info_file = self.get('mrat_info-file', mkdir=True)\n ds.save_txt(info_file)\n\n # create stcs\n model = self.get('model')\n factors = [f.strip() for f in model.split('%')]\n for case in ds.itercases():\n condition = '_'.join(case[f] for f in factors)\n path = self.get('mrat-src-file', mkdir=True,\n mrat_condition=condition)\n stc = case['stcm']\n stc.save(path)\n\n def make_plot_annot(self, surf='inflated', redo=False, **state):\n \"\"\"Create a figure for the contents of an annotation file\n\n Parameters\n ----------\n surf : str\n FreeSurfer surface on which to plot the annotation.\n redo : bool\n If the target file already exists, overwrite it.\n ...\n State parameters.\n \"\"\"\n if is_fake_mri(self.get('mri-dir', **state)):\n mrisubject = self.get('common_brain')\n self.set(mrisubject=mrisubject, match=False)\n\n dst = self.get('res-file', mkdir=True, ext='png',\n analysis='Source Annot',\n resname=\"{parc} {mrisubject} %s\" % surf)\n if not redo and exists(dst):\n return\n\n brain = self.plot_annot(surf=surf, axw=600)\n brain.save_image(dst, 'rgba', True)\n legend = brain.plot_legend(show=False)\n legend.save(dst[:-3] + 'pdf', transparent=True)\n brain.close()\n legend.close()\n\n def make_plot_label(self, label, surf='inflated', redo=False, **state):\n if is_fake_mri(self.get('mri-dir', **state)):\n mrisubject = self.get('common_brain')\n self.set(mrisubject=mrisubject, match=False)\n\n dst = self._make_plot_label_dst(surf, label)\n if not redo and exists(dst):\n return\n\n brain = self.plot_label(label, surf=surf)\n brain.save_image(dst, 'rgba', True)\n\n def make_plots_labels(self, surf='inflated', redo=False, **state):\n self.set(**state)\n with self._temporary_state:\n if is_fake_mri(self.get('mri-dir')):\n self.set(mrisubject=self.get('common_brain'), match=False)\n\n labels = tuple(self._load_labels().values())\n dsts = [self._make_plot_label_dst(surf, label.name) for label in labels]\n if not redo and all(exists(dst) for dst in dsts):\n return\n\n brain = self.plot_brain(surf, None, 'split', ['lat', 'med'], w=1200)\n for label, dst in zip(labels, dsts):\n brain.add_label(label)\n brain.save_image(dst, 'rgba', True)\n brain.remove_labels(hemi='lh')\n\n def _make_plot_label_dst(self, surf, label):\n return self.get('res-deep-file', mkdir=True, analysis='Source Labels',\n folder=\"{parc} {mrisubject} %s\" % surf, resname=label,\n ext='png')\n\n def make_raw(self, **kwargs):\n \"\"\"Make a raw file\n \n Parameters\n ----------\n ...\n State parameters.\n\n Notes\n -----\n Due to the electronics of the KIT system sensors, signal lower than\n 0.16 Hz is not recorded even when recording at DC.\n \"\"\"\n if kwargs:\n self.set(**kwargs)\n pipe = self._raw[self.get('raw')]\n pipe.cache(self.get('subject'), self.get('recording'))\n\n def make_epoch_selection(self, decim=None, auto=None, overwrite=None, **state):\n \"\"\"Open :func:`gui.select_epochs` for manual epoch selection\n\n The GUI is opened with the correct file name; if the corresponding\n file exists, it is loaded, and upon saving the correct path is\n the default.\n\n Parameters\n ----------\n decim : int\n Decimate epochs for the purpose of faster display. Decimation is\n applied relative to the raw data file (i.e., if the raw data is\n sampled at a 1000 Hz, ``decim=10`` results in a sampling rate of\n 100 Hz for display purposes. 
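# Hypothetical usage sketch for the automatic epoch rejection described in the
# make_epoch_selection docstring (assumes `e` is a configured MneExperiment
# instance with data on disk; the 2e-12 threshold is an arbitrary example):
# reject every epoch in which any channel exceeds 2 pT, replacing existing
# rejection files.
for subject in e:
    e.make_epoch_selection(auto=2e-12, overwrite=True)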
The default is to use the decim\n parameter specified in the epoch definition.\n auto : scalar (optional)\n Perform automatic rejection instead of showing the GUI by supplying\n a an absolute threshold (for example, ``1e-12`` to reject any epoch\n in which the absolute of at least one channel exceeds 1 picotesla).\n If a rejection file already exists also set ``overwrite=True``.\n overwrite : bool\n If ``auto`` is specified and a rejection file already exists,\n overwrite the old file. The default is to raise an error if the\n file exists (``None``). Set to ``False`` to quietly keep the exising\n file.\n ...\n State parameters.\n \"\"\"\n rej = self.get('rej', **state)\n rej_args = self._artifact_rejection[rej]\n if rej_args['kind'] != 'manual':\n raise ValueError(f\"rej={rej!r}; Epoch rejection is not manual\")\n\n epoch = self._epochs[self.get('epoch')]\n if not isinstance(epoch, PrimaryEpoch):\n if isinstance(epoch, SecondaryEpoch):\n raise ValueError(f\"The current epoch {epoch.name!r} inherits selections from {epoch.sel_epoch!r}. To access a rejection file for this epoch, call `e.set(epoch={epoch.sel_epoch!r})` and then call `e.make_epoch_selection()` again.\")\n elif isinstance(epoch, SuperEpoch):\n raise ValueError(f\"The current epoch {epoch.name!r} inherits selections from these other epochs: {epoch.sub_epochs!r}. To access selections for these epochs, call `e.make_epoch_selection(epoch=epoch)` for each.\")\n else:\n raise ValueError(f\"The current epoch {epoch.name!r} is not a primary epoch and inherits selections from other epochs. Generate trial rejection for these epochs.\")\n\n path = self.get('rej-file', mkdir=True, session=epoch.session)\n\n if auto is not None and overwrite is not True and exists(path):\n if overwrite is False:\n return\n elif overwrite is None:\n raise IOError(self.format(\"A rejection file already exists for {subject}, epoch {epoch}, rej {rej}. 
Set the overwrite parameter to specify how to handle existing files.\"))\n else:\n raise TypeError(f\"overwrite={overwrite!r}\")\n\n ds = self.load_epochs(reject=False, trigger_shift=False, decim=decim)\n has_meg = 'meg' in ds\n has_grad = 'grad' in ds\n has_eeg = 'eeg' in ds\n has_eog = 'eog' in ds\n if sum((has_meg, has_grad, has_eeg)) > 1:\n raise NotImplementedError(\"Rejection GUI for multiple channel types\")\n elif has_meg:\n y_name = 'meg'\n vlim = 2e-12\n elif has_grad:\n raise NotImplementedError(\"Rejection GUI for gradiometer data\")\n elif has_eeg:\n y_name = 'eeg'\n vlim = 1.5e-4\n else:\n raise RuntimeError(\"No data found\")\n\n if has_eog:\n eog_sns = [] # TODO: use EOG\n else:\n eog_sns = self._eog_sns.get(ds[y_name].sensor.sysname)\n\n if auto is not None:\n # create rejection\n rej_ds = new_rejection_ds(ds)\n rej_ds[:, 'accept'] = ds[y_name].abs().max(('sensor', 'time')) <= auto\n # create description for info\n args = [f\"auto={auto!r}\"]\n if overwrite is True:\n args.append(\"overwrite=True\")\n if decim is not None:\n args.append(f\"decim={decim!r}\")\n rej_ds.info['desc'] = f\"Created with {self.__class__.__name__}.make_epoch_selection({', '.join(args)})\"\n # save\n save.pickle(rej_ds, path)\n # print info\n n_rej = rej_ds.eval(\"sum(accept == False)\")\n print(self.format(f\"{n_rej} of {rej_ds.n_cases} epochs rejected with threshold {auto} for {{subject}}, epoch {{epoch}}\"))\n return\n\n # don't mark eog sns if it is bad\n bad_channels = self.load_bad_channels()\n eog_sns = [c for c in eog_sns if c not in bad_channels]\n\n gui.select_epochs(ds, y_name, path=path, vlim=vlim, mark=eog_sns)\n\n def _need_not_recompute_report(self, dst, samples, data, redo):\n \"Check (and log) whether the report needs to be redone\"\n desc = self._get_rel('report-file', 'res-dir')\n if not exists(dst):\n self._log.debug(\"New report: %s\", desc)\n elif redo:\n self._log.debug(\"Redoing report: %s\", desc)\n elif not self._result_file_mtime(dst, data):\n self._log.debug(\"Report outdated: %s\", desc)\n else:\n meta = read_meta(dst)\n if 'samples' in meta:\n if int(meta['samples']) >= samples:\n self._log.debug(\"Report up to date: %s\", desc)\n return True\n else:\n self._log.debug(\"Report file used %s samples, recomputing \"\n \"with %i: %s\", meta['samples'], samples,\n desc)\n else:\n self._log.debug(\"Report created prior to Eelbrain 0.25, can \"\n \"not check number of samples. Delete manually \"\n \"to recompute: %s\", desc)\n return True\n\n def make_report(self, test, parc=None, mask=None, pmin=None, tstart=None,\n tstop=None, samples=10000, baseline=True,\n src_baseline=None, include=0.2, redo=False, **state):\n \"\"\"Create an HTML report on spatio-temporal clusters\n\n Parameters\n ----------\n test : str\n Test for which to create a report (entry in MneExperiment.tests).\n parc : None | str\n Find clusters in each label of parc (as opposed to the whole\n brain).\n mask : None | str\n Parcellation to apply as mask. 
Can only be specified if parc==None.\n pmin : None | scalar, 1 > pmin > 0 | 'tfce'\n Equivalent p-value for cluster threshold, or 'tfce' for\n threshold-free cluster enhancement.\n tstart : scalar\n Beginning of the time window for the test in seconds\n (default is the beginning of the epoch).\n tstop : scalar\n End of the time window for the test in seconds\n (default is the end of the epoch).\n samples : int > 0\n Number of samples used to determine cluster p values for spatio-\n temporal clusters (default 10,000).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. The default is to\n not apply baseline correction.\n include : 0 < scalar <= 1\n Create plots for all clusters with p-values smaller or equal this value.\n redo : bool\n If the target file already exists, delete and recreate it. This\n only applies to the HTML result file, not to the test.\n ...\n State parameters.\n\n See Also\n --------\n load_test : load corresponding data and tests\n \"\"\"\n if samples < 1:\n raise ValueError(\"samples needs to be > 0\")\n elif include <= 0 or include > 1:\n raise ValueError(\"include needs to be 0 < include <= 1, got %s\"\n % repr(include))\n\n self.set(**state)\n data = TestDims('source', morph=True)\n self._set_analysis_options(data, baseline, src_baseline, pmin, tstart, tstop, parc, mask)\n dst = self.get('report-file', mkdir=True, test=test)\n if self._need_not_recompute_report(dst, samples, data, redo):\n return\n\n # start report\n title = self.format('{recording} {test_desc}')\n report = Report(title)\n report.add_paragraph(self._report_methods_brief(dst))\n\n if isinstance(self._tests[test], TwoStageTest):\n self._two_stage_report(report, data, test, baseline, src_baseline, pmin, samples, tstart, tstop, parc, mask, include)\n else:\n self._evoked_report(report, data, test, baseline, src_baseline, pmin, samples, tstart, tstop, parc, mask, include)\n\n # report signature\n report.sign(('eelbrain', 'mne', 'surfer', 'scipy', 'numpy'))\n report.save_html(dst, meta={'samples': samples})\n\n def _evoked_report(self, report, data, test, baseline, src_baseline, pmin, samples, tstart, tstop, parc, mask, include):\n # load data\n ds, res = self._load_test(test, tstart, tstop, pmin, parc, mask, samples, data, baseline, src_baseline, True, True)\n\n # info\n surfer_kwargs = self._surfer_plot_kwargs()\n self._report_test_info(report.add_section(\"Test Info\"), ds, test, res, data, include)\n if parc:\n section = report.add_section(parc)\n caption = \"Labels in the %s parcellation.\" % parc\n self._report_parc_image(section, caption)\n elif mask:\n title = \"Whole Brain Masked by %s\" % mask\n section = report.add_section(title)\n caption = \"Mask: %s\" % mask.capitalize()\n self._report_parc_image(section, caption)\n\n colors = plot.colors_for_categorial(ds.eval(res._plot_model()))\n report.append(_report.source_time_results(res, ds, colors, include,\n surfer_kwargs, parc=parc))\n\n def _two_stage_report(self, report, data, test, baseline, src_baseline, pmin, samples, tstart, tstop, parc, mask, include):\n test_obj = self._tests[test]\n return_data = test_obj._within_model is not None\n rlm = self._load_test(test, tstart, tstop, pmin, parc, mask, samples, data, baseline, src_baseline, return_data, True)\n if return_data:\n group_ds, rlm = rlm\n else:\n 
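# Hypothetical usage of make_report (defined above). The experiment class,
# the test name 'surprise', and all parameter values are made-up assumptions;
# they must correspond to entries in the experiment's own test and parc
# definitions.
e = MyExperiment('/data/my-study')          # assumed MneExperiment subclass
e.make_report('surprise', mask='lobes', pmin=0.05,
              tstart=0.1, tstop=0.5, samples=10000)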
group_ds = None\n\n # start report\n surfer_kwargs = self._surfer_plot_kwargs()\n info_section = report.add_section(\"Test Info\")\n if parc:\n section = report.add_section(parc)\n caption = \"Labels in the %s parcellation.\" % parc\n self._report_parc_image(section, caption)\n elif mask:\n title = \"Whole Brain Masked by %s\" % mask\n section = report.add_section(title)\n caption = \"Mask: %s\" % mask.capitalize()\n self._report_parc_image(section, caption)\n\n # Design matrix\n section = report.add_section(\"Design Matrix\")\n section.append(rlm.design())\n\n # add results to report\n for term in rlm.column_names:\n res = rlm.tests[term]\n ds = rlm.coefficients_dataset(term)\n report.append(\n _report.source_time_results(\n res, ds, None, include, surfer_kwargs, term, y='coeff'))\n\n self._report_test_info(info_section, group_ds or ds, test_obj, res, data)\n\n def make_report_rois(self, test, parc=None, pmin=None, tstart=None, tstop=None,\n samples=10000, baseline=True, src_baseline=False,\n redo=False, **state):\n \"\"\"Create an HTML report on ROI time courses\n\n Parameters\n ----------\n test : str\n Test for which to create a report (entry in MneExperiment.tests).\n parc : str\n Parcellation that defines ROIs.\n pmin : None | scalar, 1 > pmin > 0 | 'tfce'\n Equivalent p-value for cluster threshold, or 'tfce' for\n threshold-free cluster enhancement.\n tstart : scalar\n Beginning of the time window for the test in seconds\n (default is the beginning of the epoch).\n tstop : scalar\n End of the time window for the test in seconds\n (default is the end of the epoch).\n samples : int > 0\n Number of samples used to determine cluster p values for spatio-\n temporal clusters (default 1000).\n baseline : bool | tuple\n Apply baseline correction using this period in sensor space.\n True to use the epoch's baseline specification (default).\n src_baseline : bool | tuple\n Apply baseline correction using this period in source space.\n True to use the epoch's baseline specification. 
The default is to\n not apply baseline correction.\n redo : bool\n If the target file already exists, delete and recreate it.\n ...\n State parameters.\n\n See Also\n --------\n load_test : load corresponding data and tests (use ``data=\"source.mean\"``)\n \"\"\"\n test_obj = self._tests[test]\n if samples < 1:\n raise ValueError(\"Need samples > 0 to run permutation test.\")\n elif isinstance(test_obj, TwoStageTest):\n raise NotImplementedError(\"ROI analysis not implemented for two-\"\n \"stage tests\")\n\n if parc is not None:\n state['parc'] = parc\n parc = self.get('parc', **state)\n if not parc:\n raise ValueError(\"No parcellation specified\")\n data = TestDims('source.mean')\n self._set_analysis_options(data, baseline, src_baseline, pmin, tstart, tstop, parc)\n dst = self.get('report-file', mkdir=True, test=test)\n if self._need_not_recompute_report(dst, samples, data, redo):\n return\n\n res_data, res = self._load_test(test, tstart, tstop, pmin, parc, None, samples, data, baseline, src_baseline, True, True)\n\n # sorted labels\n labels_lh = []\n labels_rh = []\n for label in res.res.keys():\n if label.endswith('-lh'):\n labels_lh.append(label)\n elif label.endswith('-rh'):\n labels_rh.append(label)\n else:\n raise NotImplementedError(\"Label named %s\" % repr(label.name))\n labels_lh.sort()\n labels_rh.sort()\n\n # start report\n title = self.format('{recording} {test_desc}')\n report = Report(title)\n\n # method intro (compose it later when data is available)\n ds0 = res_data[label]\n res0 = res.res[label]\n info_section = report.add_section(\"Test Info\")\n self._report_test_info(info_section, res.n_trials_ds, test_obj, res0, data)\n\n # add parc image\n section = report.add_section(parc)\n caption = \"ROIs in the %s parcellation.\" % parc\n self._report_parc_image(section, caption, res.subjects)\n\n # add content body\n n_subjects = len(res.subjects)\n colors = plot.colors_for_categorial(ds0.eval(res0._plot_model()))\n for label in chain(labels_lh, labels_rh):\n res_i = res.res[label]\n ds = res_data[label]\n title = label[:-3].capitalize()\n caption = \"Mean in label %s.\" % label\n n = len(ds['subject'].cells)\n if n < n_subjects:\n title += ' (n=%i)' % n\n caption += \" Data from %i of %i subjects.\" % (n, n_subjects)\n section.append(_report.time_results(\n res_i, ds, colors, title, caption, merged_dist=res.merged_dist))\n\n report.sign(('eelbrain', 'mne', 'surfer', 'scipy', 'numpy'))\n report.save_html(dst, meta={'samples': samples})\n\n def _make_report_eeg(self, test, pmin=None, tstart=None, tstop=None,\n samples=10000, baseline=True, include=1, **state):\n # outdated (cache, load_test())\n \"\"\"Create an HTML report on EEG sensor space spatio-temporal clusters\n\n Parameters\n ----------\n test : str\n Test for which to create a report (entry in MneExperiment.tests).\n pmin : None | scalar, 1 > pmin > 0 | 'tfce'\n Equivalent p-value for cluster threshold, or 'tfce' for\n threshold-free cluster enhancement.\n tstart : scalar\n Beginning of the time window for the test in seconds\n (default is the beginning of the epoch).\n tstop : scalar\n End of the time window for the test in seconds\n (default is the end of the epoch).\n samples : int > 0\n Number of samples used to determine cluster p values for spatio-\n temporal clusters (default 1000).\n baseline : bool | tuple\n Apply baseline correction using this period. 
True to use the epoch's\n baseline specification (default).\n include : 0 < scalar <= 1\n Create plots for all clusters with p-values smaller or equal this\n value (the default is 1, i.e. to show all clusters).\n ...\n State parameters.\n \"\"\"\n data = TestDims(\"sensor\")\n self._set_analysis_options(data, baseline, None, pmin, tstart, tstop)\n dst = self.get('report-file', mkdir=True, fmatch=False, test=test,\n folder=\"EEG Spatio-Temporal\", modality='eeg',\n **state)\n if self._need_not_recompute_report(dst, samples, data, False):\n return\n\n # load data\n ds, res = self.load_test(test, tstart, tstop, pmin, None, None, samples,\n 'sensor', baseline, None, True, True)\n\n # start report\n title = self.format('{recording} {test_desc}')\n report = Report(title)\n\n # info\n info_section = report.add_section(\"Test Info\")\n self._report_test_info(info_section, ds, test, res, data, include)\n\n # add connectivity image\n p = plot.SensorMap(ds['eeg'], connectivity=True, show=False)\n image_conn = p.image(\"connectivity.png\")\n info_section.add_figure(\"Sensor map with connectivity\", image_conn)\n p.close()\n\n colors = plot.colors_for_categorial(ds.eval(res._plot_model()))\n report.append(_report.sensor_time_results(res, ds, colors, include))\n report.sign(('eelbrain', 'mne', 'scipy', 'numpy'))\n report.save_html(dst)\n\n def _make_report_eeg_sensors(self, test, sensors=('FZ', 'CZ', 'PZ', 'O1', 'O2'),\n pmin=None, tstart=None, tstop=None,\n samples=10000, baseline=True, redo=False,\n **state):\n # outdated (cache)\n \"\"\"Create an HTML report on individual EEG sensors\n\n Parameters\n ----------\n test : str\n Test for which to create a report (entry in MneExperiment.tests).\n sensors : sequence of str\n Names of the sensors which to include.\n pmin : None | scalar, 1 > pmin > 0 | 'tfce'\n Equivalent p-value for cluster threshold, or 'tfce' for\n threshold-free cluster enhancement.\n tstart : scalar\n Beginning of the time window for the test in seconds\n (default is the beginning of the epoch).\n tstop : scalar\n End of the time window for the test in seconds\n (default is the end of the epoch).\n samples : int > 0\n Number of samples used to determine cluster p values for spatio-\n temporal clusters (default 1000).\n baseline : bool | tuple\n Apply baseline correction using this period. True to use the epoch's\n baseline specification (default).\n redo : bool\n If the target file already exists, delete and recreate it. 
This\n only applies to the HTML result file, not to the test.\n ...\n State parameters.\n \"\"\"\n data = TestDims('sensor.sub')\n self._set_analysis_options(data, baseline, None, pmin, tstart, tstop)\n dst = self.get('report-file', mkdir=True, fmatch=False, test=test,\n folder=\"EEG Sensors\", modality='eeg', **state)\n if self._need_not_recompute_report(dst, samples, data, redo):\n return\n\n # load data\n test_obj = self._tests[test]\n ds = self.load_evoked(self.get('group'), baseline, True, vardef=test_obj.vars)\n\n # test that sensors are in the data\n eeg = ds['eeg']\n missing = [s for s in sensors if s not in eeg.sensor.names]\n if missing:\n raise ValueError(\"The following sensors are not in the data: %s\" % missing)\n\n # start report\n title = self.format('{recording} {test_desc}')\n report = Report(title)\n\n # info\n info_section = report.add_section(\"Test Info\")\n\n # add sensor map\n p = plot.SensorMap(ds['eeg'], show=False)\n p.mark_sensors(sensors)\n info_section.add_figure(\"Sensor map\", p)\n p.close()\n\n # main body\n caption = \"Signal at %s.\"\n test_kwargs = self._test_kwargs(samples, pmin, tstart, tstop, ('time', 'sensor'), None)\n ress = [self._make_test(eeg.sub(sensor=sensor), ds, test_obj, test_kwargs) for\n sensor in sensors]\n colors = plot.colors_for_categorial(ds.eval(ress[0]._plot_model()))\n for sensor, res in zip(sensors, ress):\n report.append(_report.time_results(res, ds, colors, sensor, caption % sensor))\n\n self._report_test_info(info_section, ds, test, res, data)\n report.sign(('eelbrain', 'mne', 'scipy', 'numpy'))\n report.save_html(dst)\n\n @staticmethod\n def _report_methods_brief(path):\n path = Path(path)\n items = [*path.parts[:-1], path.stem]\n return List('Methods brief', items[-3:])\n\n def _report_subject_info(self, ds, model):\n \"\"\"Table with subject information\n\n Parameters\n ----------\n ds : Dataset\n Dataset with ``subject`` and ``n`` variables, and any factors in\n ``model``.\n model : str\n The model used for aggregating.\n \"\"\"\n s_ds = self.show_subjects(asds=True)\n if 'n' in ds:\n if model:\n n_ds = table.repmeas('n', model, 'subject', ds=ds)\n else:\n n_ds = ds\n n_ds_aligned = align1(n_ds, s_ds['subject'], 'subject')\n s_ds.update(n_ds_aligned)\n return s_ds.as_table(\n midrule=True, count=True,\n caption=\"All subjects included in the analysis with trials per \"\n \"condition\")\n\n def _report_test_info(self, section, ds, test, res, data, include=None, model=True):\n \"\"\"Top-level report info function\n\n Returns\n -------\n info : Table\n Table with preprocessing and test info.\n \"\"\"\n test_obj = self._tests[test] if isinstance(test, str) else test\n\n # List of preprocessing parameters\n info = List(\"Analysis:\")\n # epoch\n epoch = self.format('epoch = {epoch}')\n evoked_kind = self.get('evoked_kind')\n if evoked_kind:\n epoch += f' {evoked_kind}'\n if model is True:\n model = self.get('model')\n if model:\n epoch += f\" ~ {model}\"\n info.add_item(epoch)\n # inverse solution\n if data.source:\n info.add_item(self.format(\"cov = {cov}\"))\n info.add_item(self.format(\"inv = {inv}\"))\n # test\n info.add_item(\"test = %s (%s)\" % (test_obj.kind, test_obj.desc))\n if include is not None:\n info.add_item(f\"Separate plots of all clusters with a p-value < {include}\")\n section.append(info)\n\n # Statistical methods (for temporal tests, res is only representative)\n info = res.info_list()\n section.append(info)\n\n # subjects and state\n section.append(self._report_subject_info(ds, test_obj.model))\n 
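# Runnable sketch of the path trimming done by _report_methods_brief above:
# keep the last three path components (file extension stripped) as a short
# description of where the report lives. The path is a made-up example.
from pathlib import Path

path = Path('/data/results/eeg/surprise test.html')
items = [*path.parts[:-1], path.stem]
print(items[-3:])   # ['results', 'eeg', 'surprise test']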
section.append(self.show_state(hide=('hemi', 'subject', 'mrisubject')))\n return info\n\n def _report_parc_image(self, section, caption, subjects=None):\n \"Add picture of the current parcellation\"\n parc_name, parc = self._get_parc()\n with self._temporary_state:\n if isinstance(parc, IndividualSeededParc):\n if subjects is None:\n raise RuntimeError(\"subjects needs to be specified for \"\n \"plotting individual parcellations\")\n legend = None\n for subject in self:\n # make sure there is at least one label\n if not any(not l.name.startswith('unknown-') for l in\n self.load_annot()):\n section.add_image_figure(\"No labels\", subject)\n continue\n brain = self.plot_annot()\n if legend is None:\n p = brain.plot_legend(show=False)\n legend = p.image('parc-legend')\n p.close()\n section.add_image_figure(brain.image('parc'), subject)\n brain.close()\n return\n\n # one parc for all subjects\n self.set(mrisubject=self.get('common_brain'))\n brain = self.plot_annot(axw=500)\n legend = brain.plot_legend(show=False)\n content = [brain.image('parc'), legend.image('parc-legend')]\n section.add_image_figure(content, caption)\n brain.close()\n legend.close()\n\n def _make_report_lm(self, pmin=0.01, baseline=True, src_baseline=False,\n mask='lobes'):\n \"\"\"Report for a first level (single subject) LM\n\n Parameters\n ----------\n pmin : scalar\n Threshold p-value for uncorrected SPMs.\n \"\"\"\n if not isinstance(self._tests[self.get('test')], TwoStageTest):\n raise NotImplementedError(\"Only two-stage tests\")\n\n with self._temporary_state:\n self._set_analysis_options('source', baseline, src_baseline, pmin, None, None, mask=mask)\n dst = self.get('subject-spm-report', mkdir=True)\n lm = self._load_spm(baseline, src_baseline)\n\n title = self.format('{recording} {test_desc}')\n surfer_kwargs = self._surfer_plot_kwargs()\n\n report = Report(title)\n report.append(_report.source_time_lm(lm, pmin, surfer_kwargs))\n\n # report signature\n report.sign(('eelbrain', 'mne', 'surfer', 'scipy', 'numpy'))\n report.save_html(dst)\n\n def make_report_coreg(self, file_name=None, **state):\n \"\"\"Create HTML report with plots of the MEG/MRI coregistration\n\n Parameters\n ----------\n file_name : str\n Where to save the report (default is in the root/methods director).\n ...\n State parameters.\n \"\"\"\n from matplotlib import pyplot\n from mayavi import mlab\n\n mri = self.get('mri', **state)\n group = self.get('group')\n title = 'Coregistration'\n if group != 'all':\n title += ' ' + group\n if mri:\n title += ' ' + mri\n if file_name is None:\n file_name = join(self.get('methods-dir', mkdir=True), title + '.html')\n report = Report(title)\n for subject in self:\n mrisubject = self.get('mrisubject')\n fig = self.plot_coreg()\n fig.scene.camera.parallel_projection = True\n fig.scene.camera.parallel_scale = .175\n mlab.draw(fig)\n\n # front\n mlab.view(90, 90, 1, figure=fig)\n im_front = Image.from_array(mlab.screenshot(figure=fig), 'front')\n\n # left\n mlab.view(0, 270, 1, roll=90, figure=fig)\n im_left = Image.from_array(mlab.screenshot(figure=fig), 'left')\n\n mlab.close(fig)\n\n # MRI/BEM figure\n if is_fake_mri(self.get('mri-dir')):\n bem_fig = None\n else:\n bem_fig = mne.viz.plot_bem(mrisubject, self.get('mri-sdir'),\n brain_surfaces='white', show=False)\n\n # add to report\n if subject == mrisubject:\n title = subject\n caption = \"Coregistration for subject %s.\" % subject\n else:\n title = \"%s (%s)\" % (subject, mrisubject)\n caption = (\"Coregistration for subject %s (MRI-subject %s).\" %\n 
(subject, mrisubject))\n section = report.add_section(title)\n if bem_fig is None:\n section.add_figure(caption, (im_front, im_left))\n else:\n section.add_figure(caption, (im_front, im_left, bem_fig))\n pyplot.close(bem_fig)\n\n report.sign()\n report.save_html(file_name)\n\n def make_src(self, **kwargs):\n \"\"\"Make the source space\n \n Parameters\n ----------\n ...\n State parameters.\n \"\"\"\n dst = self.get('src-file', **kwargs)\n subject = self.get('mrisubject')\n common_brain = self.get('common_brain')\n\n is_scaled = (subject != common_brain) and is_fake_mri(self.get('mri-dir'))\n\n if is_scaled:\n # make sure the source space exists for the original\n with self._temporary_state:\n self.make_src(mrisubject=common_brain)\n orig = self.get('src-file')\n\n if exists(dst):\n if getmtime(dst) >= getmtime(orig):\n return\n os.remove(dst)\n\n src = self.get('src')\n self._log.info(f\"Scaling {src} source space for {subject}...\")\n subjects_dir = self.get('mri-sdir')\n mne.scale_source_space(subject, f'{{subject}}-{src}-src.fif', subjects_dir=subjects_dir)\n elif exists(dst):\n return\n else:\n src = self.get('src')\n kind, param, special = SRC_RE.match(src).groups()\n self._log.info(f\"Generating {src} source space for {subject}...\")\n if kind == 'vol':\n if subject == 'fsaverage':\n bem = self.get('bem-file')\n else:\n raise NotImplementedError(\"Volume source space for subject other than fsaverage\")\n if special == 'brainstem':\n name = 'brainstem'\n voi = ['Brain-Stem', '3rd-Ventricle']\n voi_lat = ('Thalamus-Proper', 'VentralDC')\n remove_midline = False\n elif special == 'cortex':\n name = 'cortex'\n voi = []\n voi_lat = ('Cerebral-Cortex',)\n remove_midline = True\n elif special == '':\n name = 'cortex'\n voi = []\n voi_lat = ('Cerebral-Cortex', 'Cerebral-White-Matter')\n remove_midline = True\n else:\n raise RuntimeError(f'src={src!r}')\n voi.extend('%s-%s' % fmt for fmt in product(('Left', 'Right'), voi_lat))\n sss = mne.setup_volume_source_space(\n subject, pos=float(param), bem=bem,\n mri=join(self.get('mri-dir'), 'mri', 'aseg.mgz'),\n volume_label=voi, subjects_dir=self.get('mri-sdir'))\n sss = merge_volume_source_space(sss, name)\n sss = prune_volume_source_space(sss, int(param), 2, remove_midline=remove_midline)\n else:\n assert not special\n spacing = kind + param\n sss = mne.setup_source_space(subject, spacing=spacing, add_dist=True, subjects_dir=self.get('mri-sdir'))\n mne.write_source_spaces(dst, sss)\n\n def _test_kwargs(self, samples, pmin, tstart, tstop, data, parc_dim):\n \"Compile kwargs for testnd tests\"\n kwargs = {'samples': samples, 'tstart': tstart, 'tstop': tstop,\n 'parc': parc_dim}\n if pmin == 'tfce':\n kwargs['tfce'] = True\n elif pmin is not None:\n kwargs['pmin'] = pmin\n kwargs.update(self._cluster_criteria_kwargs(data))\n return kwargs\n\n def _make_test(self, y, ds, test, kwargs, force_permutation=False):\n \"\"\"Compute test results\n\n Parameters\n ----------\n y : NDVar\n Dependent variable.\n ds : Dataset\n Other variables.\n test : Test | str\n Test, or name of the test to perform.\n kwargs : dict\n Test parameters (from :meth:`._test_kwargs`).\n force_permutation : bool\n Conduct permutations regardless of whether there are any clusters.\n \"\"\"\n test_obj = test if isinstance(test, Test) else self._tests[test]\n if not isinstance(test_obj, EvokedTest):\n raise RuntimeError(\"Test kind=%s\" % test_obj.kind)\n return test_obj.make(y, ds, force_permutation, kwargs)\n\n def merge_bad_channels(self):\n \"\"\"Merge bad channel definitions for 
different sessions\n\n Load the bad channel definitions for all sessions of the current\n subject and save the union for all sessions.\n\n See Also\n --------\n make_bad_channels : set bad channels for a single session\n \"\"\"\n n_chars = max(map(len, self._sessions))\n # collect bad channels\n bads = set()\n sessions = []\n with self._temporary_state:\n # ICARaw merges bad channels dynamically, so explicit merge needs to\n # be performed lower in the hierarchy\n self.set(raw='raw')\n for session in self.iter('session'):\n if exists(self.get('raw-file')):\n bads.update(self.load_bad_channels())\n sessions.append(session)\n else:\n print(\"%%-%is: skipping, raw file missing\" % n_chars % session)\n # update bad channel files\n for session in sessions:\n print(session.ljust(n_chars), end=': ')\n self.make_bad_channels(bads, session=session)\n\n def next(self, field='subject'):\n \"\"\"Change field to the next value\n\n Parameters\n ----------\n field : str | list of str\n The field for which the value should be changed (default 'subject').\n Can also contain multiple fields, e.g. ``['subject', 'session']``.\n \"\"\"\n if isinstance(field, str):\n current = self.get(field)\n values = self.get_field_values(field)\n def fmt(x): return x\n else:\n current = tuple(self.get(f) for f in field)\n values = list(product(*(self.get_field_values(f) for f in field)))\n def fmt(x): return '/'.join(x)\n\n # find the index of the next value\n if current in values:\n idx = values.index(current) + 1\n if idx == len(values):\n idx = -1\n else:\n for idx in range(len(values)):\n if values[idx] > current:\n break\n else:\n idx = -1\n\n # set the next value\n if idx == -1:\n next_ = values[0]\n print(f\"The last {fmt(field)} was reached; rewinding to {fmt(next_)}\")\n else:\n next_ = values[idx]\n print(f\"{fmt(field)}: {fmt(current)} -> {fmt(next_)}\")\n\n if isinstance(field, str):\n self.set(**{field: next_})\n else:\n self.set(**dict(zip(field, next_)))\n\n def plot_annot(self, parc=None, surf=None, views=None, hemi=None,\n borders=False, alpha=0.7, w=None, h=None, axw=None, axh=None,\n foreground=None, background=None, seeds=False, **state):\n \"\"\"Plot the annot file on which the current parcellation is based\n\n Parameters\n ----------\n parc : None | str\n Parcellation to plot. 
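# Self-contained sketch of the value-cycling logic in next() above: advance to
# the following value and wrap around to the first one after the last. The
# subject codes are made-up placeholders.
values = ['R0001', 'R0002', 'R0003']
current = 'R0003'

if current in values:
    idx = values.index(current) + 1
    if idx == len(values):
        idx = -1
else:                                   # current not in values: first larger value
    idx = next((i for i, v in enumerate(values) if v > current), -1)

nxt = values[0] if idx == -1 else values[idx]
print(f"subject: {current} -> {nxt}")   # wraps around to R0001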
If None (default), use parc from the current\n state.\n surf : 'inflated' | 'pial' | 'smoothwm' | 'sphere' | 'white'\n Freesurfer surface to use as brain geometry.\n views : str | iterator of str\n View or views to show in the figure.\n hemi : 'lh' | 'rh' | 'both' | 'split'\n Which hemispheres to plot (default includes hemisphere with more\n than one label in the annot file).\n borders : bool | int\n Show only label borders (PySurfer Brain.add_annotation() argument).\n alpha : scalar\n Alpha of the annotation (1=opaque, 0=transparent, default 0.7).\n axw : int\n Figure width per hemisphere.\n foreground : mayavi color\n Figure foreground color (i.e., the text color).\n background : mayavi color\n Figure background color.\n seeds : bool\n Plot seeds as points (only applies to seeded parcellations).\n ...\n State parameters.\n\n Returns\n -------\n brain : Brain\n PySurfer Brain with the parcellation plot.\n legend : ColorList\n ColorList figure with the legend.\n \"\"\"\n if parc is not None:\n state['parc'] = parc\n self.set(**state)\n\n self.make_annot()\n\n parc_name, parc = self._get_parc()\n if seeds:\n if not isinstance(parc, SeededParc):\n raise ValueError(\n \"seeds=True is only valid for seeded parcellation, \"\n \"not for parc=%r\" % (parc_name,))\n # if seeds are defined on a scaled common-brain, we need to plot the\n # scaled brain:\n plot_on_scaled_common_brain = isinstance(parc, IndividualSeededParc)\n else:\n plot_on_scaled_common_brain = False\n\n mri_sdir = self.get('mri-sdir')\n if (not plot_on_scaled_common_brain) and is_fake_mri(self.get('mri-dir')):\n subject = self.get('common_brain')\n else:\n subject = self.get('mrisubject')\n\n kwa = self._surfer_plot_kwargs(surf, views, foreground, background,\n None, hemi)\n brain = plot.brain.annot(parc_name, subject, borders=borders, alpha=alpha,\n w=w, h=h, axw=axw, axh=axh,\n subjects_dir=mri_sdir, **kwa)\n if seeds:\n from mayavi import mlab\n\n seeds = parc.seeds_for_subject(subject)\n seed_points = {hemi: [np.atleast_2d(coords) for name, coords in\n seeds.items() if name.endswith(hemi)]\n for hemi in ('lh', 'rh')}\n plot_points = {hemi: np.vstack(points).T if len(points) else None\n for hemi, points in seed_points.items()}\n for hemisphere in brain.brains:\n if plot_points[hemisphere.hemi] is None:\n continue\n x, y, z = plot_points[hemisphere.hemi]\n mlab.points3d(x, y, z, figure=hemisphere._f, color=(1, 0, 0),\n scale_factor=10)\n brain.set_parallel_view(scale=True)\n\n return brain\n\n def plot_brain(self, common_brain=True, **brain_kwargs):\n \"\"\"Plot the brain model\n\n Parameters\n ----------\n common_brain : bool\n If the current mrisubject is a scaled MRI, use the common_brain\n instead.\n ... 
:\n :class:`~plot._brain_object.Brain` options as keyword arguments.\n \"\"\"\n from ..plot._brain_object import Brain\n\n brain_args = self._surfer_plot_kwargs()\n brain_args.update(brain_kwargs)\n brain_args['subjects_dir'] = self.get('mri-sdir')\n\n # find subject\n if common_brain and is_fake_mri(self.get('mri-dir')):\n mrisubject = self.get('common_brain')\n self.set(mrisubject=mrisubject, match=False)\n else:\n mrisubject = self.get('mrisubject')\n\n return Brain(mrisubject, **brain_args)\n\n def plot_coreg(self, dig=True, parallel=True, **state):\n \"\"\"Plot the coregistration (Head shape and MEG helmet)\n\n Parameters\n ----------\n dig : bool\n Plot the digitization points (default True; 'fiducials' to plot\n fiducial points only).\n parallel : bool\n Set parallel view.\n ...\n State parameters.\n\n Notes\n -----\n Uses :func:`mne.viz.plot_alignment`\n \"\"\"\n self.set(**state)\n with self._temporary_state:\n raw = self.load_raw(raw='raw')\n fig = mne.viz.plot_alignment(\n raw.info, self.get('trans-file'), self.get('mrisubject'),\n self.get('mri-sdir'), meg=('helmet', 'sensors'), dig=dig,\n interaction='terrain')\n if parallel:\n fig.scene.camera.parallel_projection = True\n fig.scene.camera.parallel_scale = .2\n fig.scene.camera.position = [0, .5, .04]\n fig.scene.camera.focal_point = [0, 0, .04]\n fig.render()\n return fig\n\n def plot_whitened_gfp(self, s_start=None, s_stop=None, run=None):\n \"\"\"Plot the GFP of the whitened evoked to evaluate the the covariance matrix\n\n Parameters\n ----------\n s_start : str\n Subject at which to start (default is the first subject).\n s_stop: str\n Subject at which to stop (default is the last subject).\n run : bool\n Run the GUI after plotting (default depends on environment).\n \"\"\"\n gfps = []\n subjects = []\n with self._temporary_state:\n self.set(model='')\n for subject in self.iter_range(s_start, s_stop):\n cov = self.load_cov()\n picks = np.arange(len(cov.ch_names))\n ds = self.load_evoked(baseline=True)\n whitened_evoked = mne.whiten_evoked(ds[0, 'evoked'], cov, picks)\n gfp = whitened_evoked.data.std(0)\n\n gfps.append(gfp)\n subjects.append(subject)\n\n colors = plot.colors_for_oneway(subjects)\n title = \"Whitened Global Field Power (%s)\" % self.get('cov')\n fig = plot._base.Figure(1, title, h=7, run=run)\n ax = fig._axes[0]\n for subject, gfp in zip(subjects, gfps):\n ax.plot(whitened_evoked.times, gfp, label=subject,\n color=colors[subject])\n ax.legend(loc='right')\n fig.show()\n return fig\n\n def plot_evoked(self, subjects=None, separate=False, baseline=True, ylim='same',\n run=None, **kwargs):\n \"\"\"Plot evoked sensor data\n\n Parameters\n ----------\n subjects : str | 1 | -1\n Subject(s) for which to load data. Can be a single subject\n name or a group name such as ``'all'``. ``1`` to use the current\n subject; ``-1`` for the current group. Default is current subject\n (or group if ``group`` is specified).\n separate : bool\n When plotting a group, plot all subjects separately instead or the group\n average (default False).\n baseline : bool | tuple\n Apply baseline correction using this period. 
True to use the epoch's\n baseline specification (default).\n ylim : 'same' | 'different'\n Use the same or different y-axis limits for different subjects\n (default 'same').\n run : bool\n Run the GUI after plotting (default in accordance with plotting\n default).\n ...\n State parameters.\n \"\"\"\n subject, group = self._process_subject_arg(subjects, kwargs)\n model = self.get('model') or None\n epoch = self.get('epoch')\n if model:\n model_name = f\"~{model}\"\n elif subject or separate:\n model_name = \"Average\"\n else:\n model_name = \"Grand Average\"\n\n if subject:\n ds = self.load_evoked(baseline=baseline)\n y = guess_y(ds)\n title = f\"{subject} {epoch} {model_name}\"\n return plot.TopoButterfly(y, model, ds=ds, title=title, run=run)\n elif separate:\n plots = []\n vlim = []\n for subject in self.iter(group=group):\n ds = self.load_evoked(baseline=baseline)\n y = guess_y(ds)\n title = f\"{subject} {epoch} {model_name}\"\n p = plot.TopoButterfly(y, model, ds=ds, title=title, run=False)\n plots.append(p)\n vlim.append(p.get_vlim())\n\n if ylim.startswith('s'):\n vlim = np.array(vlim)\n vmax = np.abs(vlim, out=vlim).max()\n for p in plots:\n p.set_vlim(vmax)\n elif not ylim.startswith('d'):\n raise ValueError(\"ylim=%s\" % repr(ylim))\n\n if run or plot._base.do_autorun():\n gui.run()\n else:\n ds = self.load_evoked(group, baseline=baseline)\n y = guess_y(ds)\n title = f\"{group} {epoch} {model_name}\"\n return plot.TopoButterfly(y, model, ds=ds, title=title, run=run)\n\n def plot_label(self, label, surf=None, views=None, w=600):\n \"\"\"Plot a label\"\"\"\n if isinstance(label, str):\n label = self.load_label(label)\n title = label.name\n hemi = 'split' if isinstance(label, mne.BiHemiLabel) else label.hemi\n kwargs = self._surfer_plot_kwargs(surf, views, hemi=hemi)\n brain = self.plot_brain(title=title, w=w, **kwargs)\n brain.add_label(label, alpha=0.75)\n return brain\n\n def plot_raw(self, decim=10, xlim=5, add_bads=True, subtract_mean=False, **state):\n \"\"\"Plot raw sensor data\n\n Parameters\n ----------\n decim : int\n Decimate data for faster plotting (default 10).\n xlim : scalar\n Number of seconds to display (default 5 s).\n add_bads : bool | list\n Add bad channel information to the bad channels text file (default\n True).\n subtract_mean : bool\n Subtract the mean from each channel (useful when plotting raw data\n recorded with DC offset).\n ...\n State parameters.\n \"\"\"\n raw = self.load_raw(add_bads, ndvar=True, decim=decim, **state)\n name = self.format(\"{subject} {recording} {raw}\")\n if raw.info['meas'] == 'V':\n vmax = 1.5e-4\n elif raw.info['meas'] == 'B':\n vmax = 2e-12\n else:\n vmax = None\n if subtract_mean:\n raw -= raw.mean('time')\n return plot.TopoButterfly(raw, w=0, h=3, xlim=xlim, vmax=vmax, name=name)\n\n def run_mne_analyze(self, modal=False):\n \"\"\"Run mne_analyze\n\n Parameters\n ----------\n modal : bool\n Causes the shell to block until mne_analyze is closed.\n\n Notes\n -----\n Sets the current directory to raw-dir, and sets the SUBJECT and\n SUBJECTS_DIR to current values\n \"\"\"\n subp.run_mne_analyze(self.get('raw-dir'), self.get('mrisubject'),\n self.get('mri-sdir'), modal)\n\n def run_mne_browse_raw(self, modal=False):\n \"\"\"Run mne_analyze\n\n Parameters\n ----------\n modal : bool\n Causes the shell to block until mne_browse_raw is closed.\n\n Notes\n -----\n Sets the current directory to raw-dir, and sets the SUBJECT and\n SUBJECTS_DIR to current values\n \"\"\"\n subp.run_mne_browse_raw(self.get('raw-dir'), 
self.get('mrisubject'),\n self.get('mri-sdir'), modal)\n\n def set(self, subject=None, match=True, allow_asterisk=False, **state):\n \"\"\"\n Set variable values.\n\n Parameters\n ----------\n subject : str\n Set the `subject` value. The corresponding `mrisubject` is\n automatically set to the corresponding mri subject.\n match : bool\n For fields with pre-defined values, only allow valid values (default\n ``True``).\n allow_asterisk : bool\n If a value contains ``'*'``, set the value without the normal value\n evaluation and checking mechanisms (default ``False``).\n ...\n State parameters.\n \"\"\"\n if subject is not None:\n if 'group' not in state:\n state['subject'] = subject\n subject = None\n FileTree.set(self, match, allow_asterisk, **state)\n if subject is not None:\n FileTree.set(self, match, allow_asterisk, subject=subject)\n\n def _post_set_group(self, _, group):\n if group == '*' or group not in self._groups:\n return\n group_members = self._groups[group]\n self._field_values['subject'] = group_members\n subject = self.get('subject')\n if subject != '*' and subject not in group_members and group_members:\n self.set(group_members[0])\n\n def set_inv(self, ori='free', snr=3, method='dSPM', depth=None,\n pick_normal=False):\n \"\"\"Set the type of inverse solution used for source estimation\n\n Parameters\n ----------\n ori : 'free' | 'fixed' | 'vec' | float (0, 1)\n Orientation constraint (default ``'free'``; use a ``float`` to\n specify a loose orientation constraint).\n\n At each source point, ...\n\n - ``free``: ... estimate a current dipole with arbitrary direction.\n For further analysis, only the magnitude of the current is\n retained, while the direction is ignored. This is good for\n detecting changes in neural current strength when current\n direction is variable (for example, due to anatomical differences\n between subjects).\n - ``fixed``: ... estimate current flow orthogonal to the cortical\n surface. The sign of the estimates indicates current direction\n relative to the surface (positive for current out of the brain).\n - ``vec``: ... estimate a current vector with arbitrary direction,\n and return this current as 3 dimensional vector.\n - loose (``float``): ... estimate a current dipole with arbitrary\n direction. Then, multiple the two components parallel to the\n surface with this number, and retain the magnitude.\n\n snr : scalar\n SNR estimate used for regularization (default 3; the general\n recommendation is 3 for averaged responses, and 1 for raw or single\n trial data).\n method : 'MNE' | 'dSPM' | 'sLORETA' | 'eLORETA'\n Noise normalization method. ``MNE`` uses unnormalized current\n estimates. ``dSPM`` [1]_ (default) ``sLORETA`` [2]_ and eLORETA [3]_\n normalize each the estimate at each source with an estimate of the\n noise at that source (default ``'dSPM'``).\n depth : None | float [0, 1]\n Depth weighting [4]_ (default ``None`` to use mne default 0.8; ``0`` to\n disable depth weighting).\n pick_normal : bool\n Estimate a free orientation current vector, then pick the component\n orthogonal to the cortical surface and discard the parallel\n components.\n\n References\n ----------\n .. [1] Dale A, Liu A, Fischl B, Buckner R. (2000)\n Dynamic statistical parametric mapping: combining fMRI and MEG\n for high-resolution imaging of cortical activity.\n Neuron, 26:55-67.\n `10.1016/S0896-6273(00)81138-1\n <https://doi.org/10.1016/S0896-6273(00)81138-1>`_\n .. 
[2] Pascual-Marqui RD (2002),\n Standardized low resolution brain electromagnetic tomography\n (sLORETA): technical details.\n Methods Find. Exp. Clin. Pharmacology, 24(D):5-12.\n .. [3] Pascual-Marqui RD (2007).\n Discrete, 3D distributed, linear imaging methods of electric\n neuronal activity. Part 1: exact, zero error localization.\n `arXiv:0710.3341 <https://arxiv.org/abs/0710.3341>`_\n .. [4] Lin F, Witzel T, Ahlfors S P, Stufflebeam S M, Belliveau J W,\n Hämäläinen M S. (2006) Assessing and improving the spatial accuracy\n in MEG source localization by depth-weighted minimum-norm estimates.\n NeuroImage, 31(1):160–171.\n `10.1016/j.neuroimage.2005.11.054\n <https://doi.org/10.1016/j.neuroimage.2005.11.054>`_\n\n \"\"\"\n self.set(inv=self._inv_str(ori, snr, method, depth, pick_normal))\n\n @staticmethod\n def _inv_str(ori, snr, method, depth, pick_normal):\n \"Construct inv str from settings\"\n if isinstance(ori, str):\n if ori not in ('free', 'fixed', 'vec'):\n raise ValueError(f'ori={ori!r}')\n elif not 0 < ori < 1:\n raise ValueError(f\"ori={ori!r}; must be in range (0, 1)\")\n else:\n ori = f'loose{str(ori)[1:]}'\n\n if snr <= 0:\n raise ValueError(f\"snr={snr!r}\")\n\n if method not in INV_METHODS:\n raise ValueError(f\"method={method!r}\")\n\n items = [ori, f'{snr:g}', method]\n\n if depth is None:\n pass\n elif not 0 <= depth <= 1:\n raise ValueError(f\"depth={depth!r}; must be in range [0, 1]\")\n else:\n items.append(f'{depth:g}')\n\n if pick_normal:\n if ori in ('vec', 'fixed'):\n raise ValueError(f\"ori={ori!r} and pick_normal=True are incompatible\")\n items.append('pick_normal')\n\n return '-'.join(items)\n\n @staticmethod\n def _parse_inv(inv):\n \"(ori, snr, method, depth, pick_normal)\"\n m = inv_re.match(inv)\n if m is None:\n raise ValueError(f\"inv={inv!r}: invalid inverse specification\")\n\n ori, snr, method, depth, pick_normal = m.groups()\n if ori.startswith('loose'):\n ori = float(ori[5:])\n if not 0 < ori < 1:\n raise ValueError(f\"inv={inv!r}: loose parameter needs to be in range (0, 1)\")\n elif pick_normal and ori in ('vec', 'fixed'):\n raise ValueError(f\"inv={inv!r}: {ori} incompatible with pick_normal\")\n\n snr = float(snr)\n if snr <= 0:\n raise ValueError(f\"inv={inv!r}: snr={snr!r}\")\n\n if method not in INV_METHODS:\n raise ValueError(f\"inv={inv!r}: method={method!r}\")\n\n if depth is not None:\n depth = float(depth)\n if not 0 <= depth <= 1:\n raise ValueError(f\"inv={inv!r}: depth={depth!r}, needs to be in range [0, 1]\")\n\n return ori, snr, method, depth, bool(pick_normal)\n\n @classmethod\n def _eval_inv(cls, inv):\n cls._parse_inv(inv)\n return inv\n\n def _update_inv_cache(self, fields):\n if fields['inv'] == '*':\n return '*'\n m = inv_re.match(fields['inv'])\n ori, snr, method, depth, pick_normal = m.groups()\n if depth:\n return f'{ori}-{depth}'\n else:\n return ori\n\n def _post_set_inv(self, _, inv):\n if '*' in inv:\n self._params['make_inv_kw'] = None\n self._params['apply_inv_kw'] = None\n return\n\n ori, snr, method, depth, pick_normal = self._parse_inv(inv)\n\n if ori == 'fixed':\n make_kw = {'fixed': True}\n elif ori == 'free' or ori == 'vec':\n make_kw = {'loose': 1}\n elif isinstance(ori, float):\n make_kw = {'loose': ori}\n else:\n raise RuntimeError(\"ori=%r (in inv=%r)\" % (ori, inv))\n\n if depth is None:\n make_kw['depth'] = 0.8\n elif depth == 0:\n make_kw['depth'] = None\n else:\n make_kw['depth'] = depth\n\n apply_kw = {'method': method, 'lambda2': 1. 
/ snr ** 2}\n if ori == 'vec':\n apply_kw['pick_ori'] = 'vector'\n elif pick_normal:\n apply_kw['pick_ori'] = 'normal'\n\n self._params['make_inv_kw'] = make_kw\n self._params['apply_inv_kw'] = apply_kw\n\n def _eval_model(self, model):\n if model == '':\n return model\n elif len(model) > 1 and '*' in model:\n raise ValueError(\"model=%r; To specify interactions, use '%' instead of '*'\")\n\n factors = [v.strip() for v in model.split('%')]\n\n # find order value for each factor\n ordered_factors = {}\n unordered_factors = []\n for factor in sorted(factors):\n assert_is_legal_dataset_key(factor)\n if factor in self._model_order:\n ordered_factors[self._model_order.index(factor)] = factor\n else:\n unordered_factors.append(factor)\n\n # recompose\n model = [ordered_factors[v] for v in sorted(ordered_factors)]\n if unordered_factors:\n model.extend(unordered_factors)\n return '%'.join(model)\n\n def _eval_src(self, src):\n m = SRC_RE.match(src)\n if not m:\n raise ValueError(f'src={src}')\n kind, param, special = m.groups()\n if special and kind != 'vol':\n raise ValueError(f'src={src}')\n return src\n\n def _update_mrisubject(self, fields):\n subject = fields['subject']\n mri = fields['mri']\n if subject == '*' or mri == '*':\n return '*'\n return self._mri_subjects[mri][subject]\n\n def _update_session(self, fields):\n epoch = fields['epoch']\n if epoch in self._epochs:\n epoch = self._epochs[epoch]\n if isinstance(epoch, (PrimaryEpoch, SecondaryEpoch)):\n return epoch.session\n else:\n return # default for non-primary epoch\n elif not epoch or epoch == '*':\n return # don't force session\n return '*' # if a named epoch is not in _epochs it might be a removed epoch\n\n def _update_src_name(self, fields):\n \"Because 'ico-4' is treated in filenames as ''\"\n return '' if fields['src'] == 'ico-4' else fields['src']\n\n def _eval_parc(self, parc):\n if parc in self._parcs:\n if isinstance(self._parcs[parc], SeededParc):\n raise ValueError(\"Seeded parc set without size, use e.g. 
\"\n \"parc='%s-25'\" % parc)\n else:\n return parc\n m = SEEDED_PARC_RE.match(parc)\n if m:\n name = m.group(1)\n if isinstance(self._parcs.get(name), SeededParc):\n return parc\n else:\n raise ValueError(\"No seeded parc with name %r\" % name)\n else:\n raise ValueError(\"parc=%r\" % parc)\n\n def _get_parc(self):\n \"\"\"Parc information\n\n Returns\n -------\n parc : str\n The current parc setting.\n params : dict | None\n The parc definition (``None`` for ``parc=''``).\n \"\"\"\n parc = self.get('parc')\n if parc == '':\n return '', None\n elif parc in self._parcs:\n return parc, self._parcs[parc]\n else:\n return parc, self._parcs[SEEDED_PARC_RE.match(parc).group(1)]\n\n def _post_set_test(self, _, test):\n if test != '*' and test in self._tests: # with vmatch=False, test object might not be availale\n test_obj = self._tests[test]\n if test_obj.model is not None:\n self.set(model=test_obj._within_model)\n\n def _set_analysis_options(self, data, baseline, src_baseline, pmin, tstart, tstop, parc=None, mask=None, decim=None, test_options=(), folder_options=()):\n \"\"\"Set templates for paths with test parameters\n\n analysis: preprocessing up to source estimate epochs (not parcellation)\n folder: parcellation (human readable)\n test_dims: parcellation (as used for spatio-temporal cluster test\n test_options: baseline, permutation test method etc.\n\n also sets `parc`\n\n Parameters\n ----------\n data : TestDims\n Whether the analysis is in sensor or source space.\n ...\n src_baseline :\n Should be False if data=='sensor'.\n ...\n decim : int\n Decimation factor (default is None, i.e. based on epochs).\n test_options : sequence of str\n Additional, test-specific tags (for use by TRFExperiment only).\n \"\"\"\n data = TestDims.coerce(data)\n # data kind (sensor or source space)\n if data.sensor:\n analysis = '{sns_kind} {evoked_kind}'\n elif data.source:\n analysis = '{src_kind} {evoked_kind}'\n else:\n raise RuntimeError(f\"data={data.string!r}\")\n\n # determine report folder (reports) and test_dims (test-files)\n kwargs = {'test_dims': data.string}\n if data.source is True:\n if parc is None:\n if mask:\n folder = \"%s masked\" % mask\n kwargs['parc'] = mask\n if pmin is None:\n # When not doing clustering, parc does not affect\n # results, so we don't need to distinguish parc and mask\n kwargs['test_dims'] = mask\n else: # parc means disconnecting\n kwargs['test_dims'] = '%s-mask' % mask\n else:\n folder = \"Whole Brain\"\n # only compute unmasked test once (probably rare anyways)\n kwargs['parc'] = 'aparc'\n kwargs['test_dims'] = 'unmasked'\n elif mask:\n raise ValueError(\"Can't specify mask together with parc\")\n elif pmin is None or pmin == 'tfce':\n raise NotImplementedError(\n \"Threshold-free test (pmin=%r) is not implemented for \"\n \"parcellation (parc parameter). 
Use a mask instead, or do \"\n \"a cluster-based test.\" % pmin)\n else:\n folder = parc\n kwargs['parc'] = parc\n kwargs['test_dims'] = parc\n elif data.source: # source-space ROIs\n if not parc:\n raise ValueError(\"Need parc for ROI definition\")\n kwargs['parc'] = parc\n kwargs['test_dims'] = '%s.%s' % (parc, data.source)\n if data.source == 'mean':\n folder = f'{parc} ROIs'\n else:\n folder = f'{parc} {data.source}'\n elif parc:\n raise ValueError(f\"Sensor analysis (data={data.string!r}) can't have parc\")\n elif data.sensor:\n folder = 'Sensor' if data.y_name == 'meg' else 'EEG'\n if data.sensor is not True:\n folder = f'{folder} {data.sensor}'\n else:\n raise RuntimeError(f\"data={data.string!r}\")\n\n if folder_options:\n folder += ' ' + ' '.join(folder_options)\n\n # test properties\n items = []\n\n # baseline (default is baseline correcting in sensor space)\n epoch_baseline = self._epochs[self.get('epoch')].baseline\n if src_baseline:\n assert data.source\n if baseline is True or baseline == epoch_baseline:\n items.append('snsbl')\n elif baseline:\n items.append('snsbl=%s' % _time_window_str(baseline))\n\n if src_baseline is True or src_baseline == epoch_baseline:\n items.append('srcbl')\n else:\n items.append('srcbl=%s' % _time_window_str(src_baseline))\n else:\n if not baseline:\n items.append('nobl')\n elif baseline is True or baseline == epoch_baseline:\n pass\n else:\n items.append('bl=%s' % _time_window_str(baseline))\n\n # pmin\n if pmin is not None:\n # source connectivity\n connectivity = self.get('connectivity')\n if connectivity and not data.source:\n raise NotImplementedError(f\"connectivity={connectivity!r} is not implemented for data={data!r}\")\n elif connectivity:\n items.append(connectivity)\n\n items.append(str(pmin))\n\n # cluster criteria\n if pmin != 'tfce':\n select_clusters = self.get('select_clusters')\n if select_clusters:\n items.append(select_clusters)\n\n # time window\n if tstart is not None or tstop is not None:\n items.append(_time_window_str((tstart, tstop)))\n if decim is not None:\n assert isinstance(decim, int)\n items.append(str(decim))\n\n items.extend(test_options)\n\n self.set(test_options=' '.join(items), analysis=analysis, folder=folder, **kwargs)\n\n @staticmethod\n def _parse_test_options(test_options: FieldCode):\n code = FieldCode.coerce(test_options)\n out = {}\n # baseline\n if 'bl' in code.lookahead_1:\n out['baseline'] = code.next()\n if 'srcbl' in code.lookahead_1:\n out['baseline'] = (out['baseline'], code.next())\n # connectivity\n if code.lookahead_1 == 'link-midline':\n out['connectivity'] = code.next()\n # pmin\n if code.lookahead_1 == 'tfce' or code.lookahead_1.startswith('0.'):\n out['pmin'] = code.next()\n # time-window\n if '-' in code.lookahead_1:\n out['time_window'] = code.next()\n # decim\n if code.lookahead_1.isdigit():\n out['decim'] = code.next()\n return out\n\n def show_bad_channels(self, sessions=None, **state):\n \"\"\"List bad channels\n\n Parameters\n ----------\n sessions : True | sequence of str\n By default, bad channels for the current session are shown. 
Set\n ``sessions`` to ``True`` to show bad channels for all sessions, or\n a list of session names to show bad channeles for these sessions.\n ...\n State parameters.\n\n Notes\n -----\n ICA Raw pipes merge bad channels from different sessions (by combining\n the bad channels from all sessions).\n \"\"\"\n if state:\n self.set(**state)\n\n if sessions is True:\n use_sessions = self._sessions\n elif sessions:\n use_sessions = sessions\n else:\n use_sessions = None\n\n if use_sessions is None:\n bad_by_s = {k: self.load_bad_channels() for k in self}\n list_sessions = False\n else:\n bad_channels = {k: self.load_bad_channels() for k in\n self.iter(('subject', 'session'), values={'session': use_sessions})}\n # whether they are equal between sessions\n bad_by_s = {}\n for (subject, session), bads in bad_channels.items():\n if subject in bad_by_s:\n if bad_by_s[subject] != bads:\n list_sessions = True\n break\n else:\n bad_by_s[subject] = bads\n else:\n list_sessions = False\n\n # table\n session_desc = ', '.join(use_sessions) if use_sessions else self.get('session')\n caption = f\"Bad channels in {session_desc}\"\n if list_sessions:\n t = fmtxt.Table('l' * (1 + len(use_sessions)), caption=caption)\n t.cells('Subject', *use_sessions)\n t.midrule()\n for subject in sorted(bad_by_s):\n t.cell(subject)\n for session in use_sessions:\n t.cell(', '.join(bad_channels[subject, session]))\n else:\n if use_sessions is not None:\n caption += \" (all sessions equal)\"\n t = fmtxt.Table('ll', caption=caption)\n t.cells('Subject', 'Bad channels')\n t.midrule()\n for subject in sorted(bad_by_s):\n t.cells(subject, ', '.join(bad_by_s[subject]))\n return t\n\n def show_file_status(self, temp, col=None, row='subject', *args, **kwargs):\n \"\"\"Compile a table about the existence of files\n\n Parameters\n ----------\n temp : str\n The name of the path template for the files to examine.\n col : None | str\n Field over which to alternate columns (default is a single column).\n row : str\n Field over which to alternate rows (default 'subject').\n count : bool\n Add a column with a number for each line (default True).\n present : 'time' | 'date' | str\n String to display when a given file is present. 'time' to use last\n modification date and time (default); 'date' for date only.\n absent : str\n String to display when a given file is absent (default '-').\n ... 
:\n :meth:`MneExperiment.iter` parameters.\n\n Examples\n --------\n >>> e.show_file_status('rej-file')\n Subject Rej-file\n -------------------------------\n 0 A0005 07/22/15 13:03:08\n 1 A0008 07/22/15 13:07:57\n 2 A0028 07/22/15 13:22:04\n 3 A0048 07/22/15 13:25:29\n >>> e.show_file_status('rej-file', 'raw')\n Subject 0-40 0.1-40 1-40 Clm\n ---------------------------------------------------\n 0 A0005 - 07/22/15 13:03:08 - -\n 1 A0008 - 07/22/15 13:07:57 - -\n 2 A0028 - 07/22/15 13:22:04 - -\n 3 A0048 - 07/22/15 13:25:29 - -\n \"\"\"\n return FileTree.show_file_status(self, temp, row, col, *args, **kwargs)\n\n def show_raw_info(self, **state):\n \"Display the selected pipeline for raw processing\"\n raw = self.get('raw', **state)\n pipe = source_pipe = self._raw[raw]\n pipeline = [pipe]\n while source_pipe.name != 'raw':\n source_pipe = source_pipe.source\n pipeline.insert(0, source_pipe)\n print(f\"Preprocessing pipeline: {' --> '.join(p.name for p in pipeline)}\")\n\n # pipe-specific\n if isinstance(pipe, RawICA):\n table = fmtxt.Table('lrr')\n table.cells('Subject', 'n components', 'reject')\n table.midrule()\n for subject in self:\n table.cell(subject)\n filename = self.get('ica-file')\n if exists(filename):\n ica = self.load_ica()\n table.cells(ica.n_components_, len(ica.exclude))\n else:\n table.cells(\"No ICA-file\", '')\n print()\n print(table)\n\n def show_reg_params(self, asds=False, **kwargs):\n \"\"\"Show the covariance matrix regularization parameters\n\n Parameters\n ----------\n asds : bool\n Return a dataset with the parameters (default False).\n ...\n State parameters.\n \"\"\"\n if kwargs:\n self.set(**kwargs)\n subjects = []\n reg = []\n for subject in self:\n path = self.get('cov-info-file')\n if exists(path):\n with open(path, 'r') as fid:\n text = fid.read()\n reg.append(float(text.strip()))\n else:\n reg.append(float('nan'))\n subjects.append(subject)\n ds = Dataset()\n ds['subject'] = Factor(subjects)\n ds['reg'] = Var(reg)\n if asds:\n return ds\n else:\n print(ds)\n\n def show_rej_info(self, flagp=None, asds=False, bads=False, **state):\n \"\"\"Information about artifact rejection\n\n Parameters\n ----------\n flagp : scalar\n Flag entries whose percentage of good trials is lower than this\n number.\n asds : bool\n Return a Dataset with the information (default is to print it).\n bads : bool\n Display bad channel names (not just number of bad channels).\n\n Notes\n -----\n To display the number of components rejected of an ICA raw pipe, use\n :meth:`~MneExperiment.show_raw_info`.\n \"\"\"\n # TODO: include ICA raw preprocessing pipes\n if state:\n self.set(**state)\n raw_name = self.get('raw')\n epoch_name = self.get('epoch')\n rej_name = self.get('rej')\n rej = self._artifact_rejection[rej_name]\n has_epoch_rejection = rej['kind'] is not None\n has_interp = rej.get('interpolation')\n\n subjects = []\n n_events = []\n n_good = []\n bad_chs = []\n n_interp = []\n\n for subject in self:\n subjects.append(subject)\n bads_raw = self.load_bad_channels()\n try:\n ds = self.load_selected_events(reject='keep')\n except FileMissing:\n ds = self.load_selected_events(reject=False)\n bad_chs.append((bads_raw, ()))\n if has_epoch_rejection:\n n_good.append(float('nan'))\n if has_interp:\n n_interp.append(float('nan'))\n else:\n bads_rej = set(ds.info[BAD_CHANNELS]).difference(bads_raw)\n bad_chs.append((bads_raw, bads_rej))\n if has_epoch_rejection:\n n_good.append(ds['accept'].sum())\n if has_interp:\n n_interp.append(np.mean([len(chi) for chi in 
ds[INTERPOLATE_CHANNELS]]))\n n_events.append(ds.n_cases)\n has_interp = has_interp and any(n_interp)\n caption = f\"Rejection info for raw={raw_name}, epoch={epoch_name}, rej={rej_name}. Percent is rounded to one decimal.\"\n\n # format bad channels\n if bads:\n func = ', '.join\n else:\n func = len\n bad_chs = [(func(bads_raw), func(bads_rej)) for bads_raw, bads_rej in bad_chs]\n\n if any(bads_rej for bads_raw, bads_rej in bad_chs):\n caption += \" Bad channels: defined in bad_channels file and in rej-file.\"\n bad_chs = [f'{bads_raw} + {bads_rej}' for bads_raw, bads_rej in bad_chs]\n else:\n bad_chs = [f'{bads_raw}' for bads_raw, bads_rej in bad_chs]\n\n if bads:\n bad_chs = [s.replace('MEG ', '') for s in bad_chs]\n\n if has_interp:\n caption += \" ch_interp: average number of channels interpolated per epoch, rounded to one decimal.\"\n out = Dataset(caption=caption)\n out['subject'] = Factor(subjects)\n out['n_events'] = Var(n_events)\n if has_epoch_rejection:\n out['n_good'] = Var(n_good)\n out['percent'] = Var(np.round(100 * out['n_good'] / out['n_events'], 1))\n if flagp:\n out['flag'] = Factor(out['percent'] < flagp, labels={False: '', True: '*'})\n out['bad_channels'] = Factor(bad_chs)\n if has_interp:\n out['ch_interp'] = Var(np.round(n_interp, 1))\n\n if asds:\n return out\n else:\n print(out)\n\n def show_subjects(self, mri=True, mrisubject=False, caption=True, asds=False, **state):\n \"\"\"Create a Dataset with subject information\n\n Parameters\n ----------\n mri : bool\n Add a column specifying whether the subject is using a scaled MRI\n or whether it has its own MRI.\n mrisubject : bool\n Add a column showing the MRI subject corresponding to each subject.\n caption : bool | str\n Caption for the table (For True, use the default \"Subject in group\n {group}\".\n asds : bool\n Return the table as Dataset instead of an FMTxt Table.\n ...\n State parameters.\n \"\"\"\n if isinstance(mri, str):\n state['mri'] = mri\n mri = True\n if state:\n self.set(**state)\n\n # caption\n if caption is True:\n caption = self.format(\"Subjects in group {group}\")\n\n subject_list = []\n mri_list = []\n mrisubject_list = []\n for subject in self.iter():\n subject_list.append(subject)\n mrisubject_ = self.get('mrisubject')\n mrisubject_list.append(mrisubject_)\n if mri:\n mri_dir = self.get('mri-dir')\n if not exists(mri_dir):\n mri_list.append('*missing')\n elif is_fake_mri(mri_dir):\n mri_sdir = self.get('mri-sdir')\n info = mne.coreg.read_mri_cfg(mrisubject_, mri_sdir)\n cell = \"%s * %s\" % (info['subject_from'],\n str(info['scale']))\n mri_list.append(cell)\n else:\n mri_list.append(mrisubject_)\n\n ds = Dataset(caption=caption)\n ds['subject'] = Factor(subject_list)\n if mri:\n ds['mri'] = Factor(mri_list)\n if mrisubject:\n ds['mrisubject'] = Factor(mrisubject_list)\n\n if asds:\n return ds\n else:\n return ds.as_table(midrule=True, count=True)\n\n def show_input_tree(self):\n \"\"\"Print a tree of the files needed as input\n\n See Also\n --------\n show_tree: show complete tree (including secondary, optional and cache)\n \"\"\"\n return self.show_tree(fields=['raw-file', 'trans-file', 'mri-dir'])\n\n def _surfer_plot_kwargs(self, surf=None, views=None, foreground=None,\n background=None, smoothing_steps=None, hemi=None):\n out = self._brain_plot_defaults.copy()\n out.update(self.brain_plot_defaults)\n if views:\n out['views'] = views\n else:\n parc, p = self._get_parc()\n if p is not None and p.views:\n out['views'] = p.views\n\n if surf:\n out['surf'] = surf\n if foreground:\n 
out['foreground'] = foreground\n if background:\n out['background'] = background\n if smoothing_steps:\n out['smoothing_steps'] = smoothing_steps\n if hemi:\n out['hemi'] = hemi\n return out\n"
] |
[
[
"numpy.abs",
"numpy.clip",
"numpy.min",
"numpy.arange",
"numpy.sort",
"numpy.testing.assert_array_equal",
"numpy.random.normal",
"numpy.equal",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.all"
],
[
"numpy.abs",
"numpy.invert",
"numpy.arange",
"numpy.vstack",
"numpy.round",
"numpy.atleast_2d",
"numpy.all",
"numpy.isscalar",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pratishtha-abrol/tfx-addons
|
[
"ca9936442d60104e80dd04914e2a6ee6047d0d5c"
] |
[
"tfrecord-to-numpy.py"
] |
[
"# Reading a TFRecord file\n# Serialized tensors can be easily parsed using `tf.train.Example.ParseFromString`\n\nimport tensorflow as tf\nimport numpy as np\n\nfilename = \"something\"\nfilenames = [filename]\nraw_dataset = tf.data.TFRecordDataset(filenames)\nprint(raw_dataset)\n\nfor raw_record in raw_dataset.take(1):\n example = tf.train.Example()\n example.ParseFromString(raw_record.numpy())\n print(example)\n\n# That returns a `tf.train.Example` proto which is dificult to use as is, but it's fundamentally a representation of a:\n#\n# Dict[str,\n# Union[List[float],\n# List[int],\n# List[str]]]\n#\n# The following code manually converts the `Example` to a dictionary of NumPy arrays, without using tensorflow Ops.\n# Refer to [the PROTO file](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for details.\n\nresult = {}\n# example.features.feature is the dictionary\nfor key, feature in example.features.feature.items():\n # The values are the Feature objects which contain a `kind` which contains:\n # one of three fields: bytes_list, float_list, int64_list\n\n kind = feature.WhichOneof('kind')\n result[key] = np.array(getattr(feature, kind).value)\n\nprint(result)"
] |
[
[
"tensorflow.data.TFRecordDataset",
"tensorflow.train.Example"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kstennettlull/dagster
|
[
"dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6"
] |
[
"examples/docs_snippets/docs_snippets/guides/dagster/dagster_type_factories/schema.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport pandera as pa\n\nMIN_DATE = pd.Timestamp(\"2021-10-01\")\n\ndf = pd.read_csv(\n \"./ebike_trips.csv\",\n parse_dates=[\"start_time\", \"end_time\"],\n)\n\ntrips_schema = pa.DataFrameSchema(\n columns={\n \"bike_id\": pa.Column(\n int, checks=pa.Check.ge(0)\n ), # ge: greater than or equal to\n \"start_time\": pa.Column(pd.Timestamp, checks=pa.Check.ge(MIN_DATE)),\n \"end_time\": pa.Column(pd.Timestamp, checks=pa.Check.ge(MIN_DATE)),\n },\n)\n"
] |
[
[
"pandas.Timestamp",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
minhncedutw/prac-keras-yolo3
|
[
"61b0d90a92875272eeeb27ee860d915475a9aecc"
] |
[
"train_re.py"
] |
[
"'''\n File name: keras-yolo3\n Author: minhnc-lab\n Date created(MM/DD/YYYY): 2019-05-03\n Last modified(MM/DD/YYYY HH:MM): 2019-05-03 10:28\n Python Version: 3.6\n Other modules: [None]\n\n Copyright = Copyright (C) 2017 of CONG-MINH NGUYEN\n Credits = [None] # people who reported bug fixes, made suggestions, etc. but did not actually write the code\n License = None\n Version = 0.9.0.1\n Maintainer = [None]\n Email = [email protected]\n Status = Prototype # \"Prototype\", \"Development\", or \"Production\"\n Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6\n Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting\n'''\n\n#==============================================================================\n# Imported Modules\n#==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os.path\nimport sys\nimport time\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\" # The GPU id to use, usually either \"0\" or \"1\"\n\nimport json\nimport numpy as np\n\nimport tensorflow as tf\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom callbacks import CustomModelCheckpoint, CustomTensorBoard\nfrom keras.models import load_model\n\nfrom voc import parse_voc_annotation\nfrom generator import BatchGenerator\nfrom utils.utils import normalize, makedirs, evaluate\nfrom utils.multi_gpu_model import multi_gpu_model\nfrom yolo_re import create_yolov3_model, dummy_loss\n\n#==============================================================================\n# Constant Definitions\n#==============================================================================\n\n#==============================================================================\n# Function Definitions\n#==============================================================================\ndef create_training_instances(train_annot_folder, train_image_folder, train_cache,\n valid_annot_folder, valid_image_folder, valid_cache,\n labels):\n # parse annotations of the training set\n train_ints, train_labels = parse_voc_annotation(ann_dir=train_annot_folder, img_dir=train_image_folder, cache_name=train_cache, labels=labels)\n\n # parse annotations of the validation set, if any, otherwise split the training set\n if os.path.exists(valid_annot_folder):\n valid_ints, valid_labels = parse_voc_annotation(valid_annot_folder, valid_image_folder, valid_cache, labels)\n else:\n print(\"valid_annot_folder not exists. Spliting the trainining set.\")\n\n train_valid_split = int(0.8 * len(train_ints))\n np.random.seed(seed=0)\n np.random.shuffle(train_ints)\n np.random.seed(seed=None)\n\n valid_ints = train_ints[train_valid_split:]\n train_ints = train_ints[:train_valid_split]\n\n # compare the seen labels with the given labels in config.json\n if len(labels) > 0:\n overlap_labels = set(labels).intersection(set(train_labels.keys()))\n\n print('Seen labels: \\t' + str(train_labels))\n print('Given labels: \\t' + str(labels))\n\n # return None, None, None if some given label is not in the dataset\n if len(overlap_labels) < len(labels):\n print('Some labels have no annotations! Please revise the list of labels in the config.json.')\n return None, None, None\n else:\n print('No labels are provided. 
Train on all seen labels.')\n print(train_labels)\n labels = train_labels.keys()\n\n max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])\n\n return train_ints, valid_ints, sorted(labels), max_box_per_image\n\n\ndef create_model(\n nb_class,\n anchors,\n max_box_per_image,\n max_grid,\n batch_size,\n warmup_batches,\n ignore_thresh,\n multi_gpu,\n saved_weights_name,\n lr,\n grid_scales,\n obj_scale,\n noobj_scale,\n xywh_scale,\n class_scale\n):\n if multi_gpu > 1:\n with tf.device('/cpu:0'):\n template_model, infer_model = create_yolov3_model(\n nb_class =nb_class,\n anchors =anchors,\n max_box_per_image =max_box_per_image,\n max_grid =max_grid,\n batch_size =batch_size//multi_gpu,\n warmup_batches =warmup_batches,\n ignore_thresh =ignore_thresh,\n grid_scales =grid_scales,\n obj_scale =obj_scale,\n noobj_scale =noobj_scale,\n xywh_scale =xywh_scale,\n class_scale =class_scale\n )\n else:\n template_model, infer_model = create_yolov3_model(\n nb_class =nb_class,\n anchors =anchors,\n max_box_per_image =max_box_per_image,\n max_grid =max_grid,\n batch_size =batch_size,\n warmup_batches =warmup_batches,\n ignore_thresh =ignore_thresh,\n grid_scales =grid_scales,\n obj_scale =obj_scale,\n noobj_scale =noobj_scale,\n xywh_scale =xywh_scale,\n class_scale =class_scale\n )\n\n # load the pretrained weight if exists, otherwise load the backend weight only\n if os.path.exists(saved_weights_name):\n print(\"\\nLoading pretrained weights.\\n\")\n template_model.load_weights(saved_weights_name)\n else:\n template_model.load_weights(\"backend.h5\", by_name=True)\n\n train_model = template_model\n\n optimizer = Adam(lr=lr, clipnorm=0.001)\n train_model.compile(loss=dummy_loss, optimizer=optimizer)\n\n return train_model, infer_model\n\n\ndef create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):\n makedirs(path=tensorboard_logs)\n\n early_stop = EarlyStopping(\n monitor ='loss',\n min_delta =0.01,\n patience =5,\n mode ='min',\n verbose =1\n )\n\n checkpoint = CustomModelCheckpoint(\n model_to_save =model_to_save,\n filepath =saved_weights_name,\n monitor ='loss',\n verbose =1,\n save_best_only =True,\n mode ='min',\n period =1\n )\n reduce_on_plateau = ReduceLROnPlateau(\n monitor ='loss',\n factor =0.1,\n patience =2,\n verbose =1,\n mode ='min',\n epsilon =0.01,\n cooldown =0,\n min_lr =0\n )\n tensorboard = CustomTensorBoard(\n log_dir =tensorboard_logs,\n write_graph =True,\n write_images =True\n )\n return [early_stop, checkpoint, reduce_on_plateau, tensorboard]\n\n#==============================================================================\n# Main function\n#==============================================================================\ndef _main_(args):\n print('Hello World! 
This is {:s}'.format(args.desc))\n\n config_path = args.conf\n with open(config_path) as config_buffer:\n config = json.loads(config_buffer.read())\n\n ###############################\n # Prepare the data\n ###############################\n ## Parse the annotations\n train_ints, valid_ints, labels, max_box_per_image = create_training_instances(\n config['train']['train_annot_folder'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['valid']['valid_annot_folder'],\n config['valid']['valid_image_folder'],\n config['valid']['cache_name'],\n config['model']['labels']\n )\n print('\\nTraining on: \\t' + str(labels) + '\\n')\n\n ## Create the generators\n train_generator = BatchGenerator(\n instances =train_ints,\n anchors =config['model']['anchors'],\n labels =labels,\n downsample =32,\n max_box_per_image =max_box_per_image,\n batch_size =config['train']['batch_size'],\n min_net_size =config['model']['min_input_size'],\n max_net_size =config['model']['max_input_size'],\n shuffle =True,\n jitter =0.3,\n norm =normalize\n )\n\n valid_generator = BatchGenerator(\n instances =valid_ints,\n anchors =config['model']['anchors'],\n labels =labels,\n downsample =32,\n max_box_per_image =max_box_per_image,\n batch_size =config['train']['batch_size'],\n min_net_size =config['model']['min_input_size'],\n max_net_size =config['model']['max_input_size'],\n shuffle =True,\n jitter =0.0,\n norm =normalize # /255\n )\n\n ###############################\n # Create the model\n ###############################\n if os.path.exists(config['train']['saved_weights_name']):\n config['train']['warmup_epochs'] = 0\n warmup_batches = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator))\n\n os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']\n multi_gpu = len(config['train']['gpus'].split(','))\n\n train_model, infer_model = create_model(\n nb_class =len(labels),\n anchors =config['model']['anchors'],\n max_box_per_image =max_box_per_image,\n max_grid =[config['model']['max_input_size'], config['model']['max_input_size']],\n batch_size =config['train']['batch_size'],\n warmup_batches =warmup_batches,\n ignore_thresh =config['train']['ignore_thresh'],\n multi_gpu =multi_gpu,\n saved_weights_name =config['train']['saved_weights_name'],\n lr =config['train']['learning_rate'],\n grid_scales =config['train']['grid_scales'],\n obj_scale =config['train']['obj_scale'],\n noobj_scale =config['train']['noobj_scale'],\n xywh_scale =config['train']['xywh_scale'],\n class_scale =config['train']['class_scale']\n )\n\n ###############################\n # Kick off the training\n ###############################\n callbacks = create_callbacks(saved_weights_name =config['train']['saved_weights_name'],\n tensorboard_logs =config['train']['tensorboard_dir'],\n model_to_save =infer_model\n )\n\n train_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=len(train_generator) * config['train']['train_times'],\n epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],\n verbose=2 if config['train']['debug'] else 1,\n callbacks=callbacks,\n workers=4,\n max_queue_size=8\n )\n\n ###############################\n # Run the evaluation\n ###############################\n # compute mAP for all the classes\n average_precisions = evaluate(infer_model, valid_generator)\n\n # print the score\n for label, average_precision in average_precisions.items():\n print(labels[label] + ': {:.4f}'.format(average_precision))\n print('mAP: 
{:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='Your program name!!!')\n argparser.add_argument('-d', '--desc', help='description of the program', default='redo the keras-yolo3')\n argparser.add_argument('-c', '--conf', default='config.json', help='path to configuration file')\n\n args = argparser.parse_args()\n _main_(args)\n"
] |
[
[
"numpy.random.shuffle",
"numpy.random.seed",
"tensorflow.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
smokinmirror/raocp-toolbox
|
[
"56c45c1a720897c6cb241b6db74ca956adff7aa5"
] |
[
"raocp/core/risks.py"
] |
[
"import numpy as np\nimport raocp.core.cones as core_cones\n\n\nclass AVaR:\n \"\"\"\n Risk item: Average Value at Risk class\n \"\"\"\n\n def __init__(self, alpha, probabilities):\n \"\"\"\n :param alpha: AVaR risk parameter\n :param probabilities: list of probabilities of future events\n\n Note: ambiguity sets of coherent risk measures can be expressed by conic inequalities,\n defined by a tuple (E, F, cone, b)\n \"\"\"\n if 0 <= alpha <= 1:\n self.__alpha = alpha\n else:\n raise ValueError(\"alpha value '%d' not supported\" % alpha)\n self.__num_children = len(probabilities)\n self.__children_probabilities = np.asarray(probabilities).reshape(self.__num_children, 1)\n\n self.__matrix_e = None # coefficient matrix of mu\n self.__matrix_f = None # coefficient matrix of nu\n self.__cone = None\n self.__vector_b = None\n self.__make_e_f_cone_b()\n\n def __make_e_f_cone_b(self):\n eye = np.eye(self.__num_children)\n self.__matrix_e = np.vstack((self.__alpha*eye, -eye, np.ones((1, self.__num_children))))\n self.__matrix_f = np.zeros((0, self.__num_children))\n self.__cone = core_cones.Cartesian([core_cones.NonnegativeOrthant(dimension=2 * self.__num_children),\n core_cones.Zero(dimension=1)])\n self.__vector_b = np.vstack((self.__children_probabilities, np.zeros((self.__num_children, 1)), 1))\n\n # GETTERS\n @property\n def alpha(self):\n \"\"\"AVaR risk parameter alpha\"\"\"\n return self.__alpha\n\n @property\n def matrix_e(self):\n \"\"\"Ambiguity set matrix E\"\"\"\n return self.__matrix_e\n\n @property\n def matrix_f(self):\n \"\"\"Ambiguity set matrix F\"\"\"\n return self.__matrix_f\n\n @property\n def cone(self):\n \"\"\"Ambiguity set cone\"\"\"\n return self.__cone\n\n @property\n def vector_b(self):\n \"\"\"Ambiguity set vector b\"\"\"\n return self.__vector_b\n\n def __str__(self):\n return f\"Risk item; type: {type(self).__name__}, alpha: {self.__alpha}; cone: {self.__cone.types}\"\n\n def __repr__(self):\n return f\"Risk item; type: {type(self).__name__}, alpha: {self.__alpha}; cone: {self.__cone.types}\"\n"
] |
[
[
"numpy.asarray",
"numpy.eye",
"numpy.zeros",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cmeng94/dog-breed-classifier
|
[
"7178d82f9d654e2e95e8f5611c89353554094412"
] |
[
"app/dog_classifier.py"
] |
[
"import sys\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\nfrom keras.callbacks import ModelCheckpoint \nfrom keras.preprocessing import image \nfrom keras.applications.resnet import ResNet50\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\n\ndef face_detector(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image \n \n Output:\n \"True\" if at least one face is detected in image stored at img_path\n \"False\" is no face is detected\n '''\n\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face_cascade = cv2.CascadeClassifier('../app_preparation/haarcascades/haarcascade_frontalface_alt.xml')\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0\n \n\ndef path_to_tensor(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image \n \n Output:\n a 4D tensor suitable for supplying to a Keras CNN with shape (1,224,224,3)\n '''\n\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\n\ndef ResNet50_predict_labels(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image \n \n Output:\n prediction vector by ResNet50 for image located at img_path\n '''\n\n from keras.applications.resnet import preprocess_input, decode_predictions\n\n # return prediction vector for image located at img_path\n img = preprocess_input(path_to_tensor(img_path))\n ResNet50_model_orig = ResNet50(weights='imagenet')\n return np.argmax(ResNet50_model_orig.predict(img))\n\n\ndef dog_detector(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image \n \n Output:\n \"True\" if a dog is detected in image stored at img_path\n \"False\" is no dog is detected\n '''\n\n prediction = ResNet50_predict_labels(img_path)\n return ((prediction <= 268) & (prediction >= 151)) \n\ndef get_DL_model():\n\n '''\n Input:\n None\n \n Output:\n DL_model: CNN pretrained upon the InceptionV3 neural network using transfer learning\n '''\n\n print('Loading model...')\n DL_model = Sequential()\n DL_model.add(GlobalAveragePooling2D(input_shape=(5, 5, 2048)))\n DL_model.add(Dense(133, activation='softmax'))\n DL_model.load_weights('../app_preparation/saved_models/weights.best.InceptionV3.hdf5')\n return DL_model\n\ndef extract_InceptionV3(tensor):\n\n '''\n Input:\n tensor: image processed by path_to_tensor\n\n Output:\n bottleneck feature transformed by InceptionV3\n '''\n\n return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))\n\ndef DL_predict_breed(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image \n \n Output:\n breed: breed of dog in input image predicted by CNN trained on top of the InceptionV3 neural network\n '''\n\n DL_model = get_DL_model()\n print('Predicting breed...')\n # extract bottleneck features\n bottleneck_feature = extract_InceptionV3(path_to_tensor(img_path))\n # obtain predicted vector\n predicted_vector = DL_model.predict(bottleneck_feature)\n # return dog breed that is predicted by the model\n\n with open('dog_names.txt', 'r') as f:\n dog_names = 
f.read().splitlines()\n f.close()\n\n breed = dog_names[np.argmax(predicted_vector)]\n return breed\n\ndef classify_dog_breed(img_path):\n\n '''\n Input:\n img_path: string-valued file path to a color image\n \n Output:\n - if a dog is detected in the image, the predicted breed is returned\n - else if a human is detected in the image, the resembling dog breed is returned\n - if neither is detected in the image, \"neither\" is returned\n '''\n \n if dog_detector(img_path):\n breed = DL_predict_breed(img_path)\n # print('I detect a {} dog!'.format(breed))\n return ['dog', breed]\n\n elif face_detector(img_path):\n breed = DL_predict_breed(img_path)\n # print('I detect a human face resembling a {} dog!'.format(breed))\n return ['human', breed]\n \n else:\n # print(\"Sorry, I can only classify a dog or a human!\") \n return 'neither'\n\n# def main():\n# print(sys.argv)\n\n# if len(sys.argv) == 2:\n\n# img_path = sys.argv[1]\n# print('Loading image...')\n\n# classify_dog_breed(img_path)\n\n# else:\n# print('Please provide the filepath of the image as the first argument.')\n\n# if __name__ == '__main__':\n# \tmain()"
] |
[
[
"numpy.expand_dims",
"numpy.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mattkjames7/Arase
|
[
"996167be35a13bbb1fdddfbe75e3a06d124b1d25",
"996167be35a13bbb1fdddfbe75e3a06d124b1d25",
"996167be35a13bbb1fdddfbe75e3a06d124b1d25"
] |
[
"Arase/MGF/InterpObj.py",
"Arase/MEPi/SavePADs.py",
"Arase/XEP/DataAvailability.py"
] |
[
"import numpy as np\nfrom .ReadMGF import ReadMGF\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage import uniform_filter\nimport DateTimeTools as TT\n\ndef InterpObj(Date,Coords='GSE',Smooth=None):\n\t'''\n\tReturn interpolation objects for MGF data.\n\t\n\t'''\n\t\n\t#read the data in\n\tmag = ReadMGF(Date)\n\t\n\t#get continuous time\n\tmutc = TT.ContUT(mag.Date,mag.ut)\n\t\n\t#interpolate the bad data\n\tgood = np.where(np.isfinite(mag['Bx'+Coords]))[0]\n\tbad = np.where(np.isfinite(mag['Bx'+Coords]) == False)[0]\n\tfx = interp1d(mutc[good],mag['Bx'+Coords][good],bounds_error=False,fill_value='extrapolate')\n\tfy = interp1d(mutc[good],mag['By'+Coords][good],bounds_error=False,fill_value='extrapolate')\n\tfz = interp1d(mutc[good],mag['Bz'+Coords][good],bounds_error=False,fill_value='extrapolate')\n\t\n\tif not Smooth is None:\n\t\n\t\tmag['Bx'+Coords][bad] = fx(mutc[bad])\n\t\tmag['By'+Coords][bad] = fy(mutc[bad])\n\t\tmag['Bz'+Coords][bad] = fz(mutc[bad])\n\t\t\t\n\n\n\t\t#interpolation objects\n\t\tfx = interp1d(mutc,uniform_filter(mag['Bx'+Coords],Smooth),bounds_error=False,fill_value='extrapolate')\n\t\tfy = interp1d(mutc,uniform_filter(mag['By'+Coords],Smooth),bounds_error=False,fill_value='extrapolate')\n\t\tfz = interp1d(mutc,uniform_filter(mag['Bz'+Coords],Smooth),bounds_error=False,fill_value='extrapolate')\n\t\t\n\t\t\n\treturn fx,fy,fz\n",
"import numpy as np\nfrom .CalculatePADs import CalculatePADs\nfrom ..Tools.SavePAD import SavePAD\nfrom .. import Globals\nfrom .ReadIndex import ReadIndex\nfrom .. import MGF\nfrom ..MGF.DownloadData import DownloadData as DownloadMGF\nfrom .DownloadData import DownloadData\nfrom .DeleteDate import DeleteDate\nfrom ..Tools.ListDates import ListDates\nfrom .SaveMirrorAlts import SaveMirrorAlts\nimport os\n\ndef SavePADs(Date,na=18,Overwrite=False,DownloadMissingData=True,\n\t\tDeleteNewData=True,Verbose=True):\n\t'''\n\tSave the PADs for a date or dates.\n\t\n\tInput\n\t=====\n\tDate : int\n\t\tDate to download data for in format yyyymmdd\n\t\tIf single date - only data from that one day will be fetched\n\t\tIf 2-element array - dates from Date[0] to Date[1] will be downloaded\n\t\tIf > 2 elements - this is treated as a specific list of dates to download\n\tna : int\n\t\tNumber of alpha bins\n\tOverwrite: bool\n\t\tOverwrite existing PADs\n\tDownloadMissingData : bool\n\t\tDownload missing 3dflux data\n\tDeleteNewData : bool\n\t\tIf we had to download any new data, then delete it to save space\n\t'''\n\t\n\t#populate the list of dates to save\n\tif np.size(Date) == 1:\n\t\tdates = np.array([Date])\n\telif np.size(Date) == 2:\n\t\tdates = ListDates(Date[0],Date[1])\n\telse:\n\t\tdates = np.array([Date]).flatten()\n\t\t\n\tpath = Globals.DataPath + 'MEPi/PAD/'\n\t\t\n\t#read the data index to see what data we have\n\tidx = ReadIndex(2,'3dflux')\n\tmagidx = MGF.ReadIndex(2,'8sec')\n\t\t\n\tfor date in dates:\n\t\tprint('Saving date {:08d}'.format(date))\n\t\t\n\t\t#check if the 3dflux data exists\n\t\tdownloadednew = False\n\t\texists3d = date in idx.Date\n\t\tif not exists3d and DownloadMissingData:\n\t\t\tDownloadData(2,'3dflux',date,Verbose=Verbose)\n\t\t\tidx = ReadIndex(2,'3dflux')\n\t\t\texists3d = date in idx.Date\n\t\t\tdownloadednew = True\n\t\t#check if the MGF data exists\n\t\texistsmag = date in magidx.Date\n\t\tif not existsmag and DownloadMissingData:\n\t\t\tDownloadMGF(2,'8sec',date,Verbose=Verbose)\n\t\t\tmagidx = ReadIndex(2,'8sec')\n\t\t\texistsmag = date in magidx.Date\n\t\t\n\t\tif existsmag and exists3d:\n\t\t\n\t\t\tpad = CalculatePADs(date,na,Verbose)\n\t\t\tSavePAD(date,path,pad,Overwrite)\n\n\t\tif downloadednew and DeleteNewData:\n\t\t\tDeleteDate(date,2,'3dflux',False)\n\n\t\t#save mirror stuff if needed\n\t\tmirrexists = os.path.isfile(path+ '{:08d}/Mirror.bin'.format(date))\n\t\tif (not mirrexists) or Overwrite:\n\t\t\tSaveMirrorAlts(date,na,Overwrite)\n",
"import numpy as np\nfrom .ReadIndex import ReadIndex\n\ndef DataAvailability(L,prod):\n\t'''\n\tProvide a list of dates for which there are data.\n\n\tInputs\n\t======\n\tL : int\n\t\tLevel of data to download\n\tprod : str\n\t\tData product to download\n\n\n\tAvailable data products\n\t=======================\n\tL\t\tprod\n\t2\t\t'omniflux'\n\t\n\tReturns\n\t=======\n\tdates : int\n\t\tArray of dates which have data\n\t\n\t'''\n\tidx = ReadIndex(L,prod)\n\treturn np.unique(idx.Date)\n"
] |
[
[
"scipy.interpolate.interp1d",
"numpy.isfinite",
"scipy.ndimage.uniform_filter"
],
[
"numpy.size",
"numpy.array"
],
[
"numpy.unique"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ErosMLima/keras
|
[
"70d7d07bd186b929d81f7a8ceafff5d78d8bd701",
"70d7d07bd186b929d81f7a8ceafff5d78d8bd701",
"70d7d07bd186b929d81f7a8ceafff5d78d8bd701"
] |
[
"keras/layers/preprocessing/string_lookup_test.py",
"keras/utils/io_utils_test.py",
"keras/callbacks_test.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text vectorization preprocessing layer.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.layers.preprocessing import preprocessing_test_utils\nfrom keras.layers.preprocessing import string_lookup\nfrom keras.utils.generic_utils import CustomObjectScope\n\n\ndef _get_end_to_end_test_cases():\n test_cases = (\n {\n \"testcase_name\": \"test_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n )\n\n crossed_test_cases = []\n # Cross above test cases with use_dataset in (True, False)\n for use_dataset in (True, False):\n for case in test_cases:\n case = case.copy()\n if use_dataset:\n case[\"testcase_name\"] = case[\"testcase_name\"] + \"_with_dataset\"\n case[\"use_dataset\"] = use_dataset\n crossed_test_cases.append(case)\n\n return crossed_test_cases\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass StringLookupLayerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n @parameterized.named_parameters(*_get_end_to_end_test_cases())\n def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,\n use_dataset, expected_output,\n input_dtype):\n cls = string_lookup.StringLookup\n expected_output_dtype = tf.int64\n input_shape = input_data.shape\n\n if use_dataset:\n # Keras APIs expect batched datasets.\n # TODO(rachelim): `model.predict` predicts the result on each\n # dataset batch separately, then tries to concatenate the results\n # together. When the results have different shapes on the non-concat\n # axis (which can happen in the output_mode = INT case for\n # StringLookup), the concatenation fails. In real use cases, this may\n # not be an issue because users are likely to pipe the preprocessing layer\n # into other keras layers instead of predicting it directly. A workaround\n # for these unit tests is to have the dataset only contain one batch, so\n # no concatenation needs to happen with the result. 
For consistency with\n # numpy input, we should make `predict` join differently shaped results\n # together sensibly, with 0 padding.\n input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(\n input_shape[0])\n vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(\n input_shape[0])\n\n with CustomObjectScope({\"StringLookup\": cls}):\n output_data = testing_utils.layer_test(\n cls,\n kwargs=kwargs,\n input_shape=input_shape,\n input_data=input_data,\n input_dtype=input_dtype,\n expected_output_dtype=expected_output_dtype,\n validate_training=False,\n adapt_data=vocab_data)\n self.assertAllClose(expected_output, output_data)\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass StringLookupVocabularyTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def _write_to_temp_file(self, file_name, vocab_list):\n vocab_path = os.path.join(self.get_temp_dir(), file_name + \".txt\")\n with tf.io.gfile.GFile(vocab_path, \"w\") as writer:\n for vocab in vocab_list:\n writer.write(vocab + \"\\n\")\n writer.flush()\n writer.close()\n return vocab_path\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(vocabulary=vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_int_output_explicit_vocab_with_special_tokens(self):\n vocab_data = [\"\", \"[UNK]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(vocabulary=vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_no_vocab(self):\n with self.assertRaisesRegex(\n ValueError, \"You must set the layer's vocabulary\"):\n layer = string_lookup.StringLookup()\n layer([[\"a\"]])\n\n def test_binary_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(\n vocabulary=vocab_data, output_mode=\"binary\")\n res = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=res)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_count_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"earth\", \"fire\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(\n vocabulary=vocab_data, output_mode=\"count\")\n res = layer(input_data)\n model = 
keras.Model(inputs=input_data, outputs=res)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_sparse_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(\n vocabulary=vocab_data, output_mode=\"binary\", sparse=True)\n res = layer(input_data)\n self.assertTrue(res.__class__.__name__, \"SparseKerasTensor\")\n\n def test_get_vocab_returns_str(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n expected_vocab = [\"\", \"[UNK]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = string_lookup.StringLookup(vocabulary=vocab_data)\n layer_vocab = layer.get_vocabulary()\n self.assertAllEqual(expected_vocab, layer_vocab)\n self.assertIsInstance(layer_vocab[0], str)\n\n inverse_layer = string_lookup.StringLookup(\n vocabulary=layer.get_vocabulary(), invert=True)\n layer_vocab = inverse_layer.get_vocabulary()\n self.assertAllEqual(expected_vocab, layer_vocab)\n self.assertIsInstance(layer_vocab[0], str)\n\n def test_int_output_explicit_vocab_from_file(self):\n vocab_list = [\"earth\", \"wind\", \"and\", \"fire\"]\n vocab_path = self._write_to_temp_file(\"vocab_file\", vocab_list)\n\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(vocabulary=vocab_path)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_int_output_explicit_vocab_from_file_via_setter(self):\n vocab_list = [\"earth\", \"wind\", \"and\", \"fire\"]\n vocab_path = self._write_to_temp_file(\"vocab_file\", vocab_list)\n\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup()\n layer.set_vocabulary(vocab_path)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = string_lookup.StringLookup(vocabulary=vocab_data)\n\n def test_non_unique_vocab_from_file_fails(self):\n vocab_list = [\"earth\", \"wind\", \"and\", \"fire\", \"earth\"]\n vocab_path = self._write_to_temp_file(\"repeat_vocab_file\", vocab_list)\n with self.assertRaisesRegex(\n tf.errors.FailedPreconditionError,\n \"HashTable has different value for same key.*earth\"):\n _ = string_lookup.StringLookup(vocabulary=vocab_path)\n\n def test_inverse_layer(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = string_lookup.StringLookup(vocabulary=vocab_data, invert=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = 
model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_inverse_layer_from_file(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[UNK]\"]])\n vocab_path = self._write_to_temp_file(\"vocab_file\", vocab_data)\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = string_lookup.StringLookup(vocabulary=vocab_path, invert=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_inverse_layer_from_file_with_non_default_msk(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[M]\"]])\n vocab_path = self._write_to_temp_file(\"vocab_file\", vocab_data)\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = string_lookup.StringLookup(\n vocabulary=vocab_path, invert=True, mask_token=\"[M]\")\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_forward_backward_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[UNK]\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup(vocabulary=vocab_data)\n invert_layer = string_lookup.StringLookup(\n vocabulary=vocab_data, invert=True)\n int_data = layer(input_data)\n out_data = invert_layer(int_data)\n model = keras.Model(inputs=input_data, outputs=out_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_forward_backward_adapted_vocab(self):\n adapt_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[UNK]\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = string_lookup.StringLookup()\n layer.adapt(adapt_data)\n invert_layer = string_lookup.StringLookup(\n vocabulary=layer.get_vocabulary(), invert=True)\n int_data = layer(input_data)\n out_data = invert_layer(int_data)\n model = keras.Model(inputs=input_data, outputs=out_data)\n output_data = model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_ragged_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant([[\"earth\", \"wind\", \"fire\"],\n [\"fire\", \"and\", \"earth\",\n \"ohio\"]])\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = string_lookup.StringLookup(num_oov_indices=2)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = 
model.predict(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for io_utils.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport builtins\nimport sys\n\nfrom keras import keras_parameterized\nfrom keras.utils import io_utils\n\n\nclass TestIOUtils(keras_parameterized.TestCase):\n\n def test_ask_to_proceed_with_overwrite(self):\n with tf.compat.v1.test.mock.patch.object(builtins, 'input') as mock_log:\n mock_log.return_value = 'y'\n self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))\n\n mock_log.return_value = 'n'\n self.assertFalse(\n io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))\n\n mock_log.side_effect = ['m', 'y']\n self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))\n\n mock_log.side_effect = ['m', 'n']\n self.assertFalse(\n io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))\n\n def test_path_to_string(self):\n\n class PathLikeDummy(object):\n\n def __fspath__(self):\n return 'dummypath'\n\n dummy = object()\n if sys.version_info >= (3, 4):\n from pathlib import Path # pylint:disable=g-import-not-at-top\n # conversion of PathLike\n self.assertEqual(io_utils.path_to_string(Path('path')), 'path')\n if sys.version_info >= (3, 6):\n self.assertEqual(io_utils.path_to_string(PathLikeDummy()), 'dummypath')\n\n # pass-through, works for all versions of python\n self.assertEqual(io_utils.path_to_string('path'), 'path')\n self.assertIs(io_utils.path_to_string(dummy), dummy)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras callbacks.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport collections\nimport csv\nimport json\nimport os\nimport re\nimport shutil\nimport sys\nimport threading\nimport time\nimport unittest\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.callbacks import BackupAndRestore\nfrom keras.engine import sequential\nfrom keras.layers import Activation\nfrom keras.layers import Dense\nfrom keras.optimizer_v2 import gradient_descent\nfrom keras.optimizer_v2 import learning_rate_schedule\nfrom keras.utils import np_utils\nfrom tensorflow.python.platform import tf_logging as logging\n\ntry:\n import h5py # pylint:disable=g-import-not-at-top\nexcept ImportError:\n h5py = None\n\ntry:\n import requests # pylint:disable=g-import-not-at-top\nexcept ImportError:\n requests = None\n\n\nTRAIN_SAMPLES = 10\nTEST_SAMPLES = 10\nNUM_CLASSES = 2\nINPUT_DIM = 3\nNUM_HIDDEN = 5\nBATCH_SIZE = 5\n\nCALLBACK_HOOKS = [\n 'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',\n 'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',\n 'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',\n 'on_test_begin', 'on_test_end', 'on_train_batch_begin',\n 'on_train_batch_end', 'on_train_begin', 'on_train_end'\n]\n\n\nclass Counter(keras.callbacks.Callback):\n \"\"\"Counts the number of times each callback method was run.\n\n Attributes:\n method_counts: dict. 
Contains the counts of time each callback method was\n run.\n \"\"\"\n\n def __init__(self):\n self.method_counts = collections.defaultdict(int)\n for method_name in CALLBACK_HOOKS:\n setattr(self, method_name,\n self.wrap_with_counts(method_name, getattr(self, method_name)))\n\n def wrap_with_counts(self, method_name, method):\n\n def _call_and_count(*args, **kwargs):\n self.method_counts[method_name] += 1\n return method(*args, **kwargs)\n\n return _call_and_count\n\n\nclass CallAllHooks(keras.callbacks.Callback):\n \"\"\"A callback that calls self._run for all hooks\"\"\"\n\n def __init__(self):\n for method_name in CALLBACK_HOOKS:\n setattr(self, method_name, self._run)\n\n def _run(self, *args, logs=None):\n raise NotImplementedError\n\n\ndef _get_numpy():\n return np.ones((10, 10)), np.ones((10, 1))\n\n\ndef _get_sequence():\n\n class MySequence(keras.utils.data_utils.Sequence):\n\n def __getitem__(self, _):\n return np.ones((2, 10)), np.ones((2, 1))\n\n def __len__(self):\n return 5\n\n return MySequence(), None\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass CallbackCountsTest(keras_parameterized.TestCase):\n\n def _check_counts(self, counter, expected_counts):\n \"\"\"Checks that the counts registered by `counter` are those expected.\"\"\"\n for method_name, expected_count in expected_counts.items():\n self.assertEqual(\n counter.method_counts[method_name],\n expected_count,\n msg='For method {}: expected {}, got: {}'.format(\n method_name, expected_count, counter.method_counts[method_name]))\n\n def _get_model(self):\n layers = [\n keras.layers.Dense(10, activation='relu'),\n keras.layers.Dense(1, activation='sigmoid')\n ]\n model = testing_utils.get_model_from_layers(layers, input_shape=(10,))\n model.compile(\n tf.compat.v1.train.AdamOptimizer(0.001),\n 'binary_crossentropy',\n run_eagerly=testing_utils.should_run_eagerly())\n return model\n\n @parameterized.named_parameters(('with_numpy', _get_numpy()),\n ('with_sequence', _get_sequence()))\n def test_callback_hooks_are_called_in_fit(self, data):\n if not tf.executing_eagerly():\n self.skipTest('Behavior changed in v2.')\n x, y = data\n val_x, val_y = np.ones((4, 10)), np.ones((4, 1))\n\n model = self._get_model()\n counter = Counter()\n model.fit(\n x,\n y,\n validation_data=(val_x, val_y),\n batch_size=2,\n steps_per_epoch=5,\n epochs=5,\n callbacks=[counter])\n\n self._check_counts(\n counter, {\n 'on_batch_begin': 25,\n 'on_batch_end': 25,\n 'on_epoch_begin': 5,\n 'on_epoch_end': 5,\n 'on_predict_batch_begin': 0,\n 'on_predict_batch_end': 0,\n 'on_predict_begin': 0,\n 'on_predict_end': 0,\n 'on_test_batch_begin': 10,\n 'on_test_batch_end': 10,\n 'on_test_begin': 5,\n 'on_test_end': 5,\n 'on_train_batch_begin': 25,\n 'on_train_batch_end': 25,\n 'on_train_begin': 1,\n 'on_train_end': 1\n })\n\n @parameterized.named_parameters(('with_numpy', _get_numpy()),\n ('with_sequence', _get_sequence()))\n def test_callback_hooks_are_called_in_evaluate(self, data):\n x, y = data\n is_sequence = isinstance(x, keras.utils.data_utils.Sequence)\n\n model = self._get_model()\n counter = Counter()\n model.evaluate(\n x,\n y,\n batch_size=2 if not is_sequence else None,\n steps=5 if is_sequence else None,\n callbacks=[counter])\n self._check_counts(\n counter, {\n 'on_test_batch_begin': 5,\n 'on_test_batch_end': 5,\n 'on_test_begin': 1,\n 'on_test_end': 1\n })\n\n @parameterized.named_parameters(('with_numpy', _get_numpy()),\n ('with_sequence', _get_sequence()))\n def 
test_callback_hooks_are_called_in_predict(self, data):\n x = data[0]\n is_sequence = isinstance(x, keras.utils.data_utils.Sequence)\n\n model = self._get_model()\n counter = Counter()\n model.predict(\n x,\n batch_size=2 if not is_sequence else None,\n steps=5 if is_sequence else None,\n callbacks=[counter])\n self._check_counts(\n counter, {\n 'on_predict_batch_begin': 5,\n 'on_predict_batch_end': 5,\n 'on_predict_begin': 1,\n 'on_predict_end': 1\n })\n\n def test_callback_list_methods(self):\n counter = Counter()\n callback_list = keras.callbacks.CallbackList([counter])\n\n batch = 0\n callback_list.on_test_batch_begin(batch)\n callback_list.on_test_batch_end(batch)\n callback_list.on_predict_batch_begin(batch)\n callback_list.on_predict_batch_end(batch)\n\n self._check_counts(\n counter, {\n 'on_test_batch_begin': 1,\n 'on_test_batch_end': 1,\n 'on_predict_batch_begin': 1,\n 'on_predict_batch_end': 1\n })\n\n\nclass KerasCallbacksTest(keras_parameterized.TestCase):\n\n def _get_model(self, input_shape=None):\n layers = [\n keras.layers.Dense(3, activation='relu'),\n keras.layers.Dense(2, activation='softmax')\n ]\n model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],\n run_eagerly=testing_utils.should_run_eagerly())\n return model\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_progbar_logging(self):\n model = self._get_model(input_shape=(3,))\n\n x = tf.ones((200, 3))\n y = tf.zeros((200, 2))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)\n expected_log = r'(.*- loss:.*- my_acc:.*)+'\n\n with self.captureWritesToStream(sys.stdout) as printed:\n model.fit(dataset, epochs=2, steps_per_epoch=10)\n self.assertRegex(printed.contents(), expected_log)\n\n @keras_parameterized.run_all_keras_modes\n def test_trivial_backup_restore(self):\n if testing_utils.should_run_eagerly():\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n cbk = BackupAndRestore(self.get_temp_dir())\n model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk])\n\n @keras_parameterized.run_all_keras_modes\n def test_callback_warning(self):\n\n class SleepCallback(keras.callbacks.Callback):\n\n def on_train_batch_end(self, batch, logs=None):\n time.sleep(0.1)\n\n model = sequential.Sequential()\n model.add(keras.layers.Dense(1))\n model.compile(\n 'sgd',\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n\n warning_messages = []\n\n def warning(msg):\n warning_messages.append(msg)\n\n with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):\n model.fit(\n np.ones((16, 1), 'float32'),\n np.ones((16, 1), 'float32'),\n batch_size=3,\n epochs=1,\n callbacks=[SleepCallback()])\n warning_msg = ('Callback method `on_train_batch_end` is slow compared '\n 'to the batch time')\n self.assertIn(warning_msg, '\\n'.join(warning_messages))\n\n @keras_parameterized.run_all_keras_modes\n def test_default_callbacks_no_warning(self):\n # Test that without the callback no warning is raised\n model = sequential.Sequential()\n model.add(keras.layers.Dense(1))\n model.compile(\n 'sgd',\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n\n warning_messages = []\n\n def warning(msg):\n warning_messages.append(msg)\n\n with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):\n model.fit(\n np.ones((16, 1), 'float32'),\n np.ones((16, 1), 
'float32'),\n batch_size=3,\n epochs=1)\n self.assertListEqual(warning_messages, [])\n\n @keras_parameterized.run_with_all_model_types(exclude_models='functional')\n @keras_parameterized.run_all_keras_modes\n def test_progbar_logging_deferred_model_build(self):\n model = self._get_model()\n self.assertFalse(model.built)\n\n x = tf.ones((200, 3))\n y = tf.zeros((200, 2))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)\n expected_log = r'(.*- loss:.*- my_acc:.*)+'\n\n with self.captureWritesToStream(sys.stdout) as printed:\n model.fit(dataset, epochs=2, steps_per_epoch=10)\n self.assertRegex(printed.contents(), expected_log)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_progbar_logging_validation_data(self):\n model = self._get_model(input_shape=(3,))\n\n x = tf.ones((50, 3))\n y = tf.zeros((50, 2))\n training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)\n val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)\n expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'\n\n with self.captureWritesToStream(sys.stdout) as printed:\n model.fit(training_dataset, epochs=2, validation_data=val_dataset)\n self.assertRegex(printed.contents(), expected_log)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_progbar_logging_validation_split(self):\n model = self._get_model(input_shape=(3,))\n\n x = np.ones((100, 3))\n y = np.zeros((100, 2))\n expected_log = (\n r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'\n r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')\n\n with self.captureWritesToStream(sys.stdout) as printed:\n model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)\n self.assertRegex(printed.contents(), expected_log)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_progbar_logging_training_validation(self):\n model = self._get_model(input_shape=(2,))\n\n def generator():\n for _ in range(100):\n yield [1, 1], 1\n\n training = tf.data.Dataset \\\n .from_generator(\n generator=generator,\n output_types=('float64', 'float64'),\n output_shapes=([2], [])) \\\n .batch(2) \\\n .repeat()\n validation = tf.data.Dataset \\\n .from_generator(\n generator=generator,\n output_types=('float64', 'float64'),\n output_shapes=([2], [])) \\\n .batch(2)\n expected_log = (\n r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'\n r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')\n\n with self.captureWritesToStream(sys.stdout) as printed:\n model.fit(\n x=training, validation_data=validation, epochs=2, steps_per_epoch=20)\n self.assertRegex(printed.contents(), expected_log)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_progbar_logging_with_dataset_and_partial_batch(self):\n model = self._get_model(input_shape=(2,))\n\n def generator():\n # Have a partial batch at the end.\n for _ in range(9):\n yield np.random.random(2), 1\n\n training = tf.data.Dataset \\\n .from_generator(\n generator=generator,\n output_types=('float64', 'float64'),\n output_shapes=([2], [])) \\\n .batch(2)\n validation = tf.data.Dataset \\\n .from_generator(\n generator=generator,\n output_types=('float64', 'float64'),\n output_shapes=([2], [])) \\\n .batch(2)\n\n with self.captureWritesToStream(sys.stdout) as printed:\n 
model.fit(x=training, validation_data=validation)\n\n # Make sure the value of val_ metrics are not zeros.\n log_content = printed.contents()\n val_loss = re.findall(r'val_loss: (\\d\\.\\d+)', log_content)\n self.assertLen(val_loss, 1)\n self.assertGreater(float(val_loss[0]), 0.0)\n\n @keras_parameterized.run_with_all_model_types\n def test_ModelCheckpoint(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n model_type = testing_utils.get_model_type()\n if model_type == 'subclass':\n return # Skip test since subclassed models cannot be saved in .h5 format.\n\n layers = [\n keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),\n keras.layers.Dense(NUM_CLASSES, activation='softmax')\n ]\n model = testing_utils.get_model_from_layers(layers, input_shape=(3,))\n model.compile(\n loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n\n filepath = os.path.join(temp_dir, 'checkpoint.h5')\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n # Case 1\n monitor = 'val_loss'\n save_best_only = False\n mode = 'auto'\n\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # Case 2\n mode = 'min'\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # Case 3\n mode = 'max'\n monitor = 'val_acc'\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # Case 4\n save_best_only = True\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # Case 5: metric not available.\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor='unknown',\n save_best_only=True)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n # File won't be written.\n assert not os.path.exists(filepath)\n\n # Case 6\n save_best_only = False\n period = 2\n mode = 'auto'\n\n filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n period=period)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n 
epochs=4,\n verbose=1)\n assert os.path.exists(filepath.format(epoch=2))\n assert os.path.exists(filepath.format(epoch=4))\n os.remove(filepath.format(epoch=2))\n os.remove(filepath.format(epoch=4))\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=3))\n\n # Invalid use: this will raise a warning but not an Exception.\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode='unknown')\n\n # Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`.\n # Though `period` is deprecated, we're testing it for\n # backward-compatibility.\n filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)\n ]\n assert not os.path.exists(filepath.format(epoch=0))\n assert not os.path.exists(filepath.format(epoch=5))\n model.fit(\n x_train,\n y_train,\n batch_size=2,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=10,\n verbose=1)\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=2))\n assert not os.path.exists(filepath.format(epoch=3))\n assert not os.path.exists(filepath.format(epoch=4))\n assert os.path.exists(filepath.format(epoch=5))\n assert not os.path.exists(filepath.format(epoch=6))\n assert os.path.exists(filepath.format(epoch=10))\n os.remove(filepath.format(epoch=5))\n os.remove(filepath.format(epoch=10))\n\n # Case 8: `ModelCheckpoint` with an integer `save_freq`\n filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n save_freq=15,\n period=100) # The period should be ignored (this test tests this).\n ]\n assert not os.path.exists(filepath.format(epoch=3))\n model.fit(\n x_train,\n y_train,\n batch_size=2,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=10,\n verbose=1)\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=2))\n assert os.path.exists(filepath.format(epoch=3))\n assert not os.path.exists(filepath.format(epoch=4))\n assert not os.path.exists(filepath.format(epoch=5))\n assert os.path.exists(filepath.format(epoch=6))\n assert not os.path.exists(filepath.format(epoch=7))\n assert not os.path.exists(filepath.format(epoch=8))\n assert os.path.exists(filepath.format(epoch=9))\n os.remove(filepath.format(epoch=3))\n os.remove(filepath.format(epoch=6))\n os.remove(filepath.format(epoch=9))\n\n # Case 9: `ModelCheckpoint` with valid and invalid save_freq argument.\n with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n save_freq='invalid_save_freq')\n # The following should not raise ValueError.\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n save_freq='epoch')\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n save_freq=3)\n\n # Case 10: `ModelCheckpoint` with valid and invalid `options` argument.\n with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n save_weights_only=True,\n mode=mode,\n 
options=tf.saved_model.SaveOptions())\n with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n save_weights_only=False,\n mode=mode,\n options=tf.train.CheckpointOptions())\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n save_weights_only=True,\n mode=mode,\n options=tf.train.CheckpointOptions())\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n save_weights_only=False,\n mode=mode,\n options=tf.saved_model.SaveOptions())\n\n @testing_utils.run_v2_only\n def test_ModelCheckpoint_subclass_save_weights_false(self):\n model = testing_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES)\n model.compile(\n loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n filepath = os.path.join(temp_dir, 'checkpoint')\n cbks = [keras.callbacks.ModelCheckpoint(\n filepath, save_weights_only=False)]\n\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES)\n\n model.fit(\n x_train,\n y_train,\n callbacks=cbks,\n epochs=1,\n verbose=0)\n # Check that the filepath is a SavedModel directory.\n self.assertIn('saved_model.pb', os.listdir(filepath))\n\n def _get_dummy_resource_for_model_checkpoint_testing(self):\n\n def get_input_datasets():\n # Simple training input.\n train_input = [[1.]] * 16\n train_label = [[0.]] * 16\n ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))\n return ds.batch(8, drop_remainder=True)\n\n # Very simple bias model to eliminate randomness.\n optimizer = gradient_descent.SGD(0.1)\n model = sequential.Sequential()\n model.add(testing_utils.Bias(input_shape=(1,)))\n model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])\n train_ds = get_input_datasets()\n\n temp_dir = self.get_temp_dir()\n filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')\n\n # The filepath shouldn't exist at the beginning.\n self.assertFalse(os.path.exists(filepath))\n callback = keras.callbacks.ModelCheckpoint(\n filepath=filepath, save_weights_only=True)\n\n return model, train_ds, callback, filepath\n\n def _run_load_weights_on_restart_test_common_iterations(self):\n\n (model, train_ds, callback,\n filepath) = self._get_dummy_resource_for_model_checkpoint_testing()\n initial_epochs = 3\n model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])\n\n # The files should exist after fitting with callback.\n for epoch in range(initial_epochs):\n self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))\n self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))\n self.assertEqual(\n callback._get_most_recently_modified_file_matching_pattern(filepath),\n filepath.format(epoch=initial_epochs))\n\n model.fit(train_ds, epochs=1)\n weights_after_one_more_epoch = model.get_weights()\n\n # The filepath should continue to exist after fitting without callback.\n for epoch in range(initial_epochs):\n self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))\n\n return model, train_ds, filepath, weights_after_one_more_epoch\n\n @staticmethod\n def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):\n\n def 
func(self):\n (model, train_ds, filepath, weights_after_one_more_epoch\n ) = self._run_load_weights_on_restart_test_common_iterations()\n\n # Sleep for some short time period ensuring the files are created with\n # a different time (in MacOS OSS the granularity is only 1 second).\n time.sleep(2)\n callback = keras.callbacks.ModelCheckpoint(\n filepath=filepath,\n save_weights_only=save_weights_only,\n load_weights_on_restart=True)\n model.fit(train_ds, epochs=1, callbacks=[callback])\n weights_after_model_restoring_and_one_more_epoch = model.get_weights()\n\n self.assertEqual(\n callback._get_most_recently_modified_file_matching_pattern(filepath),\n filepath.format(epoch=1))\n\n model.fit(\n train_ds,\n epochs=1,\n callbacks=[\n keras.callbacks.ModelCheckpoint(\n filepath=filepath,\n save_weights_only=save_weights_only,\n load_weights_on_restart=True)\n ])\n weights_with_one_final_extra_epoch = model.get_weights()\n\n # Asserting the weights one epoch after initial fitting and another epoch\n # after that are closed, if a ModelCheckpoint with\n # load_weights_on_restart=True is given (so the model is restored at the\n # beginning of training).\n self.assertAllClose(weights_after_one_more_epoch,\n weights_after_model_restoring_and_one_more_epoch)\n\n self.assertNotAllClose(weights_after_one_more_epoch,\n weights_with_one_final_extra_epoch)\n\n return func\n\n @staticmethod\n def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):\n\n def func(self):\n (model, train_ds, filepath, weights_after_one_more_epoch\n ) = self._run_load_weights_on_restart_test_common_iterations()\n\n model.fit(\n train_ds,\n epochs=1,\n callbacks=[\n keras.callbacks.ModelCheckpoint(\n filepath=filepath, save_weights_only=save_weights_only)\n ])\n weights_after_model_restoring_and_one_more_epoch = model.get_weights()\n\n # Asserting the weights one epoch after initial fitting and another epoch\n # after that are different, if a ModelCheckpoint with\n # load_weights_on_restart=False is given (so the model is not restored at\n # the beginning of training).\n self.assertNotAllClose(weights_after_one_more_epoch,\n weights_after_model_restoring_and_one_more_epoch)\n\n return func\n\n test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \\\n get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)\n\n test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \\\n get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)\n\n test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \\\n get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)\n\n test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \\\n = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)\n\n def test_ModelCheckpoint_override_if_file_exist(self):\n (model, train_ds, filepath,\n _) = self._run_load_weights_on_restart_test_common_iterations()\n\n # Sleep for some short time period to ensure the files are created with\n # a different time (in MacOS OSS the granularity is only 1 second).\n time.sleep(2)\n callback = keras.callbacks.ModelCheckpoint(\n filepath=filepath, save_weights_only=True)\n model.load_weights(\n callback._get_most_recently_modified_file_matching_pattern(filepath))\n weights_before_additional_fit = model.get_weights()\n model.fit(train_ds, epochs=1, callbacks=[callback])\n model.load_weights(\n callback._get_most_recently_modified_file_matching_pattern(filepath))\n 
weights_after_additional_fit = model.get_weights()\n\n self.assertNotAllClose(weights_before_additional_fit,\n weights_after_additional_fit)\n\n def test_fit_with_ModelCheckpoint_with_tf_config(self):\n (model, train_ds, callback,\n _) = self._get_dummy_resource_for_model_checkpoint_testing()\n\n os.environ['TF_CONFIG'] = json.dumps({\n 'cluster': {\n 'worker': ['localhost:23333']\n },\n 'task': {\n 'type': 'worker',\n 'index': 0\n }\n })\n\n # `model.fit()` should work regardless of the presence of `TF_CONFIG`.\n model.fit(train_ds, epochs=1, callbacks=[callback])\n\n def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):\n (model, train_ds, callback,\n filepath) = self._get_dummy_resource_for_model_checkpoint_testing()\n\n temp_dir = self.get_temp_dir()\n filepath = os.path.join(temp_dir, 'temp.h5')\n\n self.assertFalse(os.path.exists(filepath))\n os.mkdir(filepath)\n self.assertTrue(os.path.exists(filepath))\n\n callback = keras.callbacks.ModelCheckpoint(filepath=filepath)\n\n with self.assertRaisesRegex(\n IOError, 'Please specify a non-directory '\n 'filepath for ModelCheckpoint.'):\n model.fit(train_ds, epochs=1, callbacks=[callback])\n\n def test_ModelCheckpoint_with_bad_path_placeholders(self):\n (model, train_ds, callback,\n filepath) = self._get_dummy_resource_for_model_checkpoint_testing()\n\n temp_dir = self.get_temp_dir()\n filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')\n callback = keras.callbacks.ModelCheckpoint(filepath=filepath)\n\n with self.assertRaisesRegex(KeyError, 'Failed to format this callback '\n 'filepath.*'):\n model.fit(train_ds, epochs=1, callbacks=[callback])\n\n def test_ModelCheckpoint_nonblocking(self):\n filepath = self.get_temp_dir()\n # Should only cause a sync block when saving is actually performed.\n callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100)\n self.assertTrue(callback._supports_tf_logs)\n\n model = keras.Sequential([keras.layers.Dense(1)])\n cb_list = keras.callbacks.CallbackList([callback],\n model=model,\n epochs=1,\n steps=10,\n verbose=0)\n\n tensor = tf.convert_to_tensor(1.)\n\n def mock_numpy():\n raise RuntimeError(\n 'If this error is seen, ModelCheckpoint is causing a blocking '\n 'NumPy conversion even when not checkpointing.')\n\n tensor.numpy = mock_numpy\n\n logs = {'metric': tensor}\n\n cb_list.on_train_begin(logs)\n cb_list.on_epoch_begin(0, logs)\n cb_list.on_train_batch_begin(0, logs)\n cb_list.on_train_batch_end(0, logs)\n cb_list.on_epoch_end(0, logs)\n cb_list.on_train_end(logs)\n\n cb_list.on_test_begin(logs)\n cb_list.on_test_batch_begin(0, logs)\n cb_list.on_test_batch_end(0, logs)\n cb_list.on_test_end(logs)\n\n cb_list.on_predict_begin(logs)\n cb_list.on_predict_batch_begin(logs)\n cb_list.on_predict_batch_end(logs)\n cb_list.on_predict_end(logs)\n\n def test_ProgbarLogger_verbose_2_nonblocking(self):\n # Should only cause a sync block on epoch end methods.\n callback = keras.callbacks.ProgbarLogger(count_mode='steps')\n self.assertTrue(callback._supports_tf_logs)\n\n model = keras.Sequential([keras.layers.Dense(1)])\n cb_list = keras.callbacks.CallbackList([callback],\n model=model,\n epochs=1,\n steps=10,\n verbose=2)\n\n tensor = tf.convert_to_tensor(1.)\n\n def mock_numpy():\n raise RuntimeError(\n 'If this error is seen, ModelCheckpoint is causing a blocking '\n 'NumPy conversion even when not checkpointing.')\n\n tensor.numpy = mock_numpy\n logs = {'metric': tensor}\n\n cb_list.on_train_begin(logs)\n cb_list.on_epoch_begin(0, logs)\n 
cb_list.on_train_batch_begin(0, logs)\n cb_list.on_train_batch_end(0, logs)\n\n cb_list.on_test_begin(logs)\n cb_list.on_test_batch_begin(0, logs)\n cb_list.on_test_batch_end(0, logs)\n cb_list.on_test_end(logs)\n\n with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):\n # on_epoch_end should still block.\n cb_list.on_epoch_end(0, logs)\n cb_list.on_train_end(logs)\n\n def test_EarlyStopping(self):\n with self.cached_session():\n np.random.seed(123)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = testing_utils.get_small_sequential_mlp(\n num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)\n model.compile(\n loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n\n cases = [\n ('max', 'val_acc'),\n ('min', 'val_loss'),\n ('auto', 'val_acc'),\n ('auto', 'loss'),\n ('unknown', 'unknown')\n ]\n for mode, monitor in cases:\n patience = 0\n cbks = [\n keras.callbacks.EarlyStopping(\n patience=patience, monitor=monitor, mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n\n def test_EarlyStopping_reuse(self):\n with self.cached_session():\n np.random.seed(1337)\n patience = 3\n data = np.random.random((100, 1))\n labels = np.where(data > 0.5, 1, 0)\n model = keras.models.Sequential((keras.layers.Dense(\n 1, input_dim=1, activation='relu'), keras.layers.Dense(\n 1, activation='sigmoid'),))\n model.compile(\n optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n weights = model.get_weights()\n\n # This should allow training to go for at least `patience` epochs\n model.set_weights(weights)\n\n stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)\n hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)\n assert len(hist.epoch) >= patience\n\n def test_EarlyStopping_with_baseline(self):\n with self.cached_session():\n np.random.seed(1337)\n baseline = 0.6\n (data, labels), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=50,\n input_shape=(1,),\n num_classes=NUM_CLASSES)\n model = testing_utils.get_small_sequential_mlp(\n num_hidden=1, num_classes=1, input_dim=1)\n model.compile(\n optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])\n\n stopper = keras.callbacks.EarlyStopping(monitor='acc',\n baseline=baseline)\n hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)\n assert len(hist.epoch) == 1\n\n patience = 3\n stopper = keras.callbacks.EarlyStopping(monitor='acc',\n patience=patience,\n baseline=baseline)\n hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)\n assert len(hist.epoch) >= patience\n\n def test_EarlyStopping_final_weights_when_restoring_model_weights(self):\n\n class DummyModel(object):\n\n def __init__(self):\n self.stop_training = False\n self.weights = -1\n\n def get_weights(self):\n return self.weights\n\n def set_weights(self, weights):\n self.weights = weights\n\n def set_weight_to_epoch(self, epoch):\n self.weights = epoch\n\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=2,\n restore_best_weights=True)\n early_stop.model = DummyModel()\n losses = [0.2, 0.15, 0.1, 0.11, 0.12]\n # The best configuration is in the epoch 2 (loss = 0.1000).\n 
epochs_trained = 0\n early_stop.on_train_begin()\n for epoch in range(len(losses)):\n epochs_trained += 1\n early_stop.model.set_weight_to_epoch(epoch=epoch)\n early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n if early_stop.model.stop_training:\n break\n # The best configuration is in epoch 2 (loss = 0.1000),\n # and while patience = 2, we're restoring the best weights,\n # so we end up at the epoch with the best weights, i.e. epoch 2\n self.assertEqual(early_stop.model.get_weights(), 2)\n\n # Check early stopping when no model beats the baseline.\n early_stop = keras.callbacks.EarlyStopping(\n monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True)\n early_stop.model = DummyModel()\n losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]\n # The best configuration is in the epoch 2 (loss = 0.7000).\n epochs_trained = 0\n early_stop.on_train_begin()\n for epoch in range(len(losses)):\n epochs_trained += 1\n early_stop.model.set_weight_to_epoch(epoch=epoch)\n early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n if early_stop.model.stop_training:\n break\n # No epoch improves on the baseline, so we should train for only 5 epochs,\n # and restore the second model.\n self.assertEqual(epochs_trained, 5)\n self.assertEqual(early_stop.model.get_weights(), 2)\n\n def test_RemoteMonitor(self):\n if requests is None:\n self.skipTest('`requests` required to run this test')\n return None\n\n monitor = keras.callbacks.RemoteMonitor()\n # This will raise a warning since the default address in unreachable:\n monitor.on_epoch_end(0, logs={'loss': 0.})\n\n def test_LearningRateScheduler(self):\n with self.cached_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = testing_utils.get_small_sequential_mlp(\n num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. 
+ x))]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n assert (\n float(keras.backend.get_value(\n model.optimizer.lr)) - 0.2) < keras.backend.epsilon()\n\n cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n assert (\n float(keras.backend.get_value(\n model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()\n\n cbks = [\n keras.callbacks.LearningRateScheduler(\n lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)\n (epoch))\n ]\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n\n cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))\n decayed_learning_rate = 0.01 * cosine_decay_np\n\n assert (float(keras.backend.get_value(model.optimizer.lr)) -\n decayed_learning_rate) < keras.backend.epsilon()\n\n def test_ReduceLROnPlateau(self):\n with self.cached_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n tf.compat.v1.set_random_seed(1234)\n np.random.seed(1337)\n model = testing_utils.get_small_sequential_mlp(\n num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)\n model.compile(\n loss='categorical_crossentropy',\n optimizer=gradient_descent.SGD(lr=0.1))\n return model\n\n # TODO(psv): Make sure the callback works correctly when min_delta is\n # set as 0. 
Test fails when the order of this callback and assertion is\n # interchanged.\n model = make_model()\n cbks = [\n keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.1,\n min_delta=0,\n patience=1,\n cooldown=5)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n self.assertAllClose(\n float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)\n\n model = make_model()\n # This should reduce the LR after the first epoch (due to high epsilon).\n cbks = [\n keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.1,\n min_delta=10,\n patience=1,\n cooldown=5)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=2)\n self.assertAllClose(\n float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)\n\n def test_ReduceLROnPlateau_patience(self):\n\n class DummyOptimizer(object):\n\n def __init__(self):\n self.lr = keras.backend.variable(1.0)\n\n class DummyModel(object):\n\n def __init__(self):\n self.optimizer = DummyOptimizer()\n\n reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', patience=2)\n reduce_on_plateau.model = DummyModel()\n\n losses = [0.0860, 0.1096, 0.1040]\n lrs = []\n\n for epoch in range(len(losses)):\n reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))\n\n # The learning rates should be 1.0 except the last one\n for lr in lrs[:-1]:\n self.assertEqual(lr, 1.0)\n self.assertLess(lrs[-1], 1.0)\n\n def test_ReduceLROnPlateau_backwards_compatibility(self):\n with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:\n reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)\n self.assertRegex(\n str(mock_log.call_args), '`epsilon` argument is deprecated')\n self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))\n self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))\n self.assertEqual(reduce_on_plateau.min_delta, 1e-13)\n\n def test_CSVLogger(self):\n with self.cached_session():\n np.random.seed(1337)\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n filepath = os.path.join(temp_dir, 'log.tsv')\n\n sep = '\\t'\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = testing_utils.get_small_sequential_mlp(\n num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)\n model.compile(\n loss='categorical_crossentropy',\n optimizer=gradient_descent.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n # case 1, create new file with defined separator\n model = make_model()\n cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n\n assert os.path.exists(filepath)\n with open(filepath) as csvfile:\n dialect = csv.Sniffer().sniff(csvfile.read())\n assert dialect.delimiter == sep\n del model\n del cbks\n\n # case 2, append data to existing file, skip header\n model = make_model()\n cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, 
append=True)]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n\n # case 3, reuse of CSVLogger object\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n\n with open(filepath) as csvfile:\n list_lines = csvfile.readlines()\n for line in list_lines:\n assert line.count(sep) == 4\n assert len(list_lines) == 5\n output = ' '.join(list_lines)\n assert len(re.findall('epoch', output)) == 1\n\n os.remove(filepath)\n\n def test_stop_training_csv(self):\n # Test that using the CSVLogger callback with the TerminateOnNaN callback\n # does not result in invalid CSVs.\n np.random.seed(1337)\n tmpdir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)\n\n with self.cached_session():\n fp = os.path.join(tmpdir, 'test.csv')\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]\n model = keras.models.Sequential()\n for _ in range(5):\n model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n def data_generator():\n i = 0\n max_batch_index = len(x_train) // BATCH_SIZE\n tot = 0\n while 1:\n if tot > 3 * len(x_train):\n yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,\n np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)\n else:\n yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],\n y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])\n i += 1\n tot += 1\n i %= max_batch_index\n\n history = model.fit_generator(data_generator(),\n len(x_train) // BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) > 1\n assert loss[-1] == np.inf or np.isnan(loss[-1])\n\n values = []\n with open(fp) as f:\n for x in csv.reader(f):\n # In windows, due to \\r\\n line ends we may end up reading empty lines\n # after each line. 
Skip empty lines.\n if x:\n values.append(x)\n assert 'nan' in values[-1], 'The last epoch was not logged.'\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_TerminateOnNaN(self):\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [keras.callbacks.TerminateOnNaN()]\n model = keras.models.Sequential()\n initializer = keras.initializers.Constant(value=1e5)\n for _ in range(5):\n model.add(\n keras.layers.Dense(\n 2,\n input_dim=INPUT_DIM,\n activation='relu',\n kernel_initializer=initializer))\n model.add(keras.layers.Dense(NUM_CLASSES))\n model.compile(loss='mean_squared_error', optimizer='rmsprop')\n\n history = model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n self.assertEqual(len(loss), 1)\n self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))\n\n @unittest.skipIf(\n os.name == 'nt',\n 'use_multiprocessing=True does not work on windows properly.')\n def test_LambdaCallback(self):\n with self.cached_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # Start an arbitrary process that should run during model\n # training and be terminated after training has completed.\n e = threading.Event()\n\n def target():\n e.wait()\n\n t = threading.Thread(target=target)\n t.start()\n cleanup_callback = keras.callbacks.LambdaCallback(\n on_train_end=lambda logs: e.set())\n\n cbks = [cleanup_callback]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n t.join()\n assert not t.is_alive()\n\n def test_RemoteMonitor_np_array(self):\n if requests is None:\n self.skipTest('`requests` required to run this test')\n with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:\n monitor = keras.callbacks.RemoteMonitor(send_as_json=True)\n a = np.arange(1) # a 1 by 1 array\n logs = {'loss': 0., 'val': a}\n monitor.on_epoch_end(0, logs=logs)\n send = {'loss': 0., 'epoch': 0, 'val': 0}\n requests_post.assert_called_once_with(\n monitor.root + monitor.path, json=send, headers=monitor.headers)\n\n def test_RemoteMonitor_np_float32(self):\n if requests is None:\n self.skipTest('`requests` required to run this test')\n\n with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:\n monitor = keras.callbacks.RemoteMonitor(send_as_json=True)\n a = np.float32(1.0) # a float32 generic type\n logs = {'loss': 0., 'val': a}\n monitor.on_epoch_end(0, logs=logs)\n send = {'loss': 0., 'epoch': 0, 'val': 1.0}\n requests_post.assert_called_once_with(\n monitor.root + monitor.path, json=send, headers=monitor.headers)\n\n def test_RemoteMonitorWithJsonPayload(self):\n if requests is 
None:\n self.skipTest('`requests` required to run this test')\n return None\n with self.cached_session():\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.np_utils.to_categorical(y_test)\n y_train = keras.utils.np_utils.to_categorical(y_train)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]\n\n with tf.compat.v1.test.mock.patch.object(requests, 'post'):\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1)\n\n def test_progbar_infers_steps(self):\n x, y = np.ones((10, 1)), np.ones((10, 1))\n data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)\n data = data.filter(lambda x, y: True) # Unknown cardinality.\n\n progbar = keras.callbacks.ProgbarLogger('steps')\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n self.assertIsNone(progbar.target)\n model.fit(data, epochs=2, callbacks=[progbar])\n self.assertEqual(progbar.target, 5)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_callback_passed_floats(self):\n\n class MyCallback(keras.callbacks.Callback):\n\n def on_batch_end(self, batch, logs=None):\n assert isinstance(batch, int)\n assert isinstance(logs['loss'], float)\n self.on_batch_end_called = True\n\n def on_epoch_end(self, batch, logs=None):\n assert isinstance(batch, int)\n assert isinstance(logs['loss'], float)\n self.on_epoch_end_called = True\n\n x, y = np.ones((10, 1)), np.ones((10, 1))\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n\n callback = MyCallback()\n model.fit(x, y, epochs=2, callbacks=[callback])\n self.assertTrue(callback.on_batch_end_called)\n self.assertTrue(callback.on_batch_end_called)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_implements_batch_hooks(self):\n\n class MyCallbackWithBatchHooks(keras.callbacks.Callback):\n\n def __init__(self):\n self.train_batches = 0\n self.test_batches = 0\n self.predict_batches = 0\n\n def on_train_batch_end(self, batch, logs=None):\n self.train_batches += 1\n\n def on_test_batch_end(self, batch, logs=None):\n self.test_batches += 1\n\n def on_predict_batch_end(self, batch, logs=None):\n self.predict_batches += 1\n\n class MyCallbackWithTFBatchHooks(keras.callbacks.Callback):\n\n def __init__(self):\n super(MyCallbackWithTFBatchHooks, self).__init__()\n self._supports_tf_logs = True\n\n class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):\n\n def __init__(self):\n self.epochs = 0\n\n def on_epoch_end(self, epoch, logs=None):\n self.epochs += 1\n\n x, y = np.ones((10, 1)), np.ones((10, 1))\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n\n my_cb = MyCallbackWithBatchHooks()\n cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)\n self.assertTrue(cb_list._should_call_train_batch_hooks)\n self.assertTrue(cb_list._should_call_test_batch_hooks)\n self.assertTrue(cb_list._should_call_predict_batch_hooks)\n self.assertFalse(cb_list._batch_hooks_support_tf_logs)\n\n model.fit(x, y, 
epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)\n model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)\n model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)\n\n self.assertEqual(my_cb.train_batches, 2)\n self.assertEqual(my_cb.test_batches, 1)\n self.assertEqual(my_cb.predict_batches, 1)\n\n my_cb = MyCallbackWithTFBatchHooks()\n cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)\n self.assertTrue(cb_list._batch_hooks_support_tf_logs)\n\n my_cb = MyCallbackWithoutBatchHooks()\n cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)\n self.assertLen(cb_list.callbacks, 1)\n self.assertFalse(cb_list._should_call_train_batch_hooks)\n self.assertFalse(cb_list._should_call_test_batch_hooks)\n self.assertFalse(cb_list._should_call_predict_batch_hooks)\n\n model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)\n model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)\n model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_logs_conversion(self):\n assert_dict_equal = self.assertDictEqual\n\n class MutateNumpyLogs(CallAllHooks):\n\n def _run(self, *args, logs=None):\n logs = logs or args[-1]\n logs['numpy'] = 1\n\n class MutateTensorFlowLogs(CallAllHooks):\n\n def __init__(self):\n super(MutateTensorFlowLogs, self).__init__()\n self._supports_tf_logs = True\n\n def _run(self, *args, logs=None):\n logs = logs or args[-1]\n logs['tf'] = 2\n\n class AssertNumpyLogs(CallAllHooks):\n\n def _run(self, *args, logs=None):\n logs = logs or args[-1]\n assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2})\n\n class AssertTensorFlowLogs(AssertNumpyLogs):\n\n def __init__(self):\n super(AssertTensorFlowLogs, self).__init__()\n self._supports_tf_logs = True\n\n cb_list = keras.callbacks.CallbackList([\n MutateNumpyLogs(),\n MutateTensorFlowLogs(),\n AssertNumpyLogs(),\n AssertTensorFlowLogs()\n ])\n\n assert len(cb_list.callbacks) == 4\n cb_list.on_epoch_begin(0, logs={'all': 0})\n cb_list.on_epoch_end(0, logs={'all': 0})\n cb_list.on_predict_batch_begin(0, logs={'all': 0})\n cb_list.on_predict_batch_end(0, logs={'all': 0})\n cb_list.on_predict_begin(logs={'all': 0})\n cb_list.on_predict_end(logs={'all': 0})\n cb_list.on_test_batch_begin(0, logs={'all': 0})\n cb_list.on_test_batch_end(0, logs={'all': 0})\n cb_list.on_test_begin(logs={'all': 0})\n cb_list.on_test_end(logs={'all': 0})\n cb_list.on_train_batch_begin(0, logs={'all': 0})\n cb_list.on_train_batch_end(0, logs={'all': 0})\n cb_list.on_train_begin(logs={'all': 0})\n cb_list.on_train_end(logs={'all': 0})\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_implements_batch_hooks_override(self):\n\n class MyCallback(keras.callbacks.Callback):\n\n def __init__(self, should_run=True):\n self.should_run = should_run\n self.train_batches = 0\n self.test_batches = 0\n self.predict_batches = 0\n\n def on_train_batch_end(self, batch, logs=None):\n self.train_batches += 1\n\n def on_test_batch_end(self, batch, logs=None):\n self.test_batches += 1\n\n def on_predict_batch_end(self, batch, logs=None):\n self.predict_batches += 1\n\n def _implements_train_batch_hooks(self):\n return self.should_run\n\n def _implements_test_batch_hooks(self):\n return self.should_run\n\n def _implements_predict_batch_hooks(self):\n return self.should_run\n\n x, y = np.ones((10, 1)), np.ones((10, 1))\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n\n my_cb = 
MyCallback(should_run=True)\n cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)\n self.assertTrue(cb_list._should_call_train_batch_hooks)\n self.assertTrue(cb_list._should_call_test_batch_hooks)\n self.assertTrue(cb_list._should_call_predict_batch_hooks)\n\n model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)\n model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)\n model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)\n\n self.assertEqual(my_cb.train_batches, 2)\n self.assertEqual(my_cb.test_batches, 1)\n self.assertEqual(my_cb.predict_batches, 1)\n\n my_cb = MyCallback(should_run=False)\n cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)\n self.assertFalse(cb_list._should_call_train_batch_hooks)\n self.assertFalse(cb_list._should_call_test_batch_hooks)\n self.assertFalse(cb_list._should_call_predict_batch_hooks)\n\n model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)\n model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)\n model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)\n\n self.assertEqual(my_cb.train_batches, 0)\n self.assertEqual(my_cb.test_batches, 0)\n self.assertEqual(my_cb.predict_batches, 0)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_default_callbacks_do_not_call_batch_hooks(self):\n model = keras.Sequential([keras.layers.Dense(1)])\n log_dir = self.get_temp_dir()\n cb_list = keras.callbacks.CallbackList([\n keras.callbacks.TensorBoard(log_dir, profile_batch=0),\n keras.callbacks.ModelCheckpoint(log_dir),\n ],\n add_progbar=True,\n model=model,\n verbose=2,\n epochs=3)\n self.assertLen(cb_list.callbacks, 3)\n self.assertFalse(cb_list._should_call_train_batch_hooks)\n self.assertFalse(cb_list._should_call_test_batch_hooks)\n self.assertFalse(cb_list._should_call_predict_batch_hooks)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_change_tf_functions_during_fit(self):\n\n class ChangeFunctions(keras.callbacks.Callback):\n\n def on_epoch_end(self, epochs, logs=None):\n\n def new_fn(iterator):\n raise ValueError('New function substituted successfully.')\n\n self.model.train_function = new_fn\n self.model.test_function = new_fn\n self.model.predict_function = new_fn\n\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n\n x, y = np.ones((10, 10)), np.ones((10, 1))\n with self.assertRaisesRegexp(ValueError, 'New function '):\n model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()])\n with self.assertRaisesRegexp(ValueError, 'New function '):\n model.evaluate(x, y, batch_size=2)\n with self.assertRaisesRegexp(ValueError, 'New function '):\n model.predict(x, batch_size=2)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_stop_training_batch_level(self):\n\n class MyCallback(keras.callbacks.Callback):\n\n def __init__(self):\n super(MyCallback, self).__init__()\n self.batch_counter = 0\n\n def on_train_batch_end(self, batch, logs=None):\n self.batch_counter += 1\n if batch == 2:\n self.model.stop_training = True\n\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile('sgd', 'mse')\n x, y = np.ones((10, 10)), np.ones((10, 1))\n my_cb = MyCallback()\n # Will run 5 batches if `stop_training` doesn't work.\n model.fit(x, y, batch_size=2, callbacks=[my_cb])\n self.assertEqual(my_cb.batch_counter, 3)\n\n\n# A summary that was emitted during a test. Fields:\n# logdir: str. 
The logdir of the FileWriter to which the summary was\n# written.\n# tag: str. The name of the summary.\n_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))\n\n\nclass _SummaryFile(object):\n \"\"\"A record of summary tags and the files to which they were written.\n\n Fields `scalars`, `images`, `histograms`, and `tensors` are sets\n containing `_ObservedSummary` values.\n \"\"\"\n\n def __init__(self):\n self.scalars = set()\n self.images = set()\n self.histograms = set()\n self.tensors = set()\n self.graph_defs = []\n self.convert_from_v2_summary_proto = False\n\n\ndef list_summaries(logdir):\n \"\"\"Read all summaries under the logdir into a `_SummaryFile`.\n\n Args:\n logdir: A path to a directory that contains zero or more event\n files, either as direct children or in transitive subdirectories.\n Summaries in these events must only contain old-style scalars,\n images, and histograms. Non-summary events, like `graph_def`s, are\n ignored.\n\n Returns:\n A `_SummaryFile` object reflecting all summaries written to any\n event files in the logdir or any of its descendant directories.\n\n Raises:\n ValueError: If an event file contains an summary of unexpected kind.\n \"\"\"\n result = _SummaryFile()\n for (dirpath, _, filenames) in os.walk(logdir):\n for filename in filenames:\n if not filename.startswith('events.out.'):\n continue\n path = os.path.join(dirpath, filename)\n for event in tf.compat.v1.train.summary_iterator(path):\n if event.graph_def:\n result.graph_defs.append(event.graph_def)\n if not event.summary: # (e.g., it's a `graph_def` event)\n continue\n for value in event.summary.value:\n tag = value.tag\n # Case on the `value` rather than the summary metadata because\n # the Keras callback uses `summary_ops_v2` to emit old-style\n # summaries. 
See b/124535134.\n kind = value.WhichOneof('value')\n container = {\n 'simple_value': result.scalars,\n 'image': result.images,\n 'histo': result.histograms,\n 'tensor': result.tensors,\n }.get(kind)\n if container is None:\n raise ValueError(\n 'Unexpected summary kind %r in event file %s:\\n%r'\n % (kind, path, event))\n elif kind == 'tensor' and tag != 'keras':\n # Convert the tf2 summary proto to old style for type checking.\n plugin_name = value.metadata.plugin_data.plugin_name\n container = {\n 'images': result.images,\n 'histograms': result.histograms,\n 'scalars': result.scalars,\n }.get(plugin_name)\n if container is not None:\n result.convert_from_v2_summary_proto = True\n else:\n container = result.tensors\n container.add(_ObservedSummary(logdir=dirpath, tag=tag))\n return result\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass TestTensorBoardV2(keras_parameterized.TestCase):\n\n def setUp(self):\n super(TestTensorBoardV2, self).setUp()\n self.logdir = os.path.join(self.get_temp_dir(), 'tb')\n self.train_dir = os.path.join(self.logdir, 'train')\n self.validation_dir = os.path.join(self.logdir, 'validation')\n\n def _get_model(self, compile_model=True):\n layers = [\n keras.layers.Conv2D(8, (3, 3)),\n keras.layers.Flatten(),\n keras.layers.Dense(1)\n ]\n model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))\n if compile_model:\n opt = gradient_descent.SGD(learning_rate=0.001)\n model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())\n return model\n\n def test_TensorBoard_default_logdir(self):\n \"\"\"Regression test for cross-platform pathsep in default logdir.\"\"\"\n os.chdir(self.get_temp_dir())\n\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard() # no logdir specified\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n summary_file = list_summaries(logdir='.')\n train_dir = os.path.join('.', 'logs', 'train')\n validation_dir = os.path.join('.', 'logs', 'validation')\n self.assertEqual(\n summary_file.scalars, {\n _ObservedSummary(logdir=train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=validation_dir, tag='evaluation_loss_vs_iterations'),\n })\n\n def test_TensorBoard_basic(self):\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars, {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n })\n\n def test_TensorBoard_across_invocations(self):\n \"\"\"Regression test for summary writer resource use-after-free.\n\n See: <https://github.com/tensorflow/tensorflow/issues/25707>\n \"\"\"\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir)\n\n for _ in (1, 2):\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars, {\n 
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n })\n\n def test_TensorBoard_no_spurious_event_files(self):\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n callbacks=[tb_cbk])\n\n events_file_run_basenames = set()\n for (dirpath, _, filenames) in os.walk(self.logdir):\n if any(fn.startswith('events.out.') for fn in filenames):\n events_file_run_basenames.add(os.path.basename(dirpath))\n self.assertEqual(events_file_run_basenames, {'train'})\n\n def test_TensorBoard_batch_metrics(self):\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n },\n )\n\n def test_TensorBoard_learning_rate_schedules(self):\n model = self._get_model(compile_model=False)\n opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))\n model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())\n\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n callbacks=[keras.callbacks.TensorBoard(self.logdir)])\n\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),\n },\n )\n\n def test_TensorBoard_global_step(self):\n model = self._get_model(compile_model=False)\n opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))\n model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())\n\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n verbose=0,\n callbacks=[\n keras.callbacks.TensorBoard(\n self.logdir,\n update_freq=1,\n profile_batch=0,\n write_steps_per_second=True)\n ])\n\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),\n _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),\n _ObservedSummary(\n logdir=self.train_dir, tag='epoch_steps_per_second'),\n _ObservedSummary(\n logdir=self.train_dir, tag='batch_steps_per_second'),\n },\n )\n\n def test_TensorBoard_weight_histograms(self):\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)\n model_type = testing_utils.get_model_type()\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n 
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n },\n )\n self.assertEqual(\n self._strip_layer_names(summary_file.histograms, model_type),\n {\n _ObservedSummary(logdir=self.train_dir, tag='bias_0'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),\n },\n )\n\n def test_TensorBoard_weight_images(self):\n model = self._get_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, write_images=True)\n model_type = testing_utils.get_model_type()\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n },\n )\n self.assertEqual(\n self._strip_layer_names(summary_file.histograms, model_type),\n {\n _ObservedSummary(logdir=self.train_dir, tag='bias_0'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),\n },\n )\n if summary_file.convert_from_v2_summary_proto:\n expected = {\n _ObservedSummary(logdir=self.train_dir, tag='bias_0'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),\n }\n else:\n expected = {\n _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),\n _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),\n }\n self.assertEqual(\n self._strip_layer_names(summary_file.images, model_type),\n expected\n )\n\n def test_TensorBoard_projector_callback(self):\n layers = [\n keras.layers.Embedding(10, 10, name='test_embedding'),\n keras.layers.Dense(10, activation='relu'),\n keras.layers.Dense(1, activation='sigmoid')\n ]\n model = testing_utils.get_model_from_layers(layers, input_shape=(10,))\n model.compile(\n optimizer='adam',\n loss=keras.losses.BinaryCrossentropy(from_logits=True),\n run_eagerly=testing_utils.should_run_eagerly())\n x, y = np.ones((10, 10)), np.ones((10, 10))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir,\n embeddings_freq=1,\n embeddings_metadata={'test_embedding': 'metadata.tsv'})\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f:\n self.assertEqual(f.readlines(), [\n 'embeddings {\\n',\n (' tensor_name: '\n '\"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE\"\\n'),\n ' metadata_path: \"metadata.tsv\"\\n', '}\\n'\n ])\n\n def test_custom_summary(self):\n if not tf.executing_eagerly():\n self.skipTest('Custom summaries only supported in V2 code path.')\n\n def scalar_v2_mock(name, data, step=None):\n \"\"\"A reimplementation of the scalar plugin to avoid circular deps.\"\"\"\n metadata = tf.compat.v1.SummaryMetadata()\n # Should match value in tensorboard/plugins/scalar/metadata.py.\n metadata.plugin_data.plugin_name = 'scalars'\n with tf.summary.experimental.summary_scope(\n name, 'scalar_summary', values=[data, step]) as (tag, _):\n return tf.summary.write(\n tag=tag,\n tensor=tf.cast(data, 'float32'),\n step=step,\n metadata=metadata)\n\n class LayerWithSummary(keras.layers.Layer):\n\n def call(self, x):\n 
scalar_v2_mock('custom_summary', tf.reduce_sum(x))\n return x\n\n model = testing_utils.get_model_from_layers([LayerWithSummary()],\n input_shape=(5,),\n name='model')\n\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)\n x, y = np.ones((10, 5)), np.ones((10, 5))\n model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.scalars,\n {\n _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),\n _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='evaluation_loss_vs_iterations'),\n _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),\n _ObservedSummary(\n logdir=self.train_dir,\n tag='model/layer_with_summary/custom_summary'),\n _ObservedSummary(\n logdir=self.validation_dir,\n tag='model/layer_with_summary/custom_summary')\n },\n )\n\n def _strip_layer_names(self, summaries, model_type):\n \"\"\"Deduplicate summary names modulo layer prefix.\n\n This removes the first slash-component of each tag name: for\n instance, \"foo/bar/baz\" becomes \"bar/baz\".\n\n Args:\n summaries: A `set` of `_ObservedSummary` values.\n model_type: The model type currently being tested.\n\n Returns:\n A new `set` of `_ObservedSummary` values with layer prefixes\n removed.\n \"\"\"\n result = set()\n for summary in summaries:\n if '/' not in summary.tag:\n raise ValueError('tag has no layer name: %r' % summary.tag)\n start_from = 2 if 'subclass' in model_type else 1\n new_tag = '/'.join(summary.tag.split('/')[start_from:])\n result.add(summary._replace(tag=new_tag))\n return result\n\n def test_TensorBoard_invalid_argument(self):\n with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):\n keras.callbacks.TensorBoard(wwrite_images=True)\n\n def test_TensorBoard_non_blocking(self):\n model = keras.Sequential([keras.layers.Dense(1)])\n tb = keras.callbacks.TensorBoard(self.logdir)\n self.assertTrue(tb._supports_tf_logs)\n cb_list = keras.callbacks.CallbackList([tb],\n model=model,\n epochs=1,\n steps=100,\n verbose=0)\n\n tensor = tf.convert_to_tensor(1.)\n\n def mock_numpy():\n raise RuntimeError(\n 'If this error is seen, TensorBoard is causing a blocking '\n 'NumPy conversion.')\n\n with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy):\n logs = {'metric': tensor}\n\n cb_list.on_train_begin(logs)\n cb_list.on_epoch_begin(0, logs)\n cb_list.on_train_batch_begin(0, logs)\n cb_list.on_train_batch_end(0, logs)\n cb_list.on_epoch_end(0, logs)\n cb_list.on_train_end(logs)\n\n cb_list.on_test_begin(logs)\n cb_list.on_test_batch_begin(0, logs)\n cb_list.on_test_batch_end(0, logs)\n cb_list.on_test_end(logs)\n\n cb_list.on_predict_begin(logs)\n cb_list.on_predict_batch_begin(logs)\n cb_list.on_predict_batch_end(logs)\n cb_list.on_predict_end(logs)\n\n\n# Note that this test specifies model_type explicitly.\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):\n\n def setUp(self):\n super(TestTensorBoardV2NonParameterizedTest, self).setUp()\n self.logdir = os.path.join(self.get_temp_dir(), 'tb')\n self.train_dir = os.path.join(self.logdir, 'train')\n self.validation_dir = os.path.join(self.logdir, 'validation')\n\n def _get_seq_model(self):\n model = keras.models.Sequential([\n keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),\n 
keras.layers.Flatten(),\n keras.layers.Dense(1),\n ])\n opt = gradient_descent.SGD(learning_rate=0.001)\n model.compile(\n opt,\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n return model\n\n def _count_trace_file(self, logdir):\n profile_dir = os.path.join(logdir, 'plugins', 'profile')\n count = 0\n for (dirpath, dirnames, filenames) in os.walk(profile_dir):\n del dirpath # unused\n del dirnames # unused\n for filename in filenames:\n if filename.endswith('.trace.json.gz'):\n count += 1\n return count\n\n def fitModelAndAssertKerasModelWritten(self, model):\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(self.logdir,\n write_graph=True,\n profile_batch=0)\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=3,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag='keras'),\n },\n )\n if not model.run_eagerly:\n # There should be one train graph\n self.assertLen(summary_file.graph_defs, 1)\n for graph_def in summary_file.graph_defs:\n graph_def_str = str(graph_def)\n\n # All the model layers should appear in the graphs\n for layer in model.layers:\n if 'input' not in layer.name:\n self.assertIn(layer.name, graph_def_str)\n\n def test_TensorBoard_writeSequentialModel_noInputShape(self):\n model = keras.models.Sequential([\n keras.layers.Conv2D(8, (3, 3)),\n keras.layers.Flatten(),\n keras.layers.Dense(1),\n ])\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n self.fitModelAndAssertKerasModelWritten(model)\n\n def test_TensorBoard_writeSequentialModel_withInputShape(self):\n model = keras.models.Sequential([\n keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),\n keras.layers.Flatten(),\n keras.layers.Dense(1),\n ])\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n self.fitModelAndAssertKerasModelWritten(model)\n\n def test_TensorBoard_writeModel(self):\n inputs = keras.layers.Input([10, 10, 1])\n x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)\n x = keras.layers.Flatten()(x)\n x = keras.layers.Dense(1)(x)\n model = keras.models.Model(inputs=inputs, outputs=[x])\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n self.fitModelAndAssertKerasModelWritten(model)\n\n def test_TensorBoard_autoTrace(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))\n\n def test_TensorBoard_autoTrace_outerProfiler(self):\n \"\"\"Runs a profiler session that interferes with the one from the callback.\n\n The callback will not generate a profile but execution will proceed without\n crashing due to unhandled exceptions.\n \"\"\"\n tf.profiler.experimental.start(logdir='')\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n 
callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n tf.profiler.experimental.stop(save=False)\n\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),\n },\n )\n self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))\n\n def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))\n\n def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=3,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.tensors,\n {\n # Trace will be logged once at the batch it stops profiling.\n _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))\n\n def test_TensorBoard_autoTrace_profileBatchRangeTwice(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=3,\n epochs=10,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n\n time.sleep(1) # Avoids the second profile over-writing the first.\n\n model.fit(\n x,\n y,\n batch_size=3,\n epochs=10,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n self.assertEqual(2, self._count_trace_file(logdir=self.train_dir))\n\n # Test case that replicates a Github issue.\n # https://github.com/tensorflow/tensorflow/issues/37543\n def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):\n tf.compat.v1.disable_eager_execution()\n inp = keras.Input((1,))\n out = keras.layers.Dense(units=1)(inp)\n model = keras.Model(inp, out)\n\n model.compile(gradient_descent.SGD(1), 'mse')\n\n logdir = os.path.join(self.get_temp_dir(), 'tb1')\n model.fit(\n np.zeros((64, 1)),\n np.zeros((64, 1)),\n batch_size=32,\n callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],\n )\n # Verifies trace exists in the first logdir.\n self.assertEqual(1, self._count_trace_file(logdir=logdir))\n logdir = os.path.join(self.get_temp_dir(), 'tb2')\n model.fit(\n np.zeros((64, 1)),\n np.zeros((64, 1)),\n batch_size=32,\n callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],\n )\n # Verifies trace exists in the second logdir.\n self.assertEqual(1, self._count_trace_file(logdir=logdir))\n\n def test_TensorBoard_autoTrace_profileBatchRange(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=4,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n 
summary_file.tensors,\n {\n # Trace will be logged once at the batch it stops profiling.\n _ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))\n\n def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):\n with self.assertRaises(ValueError):\n keras.callbacks.TensorBoard(\n self.logdir,\n histogram_freq=1,\n profile_batch='-1,3',\n write_graph=False)\n\n with self.assertRaises(ValueError):\n keras.callbacks.TensorBoard(\n self.logdir,\n histogram_freq=1,\n profile_batch='1,None',\n write_graph=False)\n\n with self.assertRaises(ValueError):\n keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)\n\n with self.assertRaises(ValueError):\n keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)\n\n def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)\n\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk])\n summary_file = list_summaries(self.logdir)\n\n # Enabled trace only on the 10000th batch, thus it should be empty.\n self.assertEmpty(summary_file.tensors)\n self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))\n\n\nclass MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase):\n\n def test_get_most_recently_modified_file_matching_pattern(self):\n file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'\n test_dir = self.get_temp_dir()\n path_pattern = os.path.join(test_dir, file_pattern)\n file_paths = [\n os.path.join(test_dir, file_name) for file_name in\n ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']\n ]\n for file_path in file_paths:\n with open(file_path, 'w') as f:\n # Ensure there are some intervals between file creation.\n time.sleep(2)\n f.write('foo bar')\n # Ensure the files have been actually written.\n self.assertEqual(\n set([\n os.path.join(test_dir, file_name)\n for file_name in os.listdir(test_dir)\n ]), set(file_paths))\n self.assertEqual(\n keras.callbacks.ModelCheckpoint(None)\n ._get_most_recently_modified_file_matching_pattern(path_pattern),\n file_paths[-1])\n\n def test_some_file_not_matching_pattern(self):\n file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'\n test_dir = self.get_temp_dir()\n path_pattern = os.path.join(test_dir, file_pattern)\n file_paths = [\n os.path.join(test_dir, file_name) for file_name in\n ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']\n ]\n for file_path in file_paths:\n with open(file_path, 'w') as f:\n # Ensure there are some intervals between file creation.\n time.sleep(2)\n f.write('foo bar')\n self.assertEqual(\n keras.callbacks.ModelCheckpoint(None)\n ._get_most_recently_modified_file_matching_pattern(path_pattern),\n file_paths[-2])\n\n def test_get_same_file_if_file_name_equals_pattern(self):\n file_name = 'f.batch02.h5'\n test_dir = self.get_temp_dir()\n file_path = os.path.join(test_dir, file_name)\n with open(file_path, 'w') as f:\n f.write('foo bar')\n self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)\n self.assertEqual(\n keras.callbacks.ModelCheckpoint(\n None)._get_most_recently_modified_file_matching_pattern(file_path),\n file_path)\n\n def test_get_none_if_file_does_not_exist(self):\n file_name 
= 'f.batch02.h5'\n test_dir = self.get_temp_dir()\n file_path = os.path.join(test_dir, file_name)\n self.assertLen(os.listdir(test_dir), 0)\n self.assertEqual(\n keras.callbacks.ModelCheckpoint(\n None)._get_most_recently_modified_file_matching_pattern(file_path),\n None)\n\n def test_using_checkpoint_management_latest_checkpoint(self):\n file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'\n ckpt_file_name = 'f.batchXepochY'\n test_dir = self.get_temp_dir()\n path_pattern = os.path.join(test_dir, file_pattern)\n ckpt_file_path = os.path.join(test_dir, ckpt_file_name)\n with open(ckpt_file_path, 'w') as f:\n f.write('dummy ckpt')\n tf.__internal__.train.update_checkpoint_state(\n test_dir, ckpt_file_path)\n\n file_paths = [\n os.path.join(test_dir, file_name)\n for file_name in ['f.batch03epoch02', 'f.batch02epoch02']\n ]\n for file_path in file_paths:\n with open(file_path, 'w') as f:\n f.write('foo bar')\n\n # The result returned from checkpoint_management.latest_checkpoint takes\n # priority, so even if it was written earlier, we should still return that.\n self.assertEqual(\n keras.callbacks.ModelCheckpoint(None)\n ._get_most_recently_modified_file_matching_pattern(path_pattern),\n ckpt_file_path)\n\n\nclass SummaryOpsTest(tf.test.TestCase):\n\n def tearDown(self):\n super(SummaryOpsTest, self).tearDown()\n tf.summary.trace_off()\n\n def keras_model(self, *args, **kwargs):\n logdir = self.get_temp_dir()\n writer = tf.summary.create_file_writer(logdir)\n with writer.as_default():\n keras.callbacks.keras_model_summary(*args, **kwargs)\n writer.close()\n events = events_from_logdir(logdir)\n # The first event contains no summary values. The written content goes to\n # the second event.\n return events[1]\n\n @testing_utils.run_v2_only\n def testKerasModel(self):\n model = keras.Sequential(\n [Dense(10, input_shape=(100,)),\n Activation('relu', name='my_relu')])\n event = self.keras_model(name='my_name', data=model, step=1)\n first_val = event.summary.value[0]\n self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())\n\n @testing_utils.run_v2_only\n def testKerasModel_usesDefaultStep(self):\n model = keras.Sequential(\n [Dense(10, input_shape=(100,)),\n Activation('relu', name='my_relu')])\n try:\n tf.summary.experimental.set_step(42)\n event = self.keras_model(name='my_name', data=model)\n self.assertEqual(42, event.step)\n finally:\n # Reset to default state for other tests.\n tf.summary.experimental.set_step(None)\n\n @testing_utils.run_v2_only\n def testKerasModel_subclass(self):\n\n class SimpleSubclass(keras.Model):\n\n def __init__(self):\n super(SimpleSubclass, self).__init__(name='subclass')\n self.dense = Dense(10, input_shape=(100,))\n self.activation = Activation('relu', name='my_relu')\n\n def call(self, inputs):\n x = self.dense(inputs)\n return self.activation(x)\n\n model = SimpleSubclass()\n with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:\n self.assertFalse(\n keras.callbacks.keras_model_summary(\n name='my_name', data=model, step=1))\n self.assertRegex(\n str(mock_log.call_args), 'Model failed to serialize as JSON.')\n\n @testing_utils.run_v2_only\n def testKerasModel_otherExceptions(self):\n model = keras.Sequential()\n\n with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json:\n with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:\n mock_to_json.side_effect = Exception('oops')\n self.assertFalse(\n keras.callbacks.keras_model_summary(\n name='my_name', data=model, step=1))\n 
self.assertRegex(\n str(mock_log.call_args),\n 'Model failed to serialize as JSON. Ignoring')\n\n\ndef events_from_file(filepath):\n \"\"\"Returns all events in a single event file.\n\n Args:\n filepath: Path to the event file.\n\n Returns:\n A list of all tf.Event protos in the event file.\n \"\"\"\n result = []\n raw_dataset = tf.data.TFRecordDataset([filepath])\n for raw_record in raw_dataset.take(10):\n event = tf.compat.v1.Event()\n event.ParseFromString(raw_record.numpy())\n result.append(event)\n return result\n\n\ndef events_from_logdir(logdir):\n \"\"\"Returns all events in the single eventfile in logdir.\n\n Args:\n logdir: The directory in which the single event file is sought.\n\n Returns:\n A list of all tf.Event protos from the single event file.\n\n Raises:\n AssertionError: If logdir does not contain exactly one file.\n \"\"\"\n assert tf.compat.v1.gfile.Exists(logdir)\n files = tf.compat.v1.gfile.ListDirectory(logdir)\n assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files\n return events_from_file(os.path.join(logdir, files[0]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.ragged.constant",
"numpy.array"
],
[
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.compat.v1.test.mock.patch.object"
],
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.profiler.experimental.stop",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.compat.v1.gfile.Exists",
"numpy.where",
"tensorflow.compat.v2.summary.experimental.set_step",
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.compat.v1.SummaryMetadata",
"numpy.arange",
"tensorflow.compat.v2.compat.v1.Event",
"tensorflow.compat.v2.zeros",
"numpy.float32",
"numpy.zeros",
"tensorflow.compat.v2.compat.v1.set_random_seed",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.compat.v2.saved_model.SaveOptions",
"numpy.isnan",
"tensorflow.compat.v2.summary.trace_off",
"tensorflow.compat.v2.compat.v1.disable_eager_execution",
"tensorflow.compat.v2.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v2.profiler.experimental.start",
"tensorflow.compat.v2.compat.v1.test.mock.patch.object",
"tensorflow.compat.v2.data.Dataset.from_generator",
"numpy.random.random",
"numpy.random.seed",
"tensorflow.compat.v2.train.CheckpointOptions",
"tensorflow.compat.v2.compat.v1.train.summary_iterator",
"numpy.cos",
"numpy.ones",
"tensorflow.compat.v2.data.TFRecordDataset",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.compat.v1.gfile.ListDirectory",
"tensorflow.compat.v2.__internal__.train.update_checkpoint_state",
"numpy.isinf",
"tensorflow.compat.v2.summary.experimental.summary_scope"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ribes96/TFG
|
[
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9",
"b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9"
] |
[
"code/bib/utils/fixes.py",
"code/bib/neighbors/lof.py",
"code/notebooks/python/demo_utils/demo_utils/demo2.py",
"code/bib/feature_extraction/tests/test_text.py",
"code/bib/mixture/tests/test_gaussian_mixture.py",
"code/bib/cluster/tests/test_k_means.py",
"code/bib/decomposition/pca.py"
] |
[
"\"\"\"Compatibility fixes for older version of python, numpy and scipy\n\nIf you add content to this file, please give the version of the package\nat which the fixe is no longer needed.\n\"\"\"\n# Authors: Emmanuelle Gouillart <[email protected]>\n# Gael Varoquaux <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Lars Buitinck\n#\n# License: BSD 3 clause\n\nimport warnings\nimport os\nimport errno\n\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy\n\ntry:\n from inspect import signature\nexcept ImportError:\n from ..externals.funcsigs import signature\n\n\ndef _parse_version(version_string):\n version = []\n for x in version_string.split('.'):\n try:\n version.append(int(x))\n except ValueError:\n # x may be of the form dev-1ea1592\n version.append(x)\n return tuple(version)\n\n\neuler_gamma = getattr(np, 'euler_gamma',\n 0.577215664901532860606512090082402431)\n\nnp_version = _parse_version(np.__version__)\nsp_version = _parse_version(scipy.__version__)\n\n\n# Remove when minimum required NumPy >= 1.10\ntry:\n if (not np.allclose(np.divide(.4, 1, casting=\"unsafe\"),\n np.divide(.4, 1, casting=\"unsafe\", dtype=np.float64))\n or not np.allclose(np.divide(.4, 1), .4)):\n raise TypeError('Divide not working with dtype: '\n 'https://github.com/numpy/numpy/issues/3484')\n divide = np.divide\n\nexcept TypeError:\n # Compat for old versions of np.divide that do not provide support for\n # the dtype args\n def divide(x1, x2, out=None, dtype=None):\n out_orig = out\n if out is None:\n out = np.asarray(x1, dtype=dtype)\n if out is x1:\n out = x1.copy()\n else:\n if out is not x1:\n out[:] = x1\n if dtype is not None and out.dtype != dtype:\n out = out.astype(dtype)\n out /= x2\n if out_orig is None and np.isscalar(x1):\n out = np.asscalar(out)\n return out\n\n\ntry:\n with warnings.catch_warnings(record=True):\n # Don't raise the numpy deprecation warnings that appear in\n # 1.9, but avoid Python bug due to simplefilter('ignore')\n warnings.simplefilter('always')\n sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)\nexcept (TypeError, AttributeError):\n # in scipy < 0.14.0, sparse matrix min/max doesn't accept `axis` argument\n # the following code is taken from the scipy 0.14 codebase\n\n def _minor_reduce(X, ufunc):\n major_index = np.flatnonzero(np.diff(X.indptr))\n value = ufunc.reduceat(X.data, X.indptr[major_index])\n return major_index, value\n\n def _min_or_max_axis(X, axis, min_or_max):\n N = X.shape[axis]\n if N == 0:\n raise ValueError(\"zero-size array to reduction operation\")\n M = X.shape[1 - axis]\n mat = X.tocsc() if axis == 0 else X.tocsr()\n mat.sum_duplicates()\n major_index, value = _minor_reduce(mat, min_or_max)\n not_full = np.diff(mat.indptr)[major_index] < N\n value[not_full] = min_or_max(value[not_full], 0)\n mask = value != 0\n major_index = np.compress(mask, major_index)\n value = np.compress(mask, value)\n\n from scipy.sparse import coo_matrix\n if axis == 0:\n res = coo_matrix((value, (np.zeros(len(value)), major_index)),\n dtype=X.dtype, shape=(1, M))\n else:\n res = coo_matrix((value, (major_index, np.zeros(len(value)))),\n dtype=X.dtype, shape=(M, 1))\n return res.A.ravel()\n\n def _sparse_min_or_max(X, axis, min_or_max):\n if axis is None:\n if 0 in X.shape:\n raise ValueError(\"zero-size array to reduction operation\")\n zero = X.dtype.type(0)\n if X.nnz == 0:\n return zero\n m = min_or_max.reduce(X.data.ravel())\n if X.nnz != np.product(X.shape):\n m = min_or_max(zero, m)\n return m\n if axis < 0:\n axis += 2\n if (axis == 0) or (axis == 1):\n 
return _min_or_max_axis(X, axis, min_or_max)\n else:\n raise ValueError(\"invalid axis, use 0 for rows, or 1 for columns\")\n\n def sparse_min_max(X, axis):\n return (_sparse_min_or_max(X, axis, np.minimum),\n _sparse_min_or_max(X, axis, np.maximum))\n\nelse:\n def sparse_min_max(X, axis):\n return (X.min(axis=axis).toarray().ravel(),\n X.max(axis=axis).toarray().ravel())\n\n\nif sp_version < (0, 15):\n # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142\n from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr\nelse:\n from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa\n\n\ntry: # SciPy >= 0.19\n from scipy.special import comb, logsumexp\nexcept ImportError:\n from scipy.misc import comb, logsumexp # noqa\n\n\nif sp_version >= (0, 19):\n def _argmax(arr_or_spmatrix, axis=None):\n return arr_or_spmatrix.argmax(axis=axis)\nelse:\n # Backport of argmax functionality from scipy 0.19.1, can be removed\n # once support for scipy 0.18 and below is dropped\n\n def _find_missing_index(ind, n):\n for k, a in enumerate(ind):\n if k != a:\n return k\n\n k += 1\n if k < n:\n return k\n else:\n return -1\n\n def _arg_min_or_max_axis(self, axis, op, compare):\n if self.shape[axis] == 0:\n raise ValueError(\"Can't apply the operation along a zero-sized \"\n \"dimension.\")\n\n if axis < 0:\n axis += 2\n\n zero = self.dtype.type(0)\n\n mat = self.tocsc() if axis == 0 else self.tocsr()\n mat.sum_duplicates()\n\n ret_size, line_size = mat._swap(mat.shape)\n ret = np.zeros(ret_size, dtype=int)\n\n nz_lines, = np.nonzero(np.diff(mat.indptr))\n for i in nz_lines:\n p, q = mat.indptr[i:i + 2]\n data = mat.data[p:q]\n indices = mat.indices[p:q]\n am = op(data)\n m = data[am]\n if compare(m, zero) or q - p == line_size:\n ret[i] = indices[am]\n else:\n zero_ind = _find_missing_index(indices, line_size)\n if m == zero:\n ret[i] = min(am, zero_ind)\n else:\n ret[i] = zero_ind\n\n if axis == 1:\n ret = ret.reshape(-1, 1)\n\n return np.asmatrix(ret)\n\n def _arg_min_or_max(self, axis, out, op, compare):\n if out is not None:\n raise ValueError(\"Sparse matrices do not support \"\n \"an 'out' parameter.\")\n\n # validateaxis(axis)\n\n if axis is None:\n if 0 in self.shape:\n raise ValueError(\"Can't apply the operation to \"\n \"an empty matrix.\")\n\n if self.nnz == 0:\n return 0\n else:\n zero = self.dtype.type(0)\n mat = self.tocoo()\n mat.sum_duplicates()\n am = op(mat.data)\n m = mat.data[am]\n\n if compare(m, zero):\n return mat.row[am] * mat.shape[1] + mat.col[am]\n else:\n size = np.product(mat.shape)\n if size == mat.nnz:\n return am\n else:\n ind = mat.row * mat.shape[1] + mat.col\n zero_ind = _find_missing_index(ind, size)\n if m == zero:\n return min(zero_ind, am)\n else:\n return zero_ind\n\n return _arg_min_or_max_axis(self, axis, op, compare)\n\n def _sparse_argmax(self, axis=None, out=None):\n return _arg_min_or_max(self, axis, out, np.argmax, np.greater)\n\n def _argmax(arr_or_matrix, axis=None):\n if sp.issparse(arr_or_matrix):\n return _sparse_argmax(arr_or_matrix, axis=axis)\n else:\n return arr_or_matrix.argmax(axis=axis)\n\n\ndef parallel_helper(obj, methodname, *args, **kwargs):\n \"\"\"Workaround for Python 2 limitations of pickling instance methods\"\"\"\n return getattr(obj, methodname)(*args, **kwargs)\n\n\nif 'exist_ok' in signature(os.makedirs).parameters:\n makedirs = os.makedirs\nelse:\n def makedirs(name, mode=0o777, exist_ok=False):\n \"\"\"makedirs(name [, mode=0o777][, exist_ok=False])\n\n Super-mkdir; create a leaf directory and all intermediate ones. 
Works\n like mkdir, except that any intermediate path segment (not just the\n rightmost) will be created if it does not exist. If the target\n directory already exists, raise an OSError if exist_ok is False.\n Otherwise no exception is raised. This is recursive.\n\n \"\"\"\n\n try:\n os.makedirs(name, mode=mode)\n except OSError as e:\n if (not exist_ok or e.errno != errno.EEXIST\n or not os.path.isdir(name)):\n raise\n\n\nif np_version < (1, 12):\n class MaskedArray(np.ma.MaskedArray):\n # Before numpy 1.12, np.ma.MaskedArray object is not picklable\n # This fix is needed to make our model_selection.GridSearchCV\n # picklable as the ``cv_results_`` param uses MaskedArray\n def __getstate__(self):\n \"\"\"Return the internal state of the masked array, for pickling\n purposes.\n\n \"\"\"\n cf = 'CF'[self.flags.fnc]\n data_state = super(np.ma.MaskedArray, self).__reduce__()[2]\n return data_state + (np.ma.getmaskarray(self).tostring(cf),\n self._fill_value)\nelse:\n from numpy.ma import MaskedArray # noqa\n\n\nif np_version < (1, 11):\n def nanpercentile(a, q):\n \"\"\"\n Compute the qth percentile of the data along the specified axis,\n while ignoring nan values.\n\n Returns the qth percentile(s) of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n q : float in range of [0,100] (or sequence of floats)\n Percentile to compute, which must be between 0 and 100\n inclusive.\n\n Returns\n -------\n percentile : scalar or ndarray\n If `q` is a single percentile and `axis=None`, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the percentiles. The other axes are\n the axes that remain after the reduction of `a`. If the input\n contains integers or floats smaller than ``float64``, the output\n data-type is ``float64``. Otherwise, the output data-type is the\n same as that of the input. If `out` is specified, that array is\n returned instead.\n\n \"\"\"\n data = np.compress(~np.isnan(a), a)\n if data.size:\n return np.percentile(data, q)\n else:\n size_q = 1 if np.isscalar(q) else len(q)\n return np.array([np.nan] * size_q)\nelse:\n from numpy import nanpercentile # noqa\n",
"# Authors: Nicolas Goix <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport warnings\nfrom scipy.stats import scoreatpercentile\n\nfrom .base import NeighborsBase\nfrom .base import KNeighborsMixin\nfrom .base import UnsupervisedMixin\nfrom ..base import OutlierMixin\n\nfrom ..utils.validation import check_is_fitted\nfrom ..utils import check_array\n\n__all__ = [\"LocalOutlierFactor\"]\n\n\nclass LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin,\n OutlierMixin):\n \"\"\"Unsupervised Outlier Detection using Local Outlier Factor (LOF)\n\n The anomaly score of each sample is called Local Outlier Factor.\n It measures the local deviation of density of a given sample with\n respect to its neighbors.\n It is local in that the anomaly score depends on how isolated the object\n is with respect to the surrounding neighborhood.\n More precisely, locality is given by k-nearest neighbors, whose distance\n is used to estimate the local density.\n By comparing the local density of a sample to the local densities of\n its neighbors, one can identify samples that have a substantially lower\n density than their neighbors. These are considered outliers.\n\n Parameters\n ----------\n n_neighbors : int, optional (default=20)\n Number of neighbors to use by default for :meth:`kneighbors` queries.\n If n_neighbors is larger than the number of samples provided,\n all samples will be used.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use :class:`BallTree`\n - 'kd_tree' will use :class:`KDTree`\n - 'brute' will use a brute-force search.\n - 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n leaf_size : int, optional (default=30)\n Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can\n affect the speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n metric : string or callable, default 'minkowski'\n metric used for the distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If 'precomputed', the training input X is expected to be a distance\n matrix.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics:\n http://docs.scipy.org/doc/scipy/reference/spatial.distance.html\n\n p : integer, optional (default=2)\n Parameter for the Minkowski metric from\n :func:`sklearn.metrics.pairwise.pairwise_distances`. 
When p = 1, this\n is equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, optional (default=None)\n Additional keyword arguments for the metric function.\n\n contamination : float in (0., 0.5), optional (default=0.1)\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set. When fitting this is used to define the\n threshold on the decision function. If \"auto\", the decision function\n threshold is determined as in the original paper.\n\n n_jobs : int, optional (default=1)\n The number of parallel jobs to run for neighbors search.\n If ``-1``, then the number of jobs is set to the number of CPU cores.\n Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.\n\n\n Attributes\n ----------\n negative_outlier_factor_ : numpy array, shape (n_samples,)\n The opposite LOF of the training samples. The higher, the more normal.\n Inliers tend to have a LOF score close to 1 (negative_outlier_factor_\n close to -1), while outliers tend to have a larger LOF score.\n\n The local outlier factor (LOF) of a sample captures its\n supposed 'degree of abnormality'.\n It is the average of the ratio of the local reachability density of\n a sample and those of its k-nearest neighbors.\n\n n_neighbors_ : integer\n The actual number of neighbors used for :meth:`kneighbors` queries.\n\n offset_ : float\n Offset used to obtain binary labels from the raw scores.\n Observations having a negative_outlier_factor smaller than offset_ are\n detected as abnormal.\n The offset is set to -1.5 (inliers score around -1), except when a\n contamination parameter different than \"auto\" is provided. In that\n case, the offset is defined in such a way we obtain the expected\n number of outliers in training.\n\n References\n ----------\n .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).\n LOF: identifying density-based local outliers. In ACM sigmod record.\n \"\"\"\n def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,\n metric='minkowski', p=2, metric_params=None,\n contamination=\"legacy\", n_jobs=1):\n super(LocalOutlierFactor, self).__init__(\n n_neighbors=n_neighbors,\n algorithm=algorithm,\n leaf_size=leaf_size, metric=metric, p=p,\n metric_params=metric_params, n_jobs=n_jobs)\n\n if contamination == \"legacy\":\n warnings.warn('default contamination parameter 0.1 will change '\n 'in version 0.22 to \"auto\". This will change the '\n 'predict method behavior.',\n DeprecationWarning)\n self.contamination = contamination\n\n def fit_predict(self, X, y=None):\n \"\"\"\"Fits the model to the training set X and returns the labels\n (1 inlier, -1 outlier) on the training set according to the LOF score\n and the contamination parameter.\n\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features), default=None\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples.\n\n Returns\n -------\n is_inlier : array, shape (n_samples,)\n Returns -1 for anomalies/outliers and 1 for inliers.\n \"\"\"\n\n return self.fit(X)._predict()\n\n def fit(self, X, y=None):\n \"\"\"Fit the model using X as training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, BallTree, KDTree}\n Training data. 
If array or matrix, shape [n_samples, n_features],\n or [n_samples, n_samples] if metric='precomputed'.\n\n Returns\n -------\n self : object\n \"\"\"\n if self.contamination not in [\"auto\", \"legacy\"]: # rm legacy in 0.22\n if not(0. < self.contamination <= .5):\n raise ValueError(\"contamination must be in (0, 0.5], \"\n \"got: %f\" % self.contamination)\n\n super(LocalOutlierFactor, self).fit(X)\n\n n_samples = self._fit_X.shape[0]\n if self.n_neighbors > n_samples:\n warnings.warn(\"n_neighbors (%s) is greater than the \"\n \"total number of samples (%s). n_neighbors \"\n \"will be set to (n_samples - 1) for estimation.\"\n % (self.n_neighbors, n_samples))\n self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))\n\n self._distances_fit_X_, _neighbors_indices_fit_X_ = (\n self.kneighbors(None, n_neighbors=self.n_neighbors_))\n\n self._lrd = self._local_reachability_density(\n self._distances_fit_X_, _neighbors_indices_fit_X_)\n\n # Compute lof score over training samples to define offset_:\n lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /\n self._lrd[:, np.newaxis])\n\n self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)\n\n if self.contamination == \"auto\":\n # inliers score around -1 (the higher, the less abnormal).\n self.offset_ = -1.5\n elif self.contamination == \"legacy\": # to rm in 0.22\n self.offset_ = scoreatpercentile(\n self.negative_outlier_factor_, 100. * 0.1)\n else:\n self.offset_ = scoreatpercentile(\n self.negative_outlier_factor_, 100. * self.contamination)\n\n return self\n\n def _predict(self, X=None):\n \"\"\"Predict the labels (1 inlier, -1 outlier) of X according to LOF.\n\n If X is None, returns the same as fit_predict(X_train).\n This method allows to generalize prediction to new observations (not\n in the training set). As LOF originally does not deal with new data,\n this method is kept private. In particular, fit(X)._predict(X) is not\n the same as fit_predict(X).\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features), default=None\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples. If None, makes prediction on the\n training data without considering them as their own neighbors.\n\n Returns\n -------\n is_inlier : array, shape (n_samples,)\n Returns -1 for anomalies/outliers and +1 for inliers.\n \"\"\"\n check_is_fitted(self, [\"offset_\", \"negative_outlier_factor_\",\n \"n_neighbors_\", \"_distances_fit_X_\"])\n\n if X is not None:\n X = check_array(X, accept_sparse='csr')\n is_inlier = np.ones(X.shape[0], dtype=int)\n is_inlier[self._decision_function(X) < 0] = -1\n else:\n is_inlier = np.ones(self._fit_X.shape[0], dtype=int)\n is_inlier[self.negative_outlier_factor_ < self.offset_] = -1\n\n return is_inlier\n\n def _decision_function(self, X):\n \"\"\"Shifted opposite of the Local Outlier Factor of X\n\n Bigger is better, i.e. 
large values correspond to inliers.\n\n The shift offset allows a zero threshold for being an outlier.\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it consider the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point.\n This method is kept private as the predict method is.\n The decision function on training data is available by considering the\n the negative_outlier_factor_ attribute.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. the training samples.\n\n Returns\n -------\n shifted_opposite_lof_scores : array, shape (n_samples,)\n The shifted opposite of the Local Outlier Factor of each input\n samples. The lower, the more abnormal. Negative scores represent\n outliers, positive scores represent inliers.\n \"\"\"\n return self._score_samples(X) - self.offset_\n\n def _score_samples(self, X):\n \"\"\"Opposite of the Local Outlier Factor of X (as bigger is\n better, i.e. large values correspond to inliers).\n\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it consider the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point.\n This method is kept private as the predict method is.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. the training samples.\n\n Returns\n -------\n opposite_lof_scores : array, shape (n_samples,)\n The opposite of the Local Outlier Factor of each input samples.\n The lower, the more abnormal.\n \"\"\"\n check_is_fitted(self, [\"offset_\", \"negative_outlier_factor_\",\n \"_distances_fit_X_\"])\n X = check_array(X, accept_sparse='csr')\n\n distances_X, neighbors_indices_X = (\n self.kneighbors(X, n_neighbors=self.n_neighbors_))\n X_lrd = self._local_reachability_density(distances_X,\n neighbors_indices_X)\n\n lrd_ratios_array = (self._lrd[neighbors_indices_X] /\n X_lrd[:, np.newaxis])\n\n # as bigger is better:\n return -np.mean(lrd_ratios_array, axis=1)\n\n def _local_reachability_density(self, distances_X, neighbors_indices):\n \"\"\"The local reachability density (LRD)\n\n The LRD of a sample is the inverse of the average reachability\n distance of its k-nearest neighbors.\n\n Parameters\n ----------\n distances_X : array, shape (n_query, self.n_neighbors)\n Distances to the neighbors (in the training samples `self._fit_X`)\n of each query point to compute the LRD.\n\n neighbors_indices : array, shape (n_query, self.n_neighbors)\n Neighbors indices (of each query point) among training samples\n self._fit_X.\n\n Returns\n -------\n local_reachability_density : array, shape (n_samples,)\n The local reachability density of each sample.\n \"\"\"\n dist_k = self._distances_fit_X_[neighbors_indices,\n self.n_neighbors_ - 1]\n reach_dist_array = np.maximum(distances_X, dist_k)\n\n # 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_:\n return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)\n",
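# --- Illustrative usage sketch (not part of the dataset entry above) ---
# The LocalOutlierFactor docstrings above describe fit_predict returning
# +1/-1 labels and negative_outlier_factor_ holding the opposite LOF scores.
# A small demo, assuming the class is exposed as
# sklearn.neighbors.LocalOutlierFactor; the data and parameter values are
# assumptions chosen for illustration.
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(42)
X_inliers = rng.normal(loc=0.0, scale=0.5, size=(100, 2))   # dense cluster
X_outliers = rng.uniform(low=-6.0, high=6.0, size=(5, 2))   # scattered points
X = np.vstack([X_inliers, X_outliers])

lof = LocalOutlierFactor(n_neighbors=20, contamination=0.05)
labels = lof.fit_predict(X)             # +1 inlier, -1 outlier
scores = lof.negative_outlier_factor_   # closer to -1 means more normal

print(labels[-5:])        # the scattered points are likely labelled -1
print(scores[:3].round(2))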
"from demo_utils.generic_demo import Demo\nfrom demo_utils.general import SUPPORTED_DATASETS\n# import ipywidgets as widgets\nfrom demo_utils.learning import get_model\nfrom demo_utils.general import get_data\nfrom demo_utils.learning import get_sampling_model_scores\nfrom demo_utils.learning import get_non_sampling_model_scores\nimport numpy as np\n\nfrom ipywidgets import Button\n# from ipywidgets import Dropdown\n# from ipywidgets import IntRangeSlider\nfrom ipywidgets import VBox\n# from ipywidgets import IntSlider\n# from ipywidgets import Checkbox\n\n# TODO poner el widget de orden de PCA\n\n\nclass Demo2(Demo):\n desc = '''# Mismo modelo, distintos datasets\n Prueba un modelo determinado con todos los datasets disponibles.\n Permite ver si un modelo presenta comportamientos distintos dependiendo\n del tipo de problema al que se enfrenta\n '''\n\n def __init__(self):\n self.all_datasets_names = SUPPORTED_DATASETS\n self.run_bt = Button(description='Demo2', button_style='info')\n\n self.model_selector = self.get_default_model_selector()\n self.model_selector.description = 'Model'\n self.sampler_selector = self.get_default_sampler_selector()\n self.sampler_selector.description = 'Sampler'\n self.features_selector = self.get_default_features_selector()\n self.features_selector.description = 'Features'\n self.box_type_selector = self.get_default_box_type_selector()\n self.box_type_selector.description = 'Box type'\n self.n_estimators_selector = self.get_default_n_estimators_selector()\n self.n_estimators_selector.description = 'N. estim.'\n self.pca_checkbox = self.get_default_pca_checkbox()\n self.pca_checkbox.description = 'Perform PCA?'\n self.g = VBox([\n self.model_selector,\n self.sampler_selector,\n self.features_selector,\n self.box_type_selector,\n self.n_estimators_selector,\n self.pca_checkbox,\n ])\n self.gui = VBox([self.g, self.run_bt])\n self.box_type_selector.observe(self.box_type_changed, 'value')\n self.sampler_selector.observe(self.sampler_changed, 'value')\n self.box_type_changed()\n self.sampler_changed()\n super().__init__()\n\n def gui_to_data(self):\n '''\n Just reading from self.gui, return a dictionary with keys and values\n needed to run the demo. Keys are the arguments of run_demo\n '''\n model_name = self.model_selector.value\n sampler_name = self.sampler_selector.value\n if sampler_name == \"None\":\n sampler_name = 'identity'\n box_type = self.box_type_selector.value\n if box_type == \"None\":\n box_type = 'none'\n n_estim = self.n_estimators_selector.value\n if box_type == 'none':\n n_estim = None\n pca_bool = self.pca_checkbox.value\n features_range = self.features_selector.value\n if sampler_name == 'identity':\n features_range = None\n\n model_info = {\n 'model_name': model_name,\n 'sampler_name': sampler_name,\n 'pca_bool': pca_bool,\n 'n_estim': n_estim,\n 'box_type': box_type,\n }\n\n # clf = get_model(model_name=model_name,\n # sampler_name=sampler_name,\n # pca_bool=pca,\n # n_estim=n_estimators,\n # box_type=box_type)\n\n ret_dict = {\n # 'model': clf,\n 'model_info': model_info,\n 'features_range': features_range,\n }\n return ret_dict\n\n def run_demo(self, model_info, features_range):\n '''\n Parameters\n ----------\n model_info : dict\n Required keys: ['model_name', 'sampler_name', 'pca_bool',\n 'n_estim', 'box_type']\n features_range : list or None\n The list is the range, so len(features) == 2, and increasing order\n is assumed\n '''\n info_run = '''\n- Model: **{0}**\n- Sampler: **{1}**\n- Bagging: **{2}**\n- N. 
estim.: **{3}**\n- PCA: **{4}**\n '''\n self.run_specific = info_run.format(model_info['model_name'],\n model_info['sampler_name'],\n model_info['box_type'],\n model_info['n_estim'],\n model_info['pca_bool'])\n\n model = get_model(**model_info)\n if features_range is None:\n return self.run_demo_non_sampling(model)\n else:\n # a list of int is assumed\n n_splits_features = 30\n features = list(range(*features_range))\n if (features_range[1] - features_range[0]) > n_splits_features:\n features = np.linspace(*features_range, num=n_splits_features,\n dtype=np.int).tolist()\n return self.run_demo_with_sampling(model, features)\n\n def run_demo_with_sampling(self, model, features):\n train_dicts = []\n test_dicts = []\n for dts_name in self.all_datasets_names:\n dataset = get_data(dts_name)\n train_score, test_score = get_sampling_model_scores(model,\n dataset,\n features)\n train_score['label'] = dts_name\n test_score['label'] = dts_name\n\n train_dicts.append(train_score)\n test_dicts.append(test_score)\n\n return train_dicts, test_dicts\n\n def run_demo_non_sampling(self, model):\n # run_demo llamará a esta o a la otra dependiendo del tipo.\n '''\n Parameters\n ----------\n model : abstract model\n Something on which you can call fit and score\n\n Returns\n -------\n (train_scores, test_scores) : tuple of list of dict\n dict with keys ['absi', 'ord', 'labels']\n '''\n train_scores = []\n test_scores = []\n for dts_name in self.all_datasets_names:\n dataset = get_data(dts_name)\n train_score, test_score = get_non_sampling_model_scores(model,\n dataset)\n train_scores.append(train_score)\n test_scores.append(test_score)\n train_dicts = []\n test_dicts = []\n for i, dts_name in enumerate(self.all_datasets_names):\n train_d = {\n 'absi': [0, 1],\n 'ord': [train_scores[i], train_scores[i]],\n 'label': dts_name,\n }\n test_d = {\n 'absi': [0, 1],\n 'ord': [test_scores[i], test_scores[i]],\n 'label': dts_name,\n }\n train_dicts.append(train_d)\n test_dicts.append(test_d)\n\n return train_dicts, test_dicts\n\n def box_type_changed(self, *args):\n '''\n Desactiva n_estim_selector cuando no se hará bagging\n El parámetro *args es solo por ipywidgets, no me hace falta\n '''\n if self.box_type_selector.value == 'None':\n self.n_estimators_selector.disabled = True\n self.n_estimators_selector.layout.visibility = 'hidden'\n else:\n self.n_estimators_selector.disabled = False\n self.n_estimators_selector.layout.visibility = 'visible'\n\n def sampler_changed(self, *args):\n '''\n Desactiva features_selector cuando no se hará bagging\n El parámetro *args es solo por ipywidgets, no me hace falta\n '''\n if self.sampler_selector.value == 'None':\n self.features_selector.disabled = True\n self.features_selector.layout.visibility = 'hidden'\n else:\n self.features_selector.disabled = False\n self.features_selector.layout.visibility = 'visible'\n",
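# --- Self-contained sketch (not part of the dataset entry above) ---
# Demo2.run_demo above thins a wide features_range down to at most
# n_splits_features points with np.linspace before scoring. The helper
# below restates just that sub-sampling step; the function name is an
# assumption and is not part of the demo_utils package.
import numpy as np

def build_feature_grid(features_range, n_splits_features=30):
    lo, hi = features_range                      # increasing order assumed, as in the class
    if (hi - lo) > n_splits_features:
        return np.linspace(lo, hi, num=n_splits_features, dtype=int).tolist()
    return list(range(lo, hi))

print(build_feature_grid([10, 20]))              # narrow range: every value
print(len(build_feature_grid([10, 500])))        # wide range: capped at 30 points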
"from __future__ import unicode_literals\nimport warnings\n\nfrom sklearn.feature_extraction.text import strip_tags\nfrom sklearn.feature_extraction.text import strip_accents_unicode\nfrom sklearn.feature_extraction.text import strip_accents_ascii\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.base import clone\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom sklearn.utils.testing import (assert_equal, assert_false, assert_true,\n assert_not_equal, assert_almost_equal,\n assert_in, assert_less, assert_greater,\n assert_warns_message, assert_raise_message,\n clean_warning_registry, ignore_warnings,\n SkipTest, assert_raises)\n\nfrom collections import defaultdict, Mapping\nfrom functools import partial\nimport pickle\nfrom io import StringIO\n\nimport pytest\n\nJUNK_FOOD_DOCS = (\n \"the pizza pizza beer copyright\",\n \"the pizza burger beer copyright\",\n \"the the pizza beer beer copyright\",\n \"the burger beer beer copyright\",\n \"the coke burger coke copyright\",\n \"the coke burger burger\",\n)\n\nNOTJUNK_FOOD_DOCS = (\n \"the salad celeri copyright\",\n \"the salad salad sparkling water copyright\",\n \"the the celeri celeri copyright\",\n \"the tomato tomato salad water\",\n \"the tomato salad water copyright\",\n)\n\nALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n\ndef uppercase(s):\n return strip_accents_unicode(s).upper()\n\n\ndef strip_eacute(s):\n return s.replace('\\xe9', 'e')\n\n\ndef split_tokenize(s):\n return s.split()\n\n\ndef lazy_analyze(s):\n return ['the_ultimate_feature']\n\n\ndef test_strip_accents():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_unicode(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_unicode(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '\\u0627' # simple halef\n assert_equal(strip_accents_unicode(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_unicode(a), expected)\n\n\ndef test_to_ascii():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_ascii(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_ascii(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '' # halef has no direct ascii match\n assert_equal(strip_accents_ascii(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_ascii(a), expected)\n\n\ndef 
test_word_analyzer_unigrams():\n for Vectorizer in (CountVectorizer, HashingVectorizer):\n wa = Vectorizer(strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon']\n assert_equal(wa(text), expected)\n\n text = \"This is a test, really.\\n\\n I met Harry yesterday.\"\n expected = ['this', 'is', 'test', 'really', 'met', 'harry',\n 'yesterday']\n assert_equal(wa(text), expected)\n\n wa = Vectorizer(input='file').build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['this', 'is', 'test', 'with', 'file', 'like',\n 'object']\n assert_equal(wa(text), expected)\n\n # with custom preprocessor\n wa = Vectorizer(preprocessor=uppercase).build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \" c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',\n 'ETAIT', 'PAS', 'TRES', 'BON']\n assert_equal(wa(text), expected)\n\n # with custom tokenizer\n wa = Vectorizer(tokenizer=split_tokenize,\n strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = [\"j'ai\", 'mange', 'du', 'kangourou', 'ce', 'midi,',\n \"c'etait\", 'pas', 'tres', 'bon.']\n assert_equal(wa(text), expected)\n\n\ndef test_word_analyzer_unigrams_and_bigrams():\n wa = CountVectorizer(analyzer=\"word\", strip_accents='unicode',\n ngram_range=(1, 2)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',\n 'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',\n 'etait pas', 'pas tres', 'tres bon']\n assert_equal(wa(text), expected)\n\n\ndef test_unicode_decode_error():\n # decode_error default to strict, so this should fail\n # First, encode (as bytes) a unicode string.\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n text_bytes = text.encode('utf-8')\n\n # Then let the Analyzer try to decode it as ascii. 
It should fail,\n # because we have given it an incorrect encoding.\n wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, wa, text_bytes)\n\n ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),\n encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, ca, text_bytes)\n\n\ndef test_char_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon\"\n expected = [\"j'a\", \"'ai\", 'ai ', 'i m', ' ma']\n assert_equal(cnga(text)[:5], expected)\n expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']\n assert_equal(cnga(text)[-5:], expected)\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n\ndef test_char_wb_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = [' th', 'thi', 'his', 'is ', ' thi']\n assert_equal(cnga(text)[:5], expected)\n\n expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char_wb',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"A test with a file-like object!\")\n expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']\n assert_equal(cnga(text)[:6], expected)\n\n\ndef test_word_ngram_analyzer():\n cnga = CountVectorizer(analyzer='word', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = ['this is test', 'is test really', 'test really met']\n assert_equal(cnga(text)[:3], expected)\n\n expected = ['test really met harry yesterday',\n 'this is test really met harry',\n 'is test really met harry yesterday']\n assert_equal(cnga(text)[-3:], expected)\n\n cnga_file = CountVectorizer(input='file', analyzer='word',\n ngram_range=(3, 6)).build_analyzer()\n file = StringIO(text)\n assert_equal(cnga_file(file), cnga(text))\n\n\ndef test_countvectorizer_custom_vocabulary():\n vocab = {\"pizza\": 0, \"beer\": 1}\n terms = set(vocab.keys())\n\n # Try a few of the supported types.\n for typ in [dict, list, iter, partial(defaultdict, int)]:\n v = typ(vocab)\n vect = CountVectorizer(vocabulary=v)\n vect.fit(JUNK_FOOD_DOCS)\n if isinstance(v, Mapping):\n assert_equal(vect.vocabulary_, vocab)\n else:\n assert_equal(set(vect.vocabulary_), terms)\n X = vect.transform(JUNK_FOOD_DOCS)\n assert_equal(X.shape[1], len(terms))\n\n\ndef test_countvectorizer_custom_vocabulary_pipeline():\n what_we_like = [\"pizza\", \"beer\"]\n pipe = Pipeline([\n ('count', CountVectorizer(vocabulary=what_we_like)),\n ('tfidf', TfidfTransformer())])\n X = pipe.fit_transform(ALL_FOOD_DOCS)\n assert_equal(set(pipe.named_steps['count'].vocabulary_),\n set(what_we_like))\n assert_equal(X.shape[1], len(what_we_like))\n\n\ndef 
test_countvectorizer_custom_vocabulary_repeated_indices():\n vocab = {\"pizza\": 0, \"beer\": 0}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"vocabulary contains repeated indices\", str(e).lower())\n\n\ndef test_countvectorizer_custom_vocabulary_gap_index():\n vocab = {\"pizza\": 1, \"beer\": 2}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"doesn't contain index\", str(e).lower())\n\n\ndef test_countvectorizer_stop_words():\n cv = CountVectorizer()\n cv.set_params(stop_words='english')\n assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)\n cv.set_params(stop_words='_bad_str_stop_')\n assert_raises(ValueError, cv.get_stop_words)\n cv.set_params(stop_words='_bad_unicode_stop_')\n assert_raises(ValueError, cv.get_stop_words)\n stoplist = ['some', 'other', 'words']\n cv.set_params(stop_words=stoplist)\n assert_equal(cv.get_stop_words(), set(stoplist))\n\n\ndef test_countvectorizer_empty_vocabulary():\n try:\n vect = CountVectorizer(vocabulary=[])\n vect.fit([\"foo\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n try:\n v = CountVectorizer(max_df=1.0, stop_words=\"english\")\n # fit on stopwords only\n v.fit([\"to be or not to be\", \"and me too\", \"and so do you\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n\ndef test_fit_countvectorizer_twice():\n cv = CountVectorizer()\n X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])\n X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])\n assert_not_equal(X1.shape[1], X2.shape[1])\n\n\ndef test_tf_idf_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # this is robust to features with only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n\ndef test_tfidf_no_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # the lack of smoothing make IDF fragile in the presence of feature with\n # only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n\n clean_warning_registry()\n with warnings.catch_warnings(record=True) as w:\n 1. 
/ np.array([0.])\n numpy_provides_div0_warning = len(w) == 1\n\n in_warning_message = 'divide by zero'\n tfidf = assert_warns_message(RuntimeWarning, in_warning_message,\n tr.fit_transform, X).toarray()\n if not numpy_provides_div0_warning:\n raise SkipTest(\"Numpy does not provide div 0 warnings.\")\n\n\ndef test_sublinear_tf():\n X = [[1], [2], [3]]\n tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)\n tfidf = tr.fit_transform(X).toarray()\n assert_equal(tfidf[0], 1)\n assert_greater(tfidf[1], tfidf[0])\n assert_greater(tfidf[2], tfidf[1])\n assert_less(tfidf[1], 2)\n assert_less(tfidf[2], 3)\n\n\ndef test_vectorizer():\n # raw documents as an iterator\n train_data = iter(ALL_FOOD_DOCS[:-1])\n test_data = [ALL_FOOD_DOCS[-1]]\n n_train = len(ALL_FOOD_DOCS) - 1\n\n # test without vocabulary\n v1 = CountVectorizer(max_df=0.5)\n counts_train = v1.fit_transform(train_data)\n if hasattr(counts_train, 'tocsr'):\n counts_train = counts_train.tocsr()\n assert_equal(counts_train[0, v1.vocabulary_[\"pizza\"]], 2)\n\n # build a vectorizer v1 with the same vocabulary as the one fitted by v1\n v2 = CountVectorizer(vocabulary=v1.vocabulary_)\n\n # compare that the two vectorizer give the same output on the test sample\n for v in (v1, v2):\n counts_test = v.transform(test_data)\n if hasattr(counts_test, 'tocsr'):\n counts_test = counts_test.tocsr()\n\n vocabulary = v.vocabulary_\n assert_equal(counts_test[0, vocabulary[\"salad\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"tomato\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"water\"]], 1)\n\n # stop word from the fixed list\n assert_false(\"the\" in vocabulary)\n\n # stop word found automatically by the vectorizer DF thresholding\n # words that are high frequent across the complete corpus are likely\n # to be not informative (either real stop words of extraction\n # artifacts)\n assert_false(\"copyright\" in vocabulary)\n\n # not present in the sample\n assert_equal(counts_test[0, vocabulary[\"coke\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"burger\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"beer\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"pizza\"]], 0)\n\n # test tf-idf\n t1 = TfidfTransformer(norm='l1')\n tfidf = t1.fit(counts_train).transform(counts_train).toarray()\n assert_equal(len(t1.idf_), len(v1.vocabulary_))\n assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))\n\n # test tf-idf with new data\n tfidf_test = t1.transform(counts_test).toarray()\n assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))\n\n # test tf alone\n t2 = TfidfTransformer(norm='l1', use_idf=False)\n tf = t2.fit(counts_train).transform(counts_train).toarray()\n assert_false(hasattr(t2, \"idf_\"))\n\n # test idf transform with unlearned idf vector\n t3 = TfidfTransformer(use_idf=True)\n assert_raises(ValueError, t3.transform, counts_train)\n\n # test idf transform with incompatible n_features\n X = [[1, 1, 5],\n [1, 1, 0]]\n t3.fit(X)\n X_incompt = [[1, 3],\n [1, 3]]\n assert_raises(ValueError, t3.transform, X_incompt)\n\n # L1-normalized term frequencies sum to one\n assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)\n\n # test the direct tfidf vectorizer\n # (equivalent to term count vectorizer + tfidf transformer)\n train_data = iter(ALL_FOOD_DOCS[:-1])\n tv = TfidfVectorizer(norm='l1')\n\n tv.max_df = v1.max_df\n tfidf2 = tv.fit_transform(train_data).toarray()\n assert_false(tv.fixed_vocabulary_)\n assert_array_almost_equal(tfidf, tfidf2)\n\n # test the direct tfidf vectorizer with new data\n 
tfidf_test2 = tv.transform(test_data).toarray()\n assert_array_almost_equal(tfidf_test, tfidf_test2)\n\n # test transform on unfitted vectorizer with empty vocabulary\n v3 = CountVectorizer(vocabulary=None)\n assert_raises(ValueError, v3.transform, train_data)\n\n # ascii preprocessor?\n v3.set_params(strip_accents='ascii', lowercase=False)\n assert_equal(v3.build_preprocessor(), strip_accents_ascii)\n\n # error on bad strip_accents param\n v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)\n assert_raises(ValueError, v3.build_preprocessor)\n\n # error with bad analyzer type\n v3.set_params = '_invalid_analyzer_type_'\n assert_raises(ValueError, v3.build_analyzer)\n\n\ndef test_tfidf_vectorizer_setters():\n tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,\n sublinear_tf=False)\n tv.norm = 'l1'\n assert_equal(tv._tfidf.norm, 'l1')\n tv.use_idf = True\n assert_true(tv._tfidf.use_idf)\n tv.smooth_idf = True\n assert_true(tv._tfidf.smooth_idf)\n tv.sublinear_tf = True\n assert_true(tv._tfidf.sublinear_tf)\n\n\n@ignore_warnings(category=DeprecationWarning)\ndef test_hashing_vectorizer():\n v = HashingVectorizer()\n X = v.transform(ALL_FOOD_DOCS)\n token_nnz = X.nnz\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # By default the hashed values receive a random sign and l2 normalization\n # makes the feature values bounded\n assert_true(np.min(X.data) > -1)\n assert_true(np.min(X.data) < 0)\n assert_true(np.max(X.data) > 0)\n assert_true(np.max(X.data) < 1)\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)\n\n # Check vectorization with some non-default parameters\n v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')\n X = v.transform(ALL_FOOD_DOCS)\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # ngrams generate more non zeros\n ngrams_nnz = X.nnz\n assert_true(ngrams_nnz > token_nnz)\n assert_true(ngrams_nnz < 2 * token_nnz)\n\n # makes the feature values bounded\n assert_true(np.min(X.data) > 0)\n assert_true(np.max(X.data) < 1)\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)\n\n\ndef test_feature_names():\n cv = CountVectorizer(max_df=0.5)\n\n # test for Value error on unfitted/empty vocabulary\n assert_raises(ValueError, cv.get_feature_names)\n assert_false(cv.fixed_vocabulary_)\n\n # test for vocabulary learned from data\n X = cv.fit_transform(ALL_FOOD_DOCS)\n n_samples, n_features = X.shape\n assert_equal(len(cv.vocabulary_), n_features)\n\n feature_names = cv.get_feature_names()\n assert_equal(len(feature_names), n_features)\n assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'],\n feature_names)\n\n for idx, name in enumerate(feature_names):\n assert_equal(idx, cv.vocabulary_.get(name))\n\n # test for custom vocabulary\n vocab = ['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water']\n\n cv = CountVectorizer(vocabulary=vocab)\n feature_names = cv.get_feature_names()\n assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad',\n 'sparkling', 'tomato', 'water'], feature_names)\n assert_true(cv.fixed_vocabulary_)\n\n for idx, name in enumerate(feature_names):\n assert_equal(idx, cv.vocabulary_.get(name))\n\n\ndef test_vectorizer_max_features():\n vec_factories = (\n CountVectorizer,\n 
TfidfVectorizer,\n )\n\n expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])\n expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',\n u'sparkling', u'water', u'the'])\n\n for vec_factory in vec_factories:\n # test bounded number of extracted features\n vectorizer = vec_factory(max_df=0.6, max_features=4)\n vectorizer.fit(ALL_FOOD_DOCS)\n assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)\n assert_equal(vectorizer.stop_words_, expected_stop_words)\n\n\ndef test_count_vectorizer_max_features():\n # Regression test: max_features didn't work correctly in 0.14.\n\n cv_1 = CountVectorizer(max_features=1)\n cv_3 = CountVectorizer(max_features=3)\n cv_None = CountVectorizer(max_features=None)\n\n counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n\n features_1 = cv_1.get_feature_names()\n features_3 = cv_3.get_feature_names()\n features_None = cv_None.get_feature_names()\n\n # The most common feature is \"the\", with frequency 7.\n assert_equal(7, counts_1.max())\n assert_equal(7, counts_3.max())\n assert_equal(7, counts_None.max())\n\n # The most common feature should be the same\n assert_equal(\"the\", features_1[np.argmax(counts_1)])\n assert_equal(\"the\", features_3[np.argmax(counts_3)])\n assert_equal(\"the\", features_None[np.argmax(counts_None)])\n\n\ndef test_vectorizer_max_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n vect.fit(test_data)\n assert_true('a' in vect.vocabulary_.keys())\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5\n vect.fit(test_data)\n assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert_true('a' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 2)\n\n vect.max_df = 1\n vect.fit(test_data)\n assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert_true('a' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 2)\n\n\ndef test_vectorizer_min_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', min_df=1)\n vect.fit(test_data)\n assert_true('a' in vect.vocabulary_.keys())\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.min_df = 2\n vect.fit(test_data)\n assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored\n assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain\n assert_true('c' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 4)\n\n vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4\n vect.fit(test_data)\n assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored\n assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains\n assert_true('c' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 5)\n\n\ndef test_count_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n X = vect.fit_transform(test_data).toarray()\n assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())\n assert_array_equal([[3, 1, 1, 0, 0],\n [1, 2, 0, 1, 1]], X)\n\n # using boolean features, we can fetch the binary occurrence info\n # 
instead.\n vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)\n X = vect.fit_transform(test_data).toarray()\n assert_array_equal([[1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1]], X)\n\n # check the ability to change the dtype\n vect = CountVectorizer(analyzer='char', max_df=1.0,\n binary=True, dtype=np.float32)\n X_sparse = vect.fit_transform(test_data)\n assert_equal(X_sparse.dtype, np.float32)\n\n\n@ignore_warnings(category=DeprecationWarning)\ndef test_hashed_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = HashingVectorizer(analyzer='char', non_negative=True,\n norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X[0:1].data), 3)\n assert_equal(np.max(X[1:2].data), 2)\n assert_equal(X.dtype, np.float64)\n\n # using boolean features, we can fetch the binary occurrence info\n # instead.\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X.data), 1)\n assert_equal(X.dtype, np.float64)\n\n # check the ability to change the dtype\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None, dtype=np.float64)\n X = vect.transform(test_data)\n assert_equal(X.dtype, np.float64)\n\n\ndef test_vectorizer_inverse_transform():\n # raw documents\n data = ALL_FOOD_DOCS\n for vectorizer in (TfidfVectorizer(), CountVectorizer()):\n transformed_data = vectorizer.fit_transform(data)\n inversed_data = vectorizer.inverse_transform(transformed_data)\n analyze = vectorizer.build_analyzer()\n for doc, inversed_terms in zip(data, inversed_data):\n terms = np.sort(np.unique(analyze(doc)))\n inversed_terms = np.sort(np.unique(inversed_terms))\n assert_array_equal(terms, inversed_terms)\n\n # Test that inverse_transform also works with numpy arrays\n transformed_data = transformed_data.toarray()\n inversed_data2 = vectorizer.inverse_transform(transformed_data)\n for terms, terms2 in zip(inversed_data, inversed_data2):\n assert_array_equal(np.sort(terms), np.sort(terms2))\n\n\ndef test_count_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.2, random_state=0)\n\n pipeline = Pipeline([('vect', CountVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'svc__loss': ('hinge', 'squared_hinge')\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n\n\ndef test_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others 
as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.1, random_state=0)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'vect__norm': ('l1', 'l2'),\n 'svc__loss': ('hinge', 'squared_hinge'),\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n assert_equal(best_vectorizer.norm, 'l2')\n assert_false(best_vectorizer.fixed_vocabulary_)\n\n\ndef test_vectorizer_pipeline_cross_validation():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n cv_scores = cross_val_score(pipeline, data, target, cv=3)\n assert_array_equal(cv_scores, [1., 1., 1.])\n\n\n@ignore_warnings(category=DeprecationWarning)\ndef test_vectorizer_unicode():\n # tests that the count vectorizer works with cyrillic.\n document = (\n \"\\xd0\\x9c\\xd0\\xb0\\xd1\\x88\\xd0\\xb8\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\"\n \"\\xb5 \\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd0\"\n \"\\xb5 \\xe2\\x80\\x94 \\xd0\\xbe\\xd0\\xb1\\xd1\\x88\\xd0\\xb8\\xd1\\x80\\xd0\\xbd\"\n \"\\xd1\\x8b\\xd0\\xb9 \\xd0\\xbf\\xd0\\xbe\\xd0\\xb4\\xd1\\x80\\xd0\\xb0\\xd0\\xb7\"\n \"\\xd0\\xb4\\xd0\\xb5\\xd0\\xbb \\xd0\\xb8\\xd1\\x81\\xd0\\xba\\xd1\\x83\\xd1\\x81\"\n \"\\xd1\\x81\\xd1\\x82\\xd0\\xb2\\xd0\\xb5\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\\xb3\"\n \"\\xd0\\xbe \\xd0\\xb8\\xd0\\xbd\\xd1\\x82\\xd0\\xb5\\xd0\\xbb\\xd0\\xbb\\xd0\"\n \"\\xb5\\xd0\\xba\\xd1\\x82\\xd0\\xb0, \\xd0\\xb8\\xd0\\xb7\\xd1\\x83\\xd1\\x87\"\n \"\\xd0\\xb0\\xd1\\x8e\\xd1\\x89\\xd0\\xb8\\xd0\\xb9 \\xd0\\xbc\\xd0\\xb5\\xd1\\x82\"\n \"\\xd0\\xbe\\xd0\\xb4\\xd1\\x8b \\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd1\\x82\\xd1\\x80\"\n \"\\xd0\\xbe\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd1\\x8f \\xd0\\xb0\\xd0\\xbb\\xd0\\xb3\"\n \"\\xd0\\xbe\\xd1\\x80\\xd0\\xb8\\xd1\\x82\\xd0\\xbc\\xd0\\xbe\\xd0\\xb2, \\xd1\\x81\"\n \"\\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd0\\xbe\\xd0\\xb1\\xd0\\xbd\\xd1\\x8b\\xd1\\x85 \"\n \"\\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb0\\xd1\\x82\\xd1\\x8c\\xd1\\x81\\xd1\"\n \"\\x8f.\")\n\n vect = CountVectorizer()\n X_counted = vect.fit_transform([document])\n assert_equal(X_counted.shape, (1, 15))\n\n vect = HashingVectorizer(norm=None, non_negative=True)\n X_hashed = vect.transform([document])\n assert_equal(X_hashed.shape, (1, 2 ** 20))\n\n # No collisions on such a small dataset\n assert_equal(X_counted.nnz, X_hashed.nnz)\n\n # When norm is None and non_negative, the tokens are counted up to\n # collisions\n 
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))\n\n\ndef test_tfidf_vectorizer_with_fixed_vocabulary():\n # non regression smoke test for inheritance issues\n vocabulary = ['pizza', 'celeri']\n vect = TfidfVectorizer(vocabulary=vocabulary)\n X_1 = vect.fit_transform(ALL_FOOD_DOCS)\n X_2 = vect.transform(ALL_FOOD_DOCS)\n assert_array_almost_equal(X_1.toarray(), X_2.toarray())\n assert_true(vect.fixed_vocabulary_)\n\n\ndef test_pickling_vectorizer():\n instances = [\n HashingVectorizer(),\n HashingVectorizer(norm='l1'),\n HashingVectorizer(binary=True),\n HashingVectorizer(ngram_range=(1, 2)),\n CountVectorizer(),\n CountVectorizer(preprocessor=strip_tags),\n CountVectorizer(analyzer=lazy_analyze),\n CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),\n TfidfVectorizer(),\n TfidfVectorizer(analyzer=lazy_analyze),\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n ]\n\n for orig in instances:\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_equal(copy.get_params(), orig.get_params())\n assert_array_equal(\n copy.fit_transform(JUNK_FOOD_DOCS).toarray(),\n orig.fit_transform(JUNK_FOOD_DOCS).toarray())\n\n\ndef test_countvectorizer_vocab_sets_when_pickling():\n # ensure that vocabulary of type set is coerced to a list to\n # preserve iteration ordering after deserialization\n rng = np.random.RandomState(0)\n vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'])\n for x in range(0, 100):\n vocab_set = set(rng.choice(vocab_words, size=5, replace=False))\n cv = CountVectorizer(vocabulary=vocab_set)\n unpickled_cv = pickle.loads(pickle.dumps(cv))\n cv.fit(ALL_FOOD_DOCS)\n unpickled_cv.fit(ALL_FOOD_DOCS)\n assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())\n\n\ndef test_countvectorizer_vocab_dicts_when_pickling():\n rng = np.random.RandomState(0)\n vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'])\n for x in range(0, 100):\n vocab_dict = dict()\n words = rng.choice(vocab_words, size=5, replace=False)\n for y in range(0, 5):\n vocab_dict[words[y]] = y\n cv = CountVectorizer(vocabulary=vocab_dict)\n unpickled_cv = pickle.loads(pickle.dumps(cv))\n cv.fit(ALL_FOOD_DOCS)\n unpickled_cv.fit(ALL_FOOD_DOCS)\n assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())\n\n\ndef test_stop_words_removal():\n # Ensure that deleting the stop_words_ attribute doesn't affect transform\n\n fitted_vectorizers = (\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)\n )\n\n for vect in fitted_vectorizers:\n vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n vect.stop_words_ = None\n stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n delattr(vect, 'stop_words_')\n stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n assert_array_equal(stop_None_transform, vect_transform)\n assert_array_equal(stop_del_transform, vect_transform)\n\n\ndef test_pickling_transformer():\n X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)\n orig = TfidfTransformer().fit(X)\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_array_equal(\n copy.fit_transform(X).toarray(),\n orig.fit_transform(X).toarray())\n\n\ndef test_transformer_idf_setter():\n 
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)\n orig = TfidfTransformer().fit(X)\n copy = TfidfTransformer()\n copy.idf_ = orig.idf_\n assert_array_equal(\n copy.transform(X).toarray(),\n orig.transform(X).toarray())\n\n\ndef test_tfidf_vectorizer_setter():\n orig = TfidfVectorizer(use_idf=True)\n orig.fit(JUNK_FOOD_DOCS)\n copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)\n copy.idf_ = orig.idf_\n assert_array_equal(\n copy.transform(JUNK_FOOD_DOCS).toarray(),\n orig.transform(JUNK_FOOD_DOCS).toarray())\n\n\ndef test_tfidfvectorizer_invalid_idf_attr():\n vect = TfidfVectorizer(use_idf=True)\n vect.fit(JUNK_FOOD_DOCS)\n copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)\n expected_idf_len = len(vect.idf_)\n invalid_idf = [1.0] * (expected_idf_len + 1)\n assert_raises(ValueError, setattr, copy, 'idf_', invalid_idf)\n\n\ndef test_non_unique_vocab():\n vocab = ['a', 'b', 'c', 'a', 'a']\n vect = CountVectorizer(vocabulary=vocab)\n assert_raises(ValueError, vect.fit, [])\n\n\ndef test_hashingvectorizer_nan_in_docs():\n # np.nan can appear when using pandas to load text fields from a csv file\n # with missing values.\n message = \"np.nan is an invalid document, expected byte or unicode string.\"\n exception = ValueError\n\n def func():\n hv = HashingVectorizer()\n hv.fit_transform(['hello world', np.nan, 'hello hello'])\n\n assert_raise_message(exception, message, func)\n\n\ndef test_tfidfvectorizer_binary():\n # Non-regression test: TfidfVectorizer used to ignore its \"binary\" param.\n v = TfidfVectorizer(binary=True, use_idf=False, norm=None)\n assert_true(v.binary)\n\n X = v.fit_transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X.ravel(), [1, 1, 1, 0])\n X2 = v.transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X2.ravel(), [1, 1, 1, 0])\n\n\ndef test_tfidfvectorizer_export_idf():\n vect = TfidfVectorizer(use_idf=True)\n vect.fit(JUNK_FOOD_DOCS)\n assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)\n\n\ndef test_vectorizer_vocab_clone():\n vect_vocab = TfidfVectorizer(vocabulary=[\"the\"])\n vect_vocab_clone = clone(vect_vocab)\n vect_vocab.fit(ALL_FOOD_DOCS)\n vect_vocab_clone.fit(ALL_FOOD_DOCS)\n assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)\n\n\ndef test_vectorizer_string_object_as_input():\n message = (\"Iterable over raw text documents expected, \"\n \"string object received.\")\n for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:\n assert_raise_message(\n ValueError, message, vec.fit_transform, \"hello world!\")\n assert_raise_message(\n ValueError, message, vec.fit, \"hello world!\")\n assert_raise_message(\n ValueError, message, vec.transform, \"hello world!\")\n\n\[email protected](\"vec\", [\n HashingVectorizer(ngram_range=(2, 1)),\n CountVectorizer(ngram_range=(2, 1)),\n TfidfVectorizer(ngram_range=(2, 1))\n ])\ndef test_vectorizers_invalid_ngram_range(vec):\n # vectorizers could be initialized with invalid ngram range\n # test for raising error message\n invalid_range = vec.ngram_range\n message = (\"Invalid value for ngram_range=%s \"\n \"lower boundary larger than the upper boundary.\"\n % str(invalid_range))\n\n assert_raise_message(\n ValueError, message, vec.fit, [\"good news everyone\"])\n assert_raise_message(\n ValueError, message, vec.fit_transform, [\"good news everyone\"])\n\n if isinstance(vec, HashingVectorizer):\n assert_raise_message(\n ValueError, message, vec.transform, [\"good news everyone\"])\n",
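# --- Illustrative companion check (not part of the dataset entry above) ---
# test_vectorizer above relies on TfidfVectorizer being equivalent to
# CountVectorizer followed by TfidfTransformer. A quick standalone check of
# that equivalence on two documents borrowed from the test corpus; this
# script is an addition, not part of the test file.
import numpy as np
from sklearn.feature_extraction.text import (CountVectorizer,
                                              TfidfTransformer,
                                              TfidfVectorizer)

docs = ["the pizza pizza beer copyright",
        "the salad celeri copyright"]

counts = CountVectorizer().fit_transform(docs)
two_step = TfidfTransformer(norm='l2').fit_transform(counts).toarray()
direct = TfidfVectorizer(norm='l2').fit_transform(docs).toarray()

print(np.allclose(two_step, direct))  # expected: True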
"# Author: Wei Xue <[email protected]>\n# Thierry Guillemot <[email protected]>\n# License: BSD 3 clause\n\nimport sys\nimport warnings\n\nimport numpy as np\n\nfrom scipy import stats, linalg\n\nfrom sklearn.covariance import EmpiricalCovariance\nfrom sklearn.datasets.samples_generator import make_spd_matrix\nfrom sklearn.externals.six.moves import cStringIO as StringIO\nfrom sklearn.metrics.cluster import adjusted_rand_score\nfrom sklearn.mixture.gaussian_mixture import GaussianMixture\nfrom sklearn.mixture.gaussian_mixture import (\n _estimate_gaussian_covariances_full,\n _estimate_gaussian_covariances_tied,\n _estimate_gaussian_covariances_diag,\n _estimate_gaussian_covariances_spherical)\nfrom sklearn.mixture.gaussian_mixture import _compute_precision_cholesky\nfrom sklearn.mixture.gaussian_mixture import _compute_log_det_cholesky\nfrom sklearn.exceptions import ConvergenceWarning, NotFittedError\nfrom sklearn.utils.extmath import fast_logdet\nfrom sklearn.utils.testing import assert_allclose\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_greater_equal\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import ignore_warnings\n\n\nCOVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']\n\n\ndef generate_data(n_samples, n_features, weights, means, precisions,\n covariance_type):\n rng = np.random.RandomState(0)\n\n X = []\n if covariance_type == 'spherical':\n for _, (w, m, c) in enumerate(zip(weights, means,\n precisions['spherical'])):\n X.append(rng.multivariate_normal(m, c * np.eye(n_features),\n int(np.round(w * n_samples))))\n if covariance_type == 'diag':\n for _, (w, m, c) in enumerate(zip(weights, means,\n precisions['diag'])):\n X.append(rng.multivariate_normal(m, np.diag(c),\n int(np.round(w * n_samples))))\n if covariance_type == 'tied':\n for _, (w, m) in enumerate(zip(weights, means)):\n X.append(rng.multivariate_normal(m, precisions['tied'],\n int(np.round(w * n_samples))))\n if covariance_type == 'full':\n for _, (w, m, c) in enumerate(zip(weights, means,\n precisions['full'])):\n X.append(rng.multivariate_normal(m, c,\n int(np.round(w * n_samples))))\n\n X = np.vstack(X)\n return X\n\n\nclass RandomData(object):\n def __init__(self, rng, n_samples=500, n_components=2, n_features=2,\n scale=50):\n self.n_samples = n_samples\n self.n_components = n_components\n self.n_features = n_features\n\n self.weights = rng.rand(n_components)\n self.weights = self.weights / self.weights.sum()\n self.means = rng.rand(n_components, n_features) * scale\n self.covariances = {\n 'spherical': .5 + rng.rand(n_components),\n 'diag': (.5 + rng.rand(n_components, n_features)) ** 2,\n 'tied': make_spd_matrix(n_features, random_state=rng),\n 'full': np.array([\n make_spd_matrix(n_features, random_state=rng) * .5\n for _ in range(n_components)])}\n self.precisions = {\n 'spherical': 1. / self.covariances['spherical'],\n 'diag': 1. 
/ self.covariances['diag'],\n 'tied': linalg.inv(self.covariances['tied']),\n 'full': np.array([linalg.inv(covariance)\n for covariance in self.covariances['full']])}\n\n self.X = dict(zip(COVARIANCE_TYPE, [generate_data(\n n_samples, n_features, self.weights, self.means, self.covariances,\n covar_type) for covar_type in COVARIANCE_TYPE]))\n self.Y = np.hstack([k * np.ones(int(np.round(w * n_samples)))\n for k, w in enumerate(self.weights)])\n\n\ndef test_gaussian_mixture_attributes():\n # test bad parameters\n rng = np.random.RandomState(0)\n X = rng.rand(10, 2)\n\n n_components_bad = 0\n gmm = GaussianMixture(n_components=n_components_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'n_components': %d \"\n \"Estimation requires at least one component\"\n % n_components_bad, gmm.fit, X)\n\n # covariance_type should be in [spherical, diag, tied, full]\n covariance_type_bad = 'bad_covariance_type'\n gmm = GaussianMixture(covariance_type=covariance_type_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'covariance_type': %s \"\n \"'covariance_type' should be in \"\n \"['spherical', 'tied', 'diag', 'full']\"\n % covariance_type_bad,\n gmm.fit, X)\n\n tol_bad = -1\n gmm = GaussianMixture(tol=tol_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'tol': %.5f \"\n \"Tolerance used by the EM must be non-negative\"\n % tol_bad, gmm.fit, X)\n\n reg_covar_bad = -1\n gmm = GaussianMixture(reg_covar=reg_covar_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'reg_covar': %.5f \"\n \"regularization on covariance must be \"\n \"non-negative\" % reg_covar_bad, gmm.fit, X)\n\n max_iter_bad = 0\n gmm = GaussianMixture(max_iter=max_iter_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'max_iter': %d \"\n \"Estimation requires at least one iteration\"\n % max_iter_bad, gmm.fit, X)\n\n n_init_bad = 0\n gmm = GaussianMixture(n_init=n_init_bad)\n assert_raise_message(ValueError,\n \"Invalid value for 'n_init': %d \"\n \"Estimation requires at least one run\"\n % n_init_bad, gmm.fit, X)\n\n init_params_bad = 'bad_method'\n gmm = GaussianMixture(init_params=init_params_bad)\n assert_raise_message(ValueError,\n \"Unimplemented initialization method '%s'\"\n % init_params_bad,\n gmm.fit, X)\n\n # test good parameters\n n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1\n covariance_type, init_params = 'full', 'random'\n gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,\n max_iter=max_iter, reg_covar=reg_covar,\n covariance_type=covariance_type,\n init_params=init_params).fit(X)\n\n assert_equal(gmm.n_components, n_components)\n assert_equal(gmm.covariance_type, covariance_type)\n assert_equal(gmm.tol, tol)\n assert_equal(gmm.reg_covar, reg_covar)\n assert_equal(gmm.max_iter, max_iter)\n assert_equal(gmm.n_init, n_init)\n assert_equal(gmm.init_params, init_params)\n\n\ndef test_check_X():\n from sklearn.mixture.base import _check_X\n rng = np.random.RandomState(0)\n\n n_samples, n_components, n_features = 10, 2, 2\n\n X_bad_dim = rng.rand(n_components - 1, n_features)\n assert_raise_message(ValueError,\n 'Expected n_samples >= n_components '\n 'but got n_components = %d, n_samples = %d'\n % (n_components, X_bad_dim.shape[0]),\n _check_X, X_bad_dim, n_components)\n\n X_bad_dim = rng.rand(n_components, n_features + 1)\n assert_raise_message(ValueError,\n 'Expected the input data X have %d features, '\n 'but got %d features'\n % (n_features, X_bad_dim.shape[1]),\n _check_X, X_bad_dim, n_components, n_features)\n\n X = 
rng.rand(n_samples, n_features)\n assert_array_equal(X, _check_X(X, n_components, n_features))\n\n\ndef test_check_weights():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n\n n_components = rand_data.n_components\n X = rand_data.X['full']\n\n g = GaussianMixture(n_components=n_components)\n\n # Check bad shape\n weights_bad_shape = rng.rand(n_components, 1)\n g.weights_init = weights_bad_shape\n assert_raise_message(ValueError,\n \"The parameter 'weights' should have the shape of \"\n \"(%d,), but got %s\" %\n (n_components, str(weights_bad_shape.shape)),\n g.fit, X)\n\n # Check bad range\n weights_bad_range = rng.rand(n_components) + 1\n g.weights_init = weights_bad_range\n assert_raise_message(ValueError,\n \"The parameter 'weights' should be in the range \"\n \"[0, 1], but got max value %.5f, min value %.5f\"\n % (np.min(weights_bad_range),\n np.max(weights_bad_range)),\n g.fit, X)\n\n # Check bad normalization\n weights_bad_norm = rng.rand(n_components)\n weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)\n g.weights_init = weights_bad_norm\n assert_raise_message(ValueError,\n \"The parameter 'weights' should be normalized, \"\n \"but got sum(weights) = %.5f\"\n % np.sum(weights_bad_norm),\n g.fit, X)\n\n # Check good weights matrix\n weights = rand_data.weights\n g = GaussianMixture(weights_init=weights, n_components=n_components)\n g.fit(X)\n assert_array_equal(weights, g.weights_init)\n\n\ndef test_check_means():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n\n n_components, n_features = rand_data.n_components, rand_data.n_features\n X = rand_data.X['full']\n\n g = GaussianMixture(n_components=n_components)\n\n # Check means bad shape\n means_bad_shape = rng.rand(n_components + 1, n_features)\n g.means_init = means_bad_shape\n assert_raise_message(ValueError,\n \"The parameter 'means' should have the shape of \",\n g.fit, X)\n\n # Check good means matrix\n means = rand_data.means\n g.means_init = means\n g.fit(X)\n assert_array_equal(means, g.means_init)\n\n\ndef test_check_precisions():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n\n n_components, n_features = rand_data.n_components, rand_data.n_features\n\n # Define the bad precisions for each covariance_type\n precisions_bad_shape = {\n 'full': np.ones((n_components + 1, n_features, n_features)),\n 'tied': np.ones((n_features + 1, n_features + 1)),\n 'diag': np.ones((n_components + 1, n_features)),\n 'spherical': np.ones((n_components + 1))}\n\n # Define not positive-definite precisions\n precisions_not_pos = np.ones((n_components, n_features, n_features))\n precisions_not_pos[0] = np.eye(n_features)\n precisions_not_pos[0, 0, 0] = -1.\n\n precisions_not_positive = {\n 'full': precisions_not_pos,\n 'tied': precisions_not_pos[0],\n 'diag': -1. * np.ones((n_components, n_features)),\n 'spherical': -1. 
* np.ones(n_components)}\n\n not_positive_errors = {\n 'full': 'symmetric, positive-definite',\n 'tied': 'symmetric, positive-definite',\n 'diag': 'positive',\n 'spherical': 'positive'}\n\n for covar_type in COVARIANCE_TYPE:\n X = RandomData(rng).X[covar_type]\n g = GaussianMixture(n_components=n_components,\n covariance_type=covar_type,\n random_state=rng)\n\n # Check precisions with bad shapes\n g.precisions_init = precisions_bad_shape[covar_type]\n assert_raise_message(ValueError,\n \"The parameter '%s precision' should have \"\n \"the shape of\" % covar_type,\n g.fit, X)\n\n # Check not positive precisions\n g.precisions_init = precisions_not_positive[covar_type]\n assert_raise_message(ValueError,\n \"'%s precision' should be %s\"\n % (covar_type, not_positive_errors[covar_type]),\n g.fit, X)\n\n # Check the correct init of precisions_init\n g.precisions_init = rand_data.precisions[covar_type]\n g.fit(X)\n assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)\n\n\ndef test_suffstat_sk_full():\n # compare the precision matrix compute from the\n # EmpiricalCovariance.covariance fitted on X*sqrt(resp)\n # with _sufficient_sk_full, n_components=1\n rng = np.random.RandomState(0)\n n_samples, n_features = 500, 2\n\n # special case 1, assuming data is \"centered\"\n X = rng.rand(n_samples, n_features)\n resp = rng.rand(n_samples, 1)\n X_resp = np.sqrt(resp) * X\n nk = np.array([n_samples])\n xk = np.zeros((1, n_features))\n covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)\n ecov = EmpiricalCovariance(assume_centered=True)\n ecov.fit(X_resp)\n assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)\n assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')\n precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])\n precs_est = np.array([linalg.inv(cov) for cov in covars_pred])\n assert_array_almost_equal(precs_est, precs_pred)\n\n # special case 2, assuming resp are all ones\n resp = np.ones((n_samples, 1))\n nk = np.array([n_samples])\n xk = X.mean(axis=0).reshape((1, -1))\n covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)\n ecov = EmpiricalCovariance(assume_centered=False)\n ecov.fit(X)\n assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)\n assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')\n precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])\n precs_est = np.array([linalg.inv(cov) for cov in covars_pred])\n assert_array_almost_equal(precs_est, precs_pred)\n\n\ndef test_suffstat_sk_tied():\n # use equation Nk * Sk / N = S_tied\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 500, 2, 2\n\n resp = rng.rand(n_samples, n_components)\n resp = resp / resp.sum(axis=1)[:, np.newaxis]\n X = rng.rand(n_samples, n_features)\n nk = resp.sum(axis=0)\n xk = np.dot(resp.T, X) / nk[:, np.newaxis]\n\n covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)\n covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,\n 0) / n_samples\n\n covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)\n\n ecov = EmpiricalCovariance()\n ecov.covariance_ = covars_pred_full\n assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)\n 
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')\n precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)\n precs_est = linalg.inv(covars_pred_tied)\n assert_array_almost_equal(precs_est, precs_pred)\n\n\ndef test_suffstat_sk_diag():\n # test against 'full' case\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 500, 2, 2\n\n resp = rng.rand(n_samples, n_components)\n resp = resp / resp.sum(axis=1)[:, np.newaxis]\n X = rng.rand(n_samples, n_features)\n nk = resp.sum(axis=0)\n xk = np.dot(resp.T, X) / nk[:, np.newaxis]\n covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)\n covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)\n\n ecov = EmpiricalCovariance()\n for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):\n ecov.covariance_ = np.diag(np.diag(cov_full))\n cov_diag = np.diag(cov_diag)\n assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)\n assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')\n assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)\n\n\ndef test_gaussian_suffstat_sk_spherical():\n # computing spherical covariance equals to the variance of one-dimension\n # data after flattening, n_components=1\n rng = np.random.RandomState(0)\n n_samples, n_features = 500, 2\n\n X = rng.rand(n_samples, n_features)\n X = X - X.mean()\n resp = np.ones((n_samples, 1))\n nk = np.array([n_samples])\n xk = X.mean()\n covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,\n nk, xk, 0)\n covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /\n (n_features * n_samples))\n assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,\n 'spherical')\n assert_almost_equal(covars_pred_spherical, 1. 
/ precs_chol_pred ** 2)\n\n\ndef test_compute_log_det_cholesky():\n n_features = 2\n rand_data = RandomData(np.random.RandomState(0))\n\n for covar_type in COVARIANCE_TYPE:\n covariance = rand_data.covariances[covar_type]\n\n if covar_type == 'full':\n predected_det = np.array([linalg.det(cov) for cov in covariance])\n elif covar_type == 'tied':\n predected_det = linalg.det(covariance)\n elif covar_type == 'diag':\n predected_det = np.array([np.prod(cov) for cov in covariance])\n elif covar_type == 'spherical':\n predected_det = covariance ** n_features\n\n # We compute the cholesky decomposition of the covariance matrix\n expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(\n covariance, covar_type), covar_type, n_features=n_features)\n assert_array_almost_equal(expected_det, - .5 * np.log(predected_det))\n\n\ndef _naive_lmvnpdf_diag(X, means, covars):\n resp = np.empty((len(X), len(means)))\n stds = np.sqrt(covars)\n for i, (mean, std) in enumerate(zip(means, stds)):\n resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)\n return resp\n\n\ndef test_gaussian_mixture_log_probabilities():\n from sklearn.mixture.gaussian_mixture import _estimate_log_gaussian_prob\n\n # test against with _naive_lmvnpdf_diag\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n n_samples = 500\n n_features = rand_data.n_features\n n_components = rand_data.n_components\n\n means = rand_data.means\n covars_diag = rng.rand(n_components, n_features)\n X = rng.rand(n_samples, n_features)\n log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)\n\n # full covariances\n precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])\n\n log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')\n assert_array_almost_equal(log_prob, log_prob_naive)\n\n # diag covariances\n precs_chol_diag = 1. / np.sqrt(covars_diag)\n log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')\n assert_array_almost_equal(log_prob, log_prob_naive)\n\n # tied\n covars_tied = np.array([x for x in covars_diag]).mean(axis=0)\n precs_tied = np.diag(np.sqrt(1. / covars_tied))\n\n log_prob_naive = _naive_lmvnpdf_diag(X, means,\n [covars_tied] * n_components)\n log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')\n\n assert_array_almost_equal(log_prob, log_prob_naive)\n\n # spherical\n covars_spherical = covars_diag.mean(axis=1)\n precs_spherical = 1. 
/ np.sqrt(covars_diag.mean(axis=1))\n log_prob_naive = _naive_lmvnpdf_diag(X, means,\n [[k] * n_features for k in\n covars_spherical])\n log_prob = _estimate_log_gaussian_prob(X, means,\n precs_spherical, 'spherical')\n assert_array_almost_equal(log_prob, log_prob_naive)\n\n# skip tests on weighted_log_probabilities, log_weights\n\n\ndef test_gaussian_mixture_estimate_log_prob_resp():\n # test whether responsibilities are normalized\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=5)\n n_samples = rand_data.n_samples\n n_features = rand_data.n_features\n n_components = rand_data.n_components\n\n X = rng.rand(n_samples, n_features)\n for covar_type in COVARIANCE_TYPE:\n weights = rand_data.weights\n means = rand_data.means\n precisions = rand_data.precisions[covar_type]\n g = GaussianMixture(n_components=n_components, random_state=rng,\n weights_init=weights, means_init=means,\n precisions_init=precisions,\n covariance_type=covar_type)\n g.fit(X)\n resp = g.predict_proba(X)\n assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))\n assert_array_equal(g.weights_init, weights)\n assert_array_equal(g.means_init, means)\n assert_array_equal(g.precisions_init, precisions)\n\n\ndef test_gaussian_mixture_predict_predict_proba():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n Y = rand_data.Y\n g = GaussianMixture(n_components=rand_data.n_components,\n random_state=rng, weights_init=rand_data.weights,\n means_init=rand_data.means,\n precisions_init=rand_data.precisions[covar_type],\n covariance_type=covar_type)\n\n # Check a warning message arrive if we don't do fit\n assert_raise_message(NotFittedError,\n \"This GaussianMixture instance is not fitted \"\n \"yet. 
Call 'fit' with appropriate arguments \"\n \"before using this method.\", g.predict, X)\n\n g.fit(X)\n Y_pred = g.predict(X)\n Y_pred_proba = g.predict_proba(X).argmax(axis=1)\n assert_array_equal(Y_pred, Y_pred_proba)\n assert_greater(adjusted_rand_score(Y, Y_pred), .95)\n\n\ndef test_gaussian_mixture_fit():\n # recover the ground truth\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n n_features = rand_data.n_features\n n_components = rand_data.n_components\n\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n g = GaussianMixture(n_components=n_components, n_init=20,\n reg_covar=0, random_state=rng,\n covariance_type=covar_type)\n g.fit(X)\n\n # needs more data to pass the test with rtol=1e-7\n assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),\n rtol=0.1, atol=1e-2)\n\n arg_idx1 = g.means_[:, 0].argsort()\n arg_idx2 = rand_data.means[:, 0].argsort()\n assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],\n rtol=0.1, atol=1e-2)\n\n if covar_type == 'full':\n prec_pred = g.precisions_\n prec_test = rand_data.precisions['full']\n elif covar_type == 'tied':\n prec_pred = np.array([g.precisions_] * n_components)\n prec_test = np.array([rand_data.precisions['tied']] * n_components)\n elif covar_type == 'spherical':\n prec_pred = np.array([np.eye(n_features) * c\n for c in g.precisions_])\n prec_test = np.array([np.eye(n_features) * c for c in\n rand_data.precisions['spherical']])\n elif covar_type == 'diag':\n prec_pred = np.array([np.diag(d) for d in g.precisions_])\n prec_test = np.array([np.diag(d) for d in\n rand_data.precisions['diag']])\n\n arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()\n arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()\n for k, h in zip(arg_idx1, arg_idx2):\n ecov = EmpiricalCovariance()\n ecov.covariance_ = prec_test[h]\n # the accuracy depends on the number of data and randomness, rng\n assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1)\n\n\ndef test_gaussian_mixture_fit_best_params():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n n_components = rand_data.n_components\n n_init = 10\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,\n random_state=rng, covariance_type=covar_type)\n ll = []\n for _ in range(n_init):\n g.fit(X)\n ll.append(g.score(X))\n ll = np.array(ll)\n g_best = GaussianMixture(n_components=n_components,\n n_init=n_init, reg_covar=0, random_state=rng,\n covariance_type=covar_type)\n g_best.fit(X)\n assert_almost_equal(ll.min(), g_best.score(X))\n\n\ndef test_gaussian_mixture_fit_convergence_warning():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=1)\n n_components = rand_data.n_components\n max_iter = 1\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n g = GaussianMixture(n_components=n_components, n_init=1,\n max_iter=max_iter, reg_covar=0, random_state=rng,\n covariance_type=covar_type)\n assert_warns_message(ConvergenceWarning,\n 'Initialization %d did not converge. 
'\n 'Try different init parameters, '\n 'or increase max_iter, tol '\n 'or check for degenerate data.'\n % max_iter, g.fit, X)\n\n\ndef test_multiple_init():\n # Test that multiple inits does not much worse than a single one\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 50, 5, 2\n X = rng.randn(n_samples, n_features)\n for cv_type in COVARIANCE_TYPE:\n train1 = GaussianMixture(n_components=n_components,\n covariance_type=cv_type,\n random_state=rng).fit(X).score(X)\n train2 = GaussianMixture(n_components=n_components,\n covariance_type=cv_type,\n random_state=rng, n_init=5).fit(X).score(X)\n assert_greater_equal(train2, train1)\n\n\ndef test_gaussian_mixture_n_parameters():\n # Test that the right number of parameters is estimated\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 50, 5, 2\n X = rng.randn(n_samples, n_features)\n n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}\n for cv_type in COVARIANCE_TYPE:\n g = GaussianMixture(\n n_components=n_components, covariance_type=cv_type,\n random_state=rng).fit(X)\n assert_equal(g._n_parameters(), n_params[cv_type])\n\n\ndef test_bic_1d_1component():\n # Test all of the covariance_types return the same BIC score for\n # 1-dimensional, 1 component fits.\n rng = np.random.RandomState(0)\n n_samples, n_dim, n_components = 100, 1, 1\n X = rng.randn(n_samples, n_dim)\n bic_full = GaussianMixture(n_components=n_components,\n covariance_type='full',\n random_state=rng).fit(X).bic(X)\n for covariance_type in ['tied', 'diag', 'spherical']:\n bic = GaussianMixture(n_components=n_components,\n covariance_type=covariance_type,\n random_state=rng).fit(X).bic(X)\n assert_almost_equal(bic_full, bic)\n\n\ndef test_gaussian_mixture_aic_bic():\n # Test the aic and bic criteria\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 50, 3, 2\n X = rng.randn(n_samples, n_features)\n # standard gaussian entropy\n sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +\n n_features * (1 + np.log(2 * np.pi)))\n for cv_type in COVARIANCE_TYPE:\n g = GaussianMixture(\n n_components=n_components, covariance_type=cv_type,\n random_state=rng, max_iter=200)\n g.fit(X)\n aic = 2 * n_samples * sgh + 2 * g._n_parameters()\n bic = (2 * n_samples * sgh +\n np.log(n_samples) * g._n_parameters())\n bound = n_features / np.sqrt(n_samples)\n assert_true((g.aic(X) - aic) / n_samples < bound)\n assert_true((g.bic(X) - bic) / n_samples < bound)\n\n\ndef test_gaussian_mixture_verbose():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng)\n n_components = rand_data.n_components\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,\n random_state=rng, covariance_type=covar_type,\n verbose=1)\n h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,\n random_state=rng, covariance_type=covar_type,\n verbose=2)\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n g.fit(X)\n h.fit(X)\n finally:\n sys.stdout = old_stdout\n\n\ndef test_warm_start():\n\n random_state = 0\n rng = np.random.RandomState(random_state)\n n_samples, n_features, n_components = 500, 2, 2\n X = rng.rand(n_samples, n_features)\n\n # Assert the warm_start give the same result for the same number of iter\n g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,\n reg_covar=0, random_state=random_state,\n warm_start=False)\n h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,\n reg_covar=0, 
random_state=random_state,\n warm_start=True)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n g.fit(X)\n score1 = h.fit(X).score(X)\n score2 = h.fit(X).score(X)\n\n assert_almost_equal(g.weights_, h.weights_)\n assert_almost_equal(g.means_, h.means_)\n assert_almost_equal(g.precisions_, h.precisions_)\n assert_greater(score2, score1)\n\n # Assert that by using warm_start we can converge to a good solution\n g = GaussianMixture(n_components=n_components, n_init=1,\n max_iter=5, reg_covar=0, random_state=random_state,\n warm_start=False, tol=1e-6)\n h = GaussianMixture(n_components=n_components, n_init=1,\n max_iter=5, reg_covar=0, random_state=random_state,\n warm_start=True, tol=1e-6)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n g.fit(X)\n h.fit(X).fit(X)\n\n assert_true(not g.converged_)\n assert_true(h.converged_)\n\n\ndef test_score():\n covar_type = 'full'\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=7)\n n_components = rand_data.n_components\n X = rand_data.X[covar_type]\n\n # Check the error message if we don't call fit\n gmm1 = GaussianMixture(n_components=n_components, n_init=1,\n max_iter=1, reg_covar=0, random_state=rng,\n covariance_type=covar_type)\n assert_raise_message(NotFittedError,\n \"This GaussianMixture instance is not fitted \"\n \"yet. Call 'fit' with appropriate arguments \"\n \"before using this method.\", gmm1.score, X)\n\n # Check score value\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n gmm1.fit(X)\n gmm_score = gmm1.score(X)\n gmm_score_proba = gmm1.score_samples(X).mean()\n assert_almost_equal(gmm_score, gmm_score_proba)\n\n # Check if the score increase\n gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,\n random_state=rng,\n covariance_type=covar_type).fit(X)\n assert_greater(gmm2.score(X), gmm1.score(X))\n\n\ndef test_score_samples():\n covar_type = 'full'\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=7)\n n_components = rand_data.n_components\n X = rand_data.X[covar_type]\n\n # Check the error message if we don't call fit\n gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,\n random_state=rng, covariance_type=covar_type)\n assert_raise_message(NotFittedError,\n \"This GaussianMixture instance is not fitted \"\n \"yet. 
Call 'fit' with appropriate arguments \"\n \"before using this method.\", gmm.score_samples, X)\n\n gmm_score_samples = gmm.fit(X).score_samples(X)\n assert_equal(gmm_score_samples.shape[0], rand_data.n_samples)\n\n\ndef test_monotonic_likelihood():\n # We check that each step of the EM without regularization improve\n # monotonically the training set likelihood\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=7)\n n_components = rand_data.n_components\n\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n gmm = GaussianMixture(n_components=n_components,\n covariance_type=covar_type, reg_covar=0,\n warm_start=True, max_iter=1, random_state=rng,\n tol=1e-7)\n current_log_likelihood = -np.infty\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ConvergenceWarning)\n # Do one training iteration at a time so we can make sure that the\n # training log likelihood increases after each iteration.\n for _ in range(600):\n prev_log_likelihood = current_log_likelihood\n try:\n current_log_likelihood = gmm.fit(X).score(X)\n except ConvergenceWarning:\n pass\n assert_greater_equal(current_log_likelihood,\n prev_log_likelihood)\n\n if gmm.converged_:\n break\n\n assert_true(gmm.converged_)\n\n\ndef test_regularisation():\n # We train the GaussianMixture on degenerate data by defining two clusters\n # of a 0 covariance.\n rng = np.random.RandomState(0)\n n_samples, n_features = 10, 5\n\n X = np.vstack((np.ones((n_samples // 2, n_features)),\n np.zeros((n_samples // 2, n_features))))\n\n for covar_type in COVARIANCE_TYPE:\n gmm = GaussianMixture(n_components=n_samples, reg_covar=0,\n covariance_type=covar_type, random_state=rng)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_raise_message(ValueError,\n \"Fitting the mixture model failed because \"\n \"some components have ill-defined empirical \"\n \"covariance (for instance caused by \"\n \"singleton or collapsed samples). Try to \"\n \"decrease the number of components, or \"\n \"increase reg_covar.\", gmm.fit, X)\n\n gmm.set_params(reg_covar=1e-6).fit(X)\n\n\ndef test_property():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=7)\n n_components = rand_data.n_components\n\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n gmm = GaussianMixture(n_components=n_components,\n covariance_type=covar_type, random_state=rng,\n n_init=5)\n gmm.fit(X)\n if covar_type == 'full':\n for prec, covar in zip(gmm.precisions_, gmm.covariances_):\n\n assert_array_almost_equal(linalg.inv(prec), covar)\n elif covar_type == 'tied':\n assert_array_almost_equal(linalg.inv(gmm.precisions_),\n gmm.covariances_)\n else:\n assert_array_almost_equal(gmm.precisions_, 1. 
/ gmm.covariances_)\n\n\ndef test_sample():\n rng = np.random.RandomState(0)\n rand_data = RandomData(rng, scale=7, n_components=3)\n n_features, n_components = rand_data.n_features, rand_data.n_components\n\n for covar_type in COVARIANCE_TYPE:\n X = rand_data.X[covar_type]\n\n gmm = GaussianMixture(n_components=n_components,\n covariance_type=covar_type, random_state=rng)\n # To sample we need that GaussianMixture is fitted\n assert_raise_message(NotFittedError, \"This GaussianMixture instance \"\n \"is not fitted\", gmm.sample, 0)\n gmm.fit(X)\n\n assert_raise_message(ValueError, \"Invalid value for 'n_samples\",\n gmm.sample, 0)\n\n # Just to make sure the class samples correctly\n n_samples = 20000\n X_s, y_s = gmm.sample(n_samples)\n\n for k in range(n_components):\n if covar_type == 'full':\n assert_array_almost_equal(gmm.covariances_[k],\n np.cov(X_s[y_s == k].T), decimal=1)\n elif covar_type == 'tied':\n assert_array_almost_equal(gmm.covariances_,\n np.cov(X_s[y_s == k].T), decimal=1)\n elif covar_type == 'diag':\n assert_array_almost_equal(gmm.covariances_[k],\n np.diag(np.cov(X_s[y_s == k].T)),\n decimal=1)\n else:\n assert_array_almost_equal(\n gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),\n decimal=1)\n\n means_s = np.array([np.mean(X_s[y_s == k], 0)\n for k in range(n_components)])\n assert_array_almost_equal(gmm.means_, means_s, decimal=1)\n\n # Check shapes of sampled data, see\n # https://github.com/scikit-learn/scikit-learn/issues/7701\n assert_equal(X_s.shape, (n_samples, n_features))\n\n for sample_size in range(1, 100):\n X_s, _ = gmm.sample(sample_size)\n assert_equal(X_s.shape, (sample_size, n_features))\n\n\n@ignore_warnings(category=ConvergenceWarning)\ndef test_init():\n # We check that by increasing the n_init number we have a better solution\n random_state = 0\n rand_data = RandomData(np.random.RandomState(random_state), scale=1)\n n_components = rand_data.n_components\n X = rand_data.X['full']\n\n gmm1 = GaussianMixture(n_components=n_components, n_init=1,\n max_iter=1, random_state=random_state).fit(X)\n gmm2 = GaussianMixture(n_components=n_components, n_init=100,\n max_iter=1, random_state=random_state).fit(X)\n\n assert_greater(gmm2.lower_bound_, gmm1.lower_bound_)\n",
"\"\"\"Testing for K-means\"\"\"\nimport sys\n\nimport numpy as np\nfrom scipy import sparse as sp\n\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import SkipTest\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_raises_regex\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import if_safe_multiprocessing_with_blas\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.validation import _num_samples\nfrom sklearn.base import clone\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom sklearn.utils.extmath import row_norms\nfrom sklearn.metrics.cluster import v_measure_score\nfrom sklearn.cluster import KMeans, k_means\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.cluster.k_means_ import _labels_inertia\nfrom sklearn.cluster.k_means_ import _mini_batch_step\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.externals.six.moves import cStringIO as StringIO\nfrom sklearn.metrics.cluster import homogeneity_score\n\n\n# non centered, sparse centers to check the\ncenters = np.array([\n [0.0, 5.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 4.0, 0.0, 0.0],\n [1.0, 0.0, 0.0, 5.0, 1.0],\n])\nn_samples = 100\nn_clusters, n_features = centers.shape\nX, true_labels = make_blobs(n_samples=n_samples, centers=centers,\n cluster_std=1., random_state=42)\nX_csr = sp.csr_matrix(X)\n\n\ndef test_elkan_results():\n rnd = np.random.RandomState(0)\n X_normal = rnd.normal(size=(50, 10))\n X_blobs, _ = make_blobs(random_state=0)\n km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)\n km_elkan = KMeans(algorithm='elkan', n_clusters=5,\n random_state=0, n_init=1)\n for X in [X_normal, X_blobs]:\n km_full.fit(X)\n km_elkan.fit(X)\n assert_array_almost_equal(km_elkan.cluster_centers_,\n km_full.cluster_centers_)\n assert_array_equal(km_elkan.labels_, km_full.labels_)\n\n\ndef test_labels_assignment_and_inertia():\n # pure numpy implementation as easily auditable reference gold\n # implementation\n rng = np.random.RandomState(42)\n noisy_centers = centers + rng.normal(size=centers.shape)\n labels_gold = - np.ones(n_samples, dtype=np.int)\n mindist = np.empty(n_samples)\n mindist.fill(np.infty)\n for center_id in range(n_clusters):\n dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)\n labels_gold[dist < mindist] = center_id\n mindist = np.minimum(dist, mindist)\n inertia_gold = mindist.sum()\n assert_true((mindist >= 0.0).all())\n assert_true((labels_gold != -1).all())\n\n sample_weight = None\n\n # perform label assignment using the dense array input\n x_squared_norms = (X ** 2).sum(axis=1)\n labels_array, inertia_array = _labels_inertia(\n X, sample_weight, x_squared_norms, noisy_centers)\n assert_array_almost_equal(inertia_array, inertia_gold)\n assert_array_equal(labels_array, labels_gold)\n\n # perform label assignment using the sparse CSR input\n x_squared_norms_from_csr = row_norms(X_csr, squared=True)\n labels_csr, inertia_csr = _labels_inertia(\n X_csr, sample_weight, x_squared_norms_from_csr, noisy_centers)\n assert_array_almost_equal(inertia_csr, inertia_gold)\n 
assert_array_equal(labels_csr, labels_gold)\n\n\ndef test_minibatch_update_consistency():\n # Check that dense and sparse minibatch update give the same results\n rng = np.random.RandomState(42)\n old_centers = centers + rng.normal(size=centers.shape)\n\n new_centers = old_centers.copy()\n new_centers_csr = old_centers.copy()\n\n weight_sums = np.zeros(new_centers.shape[0], dtype=np.double)\n weight_sums_csr = np.zeros(new_centers.shape[0], dtype=np.double)\n\n x_squared_norms = (X ** 2).sum(axis=1)\n x_squared_norms_csr = row_norms(X_csr, squared=True)\n\n buffer = np.zeros(centers.shape[1], dtype=np.double)\n buffer_csr = np.zeros(centers.shape[1], dtype=np.double)\n\n # extract a small minibatch\n X_mb = X[:10]\n X_mb_csr = X_csr[:10]\n x_mb_squared_norms = x_squared_norms[:10]\n x_mb_squared_norms_csr = x_squared_norms_csr[:10]\n\n sample_weight_mb = np.ones(X_mb.shape[0], dtype=np.double)\n\n # step 1: compute the dense minibatch update\n old_inertia, incremental_diff = _mini_batch_step(\n X_mb, sample_weight_mb, x_mb_squared_norms, new_centers, weight_sums,\n buffer, 1, None, random_reassign=False)\n assert_greater(old_inertia, 0.0)\n\n # compute the new inertia on the same batch to check that it decreased\n labels, new_inertia = _labels_inertia(\n X_mb, sample_weight_mb, x_mb_squared_norms, new_centers)\n assert_greater(new_inertia, 0.0)\n assert_less(new_inertia, old_inertia)\n\n # check that the incremental difference computation is matching the\n # final observed value\n effective_diff = np.sum((new_centers - old_centers) ** 2)\n assert_almost_equal(incremental_diff, effective_diff)\n\n # step 2: compute the sparse minibatch update\n old_inertia_csr, incremental_diff_csr = _mini_batch_step(\n X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr,\n weight_sums_csr, buffer_csr, 1, None, random_reassign=False)\n assert_greater(old_inertia_csr, 0.0)\n\n # compute the new inertia on the same batch to check that it decreased\n labels_csr, new_inertia_csr = _labels_inertia(\n X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr)\n assert_greater(new_inertia_csr, 0.0)\n assert_less(new_inertia_csr, old_inertia_csr)\n\n # check that the incremental difference computation is matching the\n # final observed value\n effective_diff = np.sum((new_centers_csr - old_centers) ** 2)\n assert_almost_equal(incremental_diff_csr, effective_diff)\n\n # step 3: check that sparse and dense updates lead to the same results\n assert_array_equal(labels, labels_csr)\n assert_array_almost_equal(new_centers, new_centers_csr)\n assert_almost_equal(incremental_diff, incremental_diff_csr)\n assert_almost_equal(old_inertia, old_inertia_csr)\n assert_almost_equal(new_inertia, new_inertia_csr)\n\n\ndef _check_fitted_model(km):\n # check that the number of clusters centers and distinct labels match\n # the expectation\n centers = km.cluster_centers_\n assert_equal(centers.shape, (n_clusters, n_features))\n\n labels = km.labels_\n assert_equal(np.unique(labels).shape[0], n_clusters)\n\n # check that the labels assignment are perfect (up to a permutation)\n assert_equal(v_measure_score(true_labels, labels), 1.0)\n assert_greater(km.inertia_, 0.0)\n\n # check error on dataset being too small\n assert_raise_message(ValueError, \"n_samples=1 should be >= n_clusters=%d\"\n % km.n_clusters, km.fit, [[0., 1.]])\n\n\ndef test_k_means_plus_plus_init():\n km = KMeans(init=\"k-means++\", n_clusters=n_clusters,\n random_state=42).fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_new_centers():\n 
# Explore the part of the code where a new center is reassigned\n X = np.array([[0, 0, 1, 1],\n [0, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 1, 0, 0]])\n labels = [0, 1, 2, 1, 1, 2]\n bad_centers = np.array([[+0, 1, 0, 0],\n [.2, 0, .2, .2],\n [+0, 0, 0, 0]])\n\n km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,\n random_state=1)\n for this_X in (X, sp.coo_matrix(X)):\n km.fit(this_X)\n this_labels = km.labels_\n # Reorder the labels so that the first instance is in cluster 0,\n # the second in cluster 1, ...\n this_labels = np.unique(this_labels, return_index=True)[1][this_labels]\n np.testing.assert_array_equal(this_labels, labels)\n\n\n@if_safe_multiprocessing_with_blas\ndef test_k_means_plus_plus_init_2_jobs():\n if sys.version_info[:2] < (3, 4):\n raise SkipTest(\n \"Possible multi-process bug with some BLAS under Python < 3.4\")\n\n km = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_jobs=2,\n random_state=42).fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_precompute_distances_flag():\n # check that a warning is raised if the precompute_distances flag is not\n # supported\n km = KMeans(precompute_distances=\"wrong\")\n assert_raises(ValueError, km.fit, X)\n\n\ndef test_k_means_plus_plus_init_sparse():\n km = KMeans(init=\"k-means++\", n_clusters=n_clusters, random_state=42)\n km.fit(X_csr)\n _check_fitted_model(km)\n\n\ndef test_k_means_random_init():\n km = KMeans(init=\"random\", n_clusters=n_clusters, random_state=42)\n km.fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_random_init_sparse():\n km = KMeans(init=\"random\", n_clusters=n_clusters, random_state=42)\n km.fit(X_csr)\n _check_fitted_model(km)\n\n\ndef test_k_means_plus_plus_init_not_precomputed():\n km = KMeans(init=\"k-means++\", n_clusters=n_clusters, random_state=42,\n precompute_distances=False).fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_random_init_not_precomputed():\n km = KMeans(init=\"random\", n_clusters=n_clusters, random_state=42,\n precompute_distances=False).fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_perfect_init():\n km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,\n n_init=1)\n km.fit(X)\n _check_fitted_model(km)\n\n\ndef test_k_means_n_init():\n rnd = np.random.RandomState(0)\n X = rnd.normal(size=(40, 2))\n\n # two regression tests on bad n_init argument\n # previous bug: n_init <= 0 threw non-informative TypeError (#3858)\n assert_raises_regex(ValueError, \"n_init\", KMeans(n_init=0).fit, X)\n assert_raises_regex(ValueError, \"n_init\", KMeans(n_init=-1).fit, X)\n\n\ndef test_k_means_explicit_init_shape():\n # test for sensible errors when giving explicit init\n # with wrong number of features or clusters\n rnd = np.random.RandomState(0)\n X = rnd.normal(size=(40, 3))\n for Class in [KMeans, MiniBatchKMeans]:\n # mismatch of number of features\n km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))\n msg = \"does not match the number of features of the data\"\n assert_raises_regex(ValueError, msg, km.fit, X)\n # for callable init\n km = Class(n_init=1,\n init=lambda X_, k, random_state: X_[:, :2],\n n_clusters=len(X))\n assert_raises_regex(ValueError, msg, km.fit, X)\n # mismatch of number of clusters\n msg = \"does not match the number of clusters\"\n km = Class(n_init=1, init=X[:2, :], n_clusters=3)\n assert_raises_regex(ValueError, msg, km.fit, X)\n # for callable init\n km = Class(n_init=1,\n init=lambda X_, k, random_state: X_[:2, :],\n n_clusters=3)\n assert_raises_regex(ValueError, msg, 
km.fit, X)\n\n\ndef test_k_means_fortran_aligned_data():\n # Check the KMeans will work well, even if X is a fortran-aligned data.\n X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])\n centers = np.array([[0, 0], [0, 1]])\n labels = np.array([0, 1, 1])\n km = KMeans(n_init=1, init=centers, precompute_distances=False,\n random_state=42, n_clusters=2)\n km.fit(X)\n assert_array_almost_equal(km.cluster_centers_, centers)\n assert_array_equal(km.labels_, labels)\n\n\ndef test_mb_k_means_plus_plus_init_dense_array():\n mb_k_means = MiniBatchKMeans(init=\"k-means++\", n_clusters=n_clusters,\n random_state=42)\n mb_k_means.fit(X)\n _check_fitted_model(mb_k_means)\n\n\ndef test_mb_kmeans_verbose():\n mb_k_means = MiniBatchKMeans(init=\"k-means++\", n_clusters=n_clusters,\n random_state=42, verbose=1)\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n mb_k_means.fit(X)\n finally:\n sys.stdout = old_stdout\n\n\ndef test_mb_k_means_plus_plus_init_sparse_matrix():\n mb_k_means = MiniBatchKMeans(init=\"k-means++\", n_clusters=n_clusters,\n random_state=42)\n mb_k_means.fit(X_csr)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_init_with_large_k():\n mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)\n # Check that a warning is raised, as the number clusters is larger\n # than the init_size\n assert_warns(RuntimeWarning, mb_k_means.fit, X)\n\n\ndef test_minibatch_k_means_random_init_dense_array():\n # increase n_init to make random init stable enough\n mb_k_means = MiniBatchKMeans(init=\"random\", n_clusters=n_clusters,\n random_state=42, n_init=10).fit(X)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_k_means_random_init_sparse_csr():\n # increase n_init to make random init stable enough\n mb_k_means = MiniBatchKMeans(init=\"random\", n_clusters=n_clusters,\n random_state=42, n_init=10).fit(X_csr)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_k_means_perfect_init_dense_array():\n mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,\n random_state=42, n_init=1).fit(X)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_k_means_init_multiple_runs_with_explicit_centers():\n mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,\n random_state=42, n_init=10)\n assert_warns(RuntimeWarning, mb_k_means.fit, X)\n\n\ndef test_minibatch_k_means_perfect_init_sparse_csr():\n mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,\n random_state=42, n_init=1).fit(X_csr)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_sensible_reassign_fit():\n # check if identical initial clusters are reassigned\n # also a regression test for when there are more desired reassignments than\n # samples.\n zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,\n cluster_std=1., random_state=42)\n zeroed_X[::2, :] = 0\n mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,\n init=\"random\")\n mb_k_means.fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)\n\n # do the same with batch-size > X.shape[0] (regression test)\n mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,\n random_state=42, init=\"random\")\n mb_k_means.fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)\n\n\ndef test_minibatch_sensible_reassign_partial_fit():\n zeroed_X, true_labels = make_blobs(n_samples=n_samples, 
centers=5,\n cluster_std=1., random_state=42)\n zeroed_X[::2, :] = 0\n mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init=\"random\")\n for i in range(100):\n mb_k_means.partial_fit(zeroed_X)\n # there should not be too many exact zero cluster centers\n assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)\n\n\ndef test_minibatch_reassign():\n # Give a perfect initialization, but a large reassignment_ratio,\n # as a result all the centers should be reassigned and the model\n # should no longer be good\n sample_weight = np.ones(X.shape[0], dtype=X.dtype)\n for this_X in (X, X_csr):\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,\n random_state=42)\n mb_k_means.fit(this_X)\n\n score_before = mb_k_means.score(this_X)\n try:\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n # Turn on verbosity to smoke test the display code\n _mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),\n mb_k_means.cluster_centers_,\n mb_k_means.counts_,\n np.zeros(X.shape[1], np.double),\n False, distances=np.zeros(X.shape[0]),\n random_reassign=True, random_state=42,\n reassignment_ratio=1, verbose=True)\n finally:\n sys.stdout = old_stdout\n assert_greater(score_before, mb_k_means.score(this_X))\n\n # Give a perfect initialization, with a small reassignment_ratio,\n # no center should be reassigned\n for this_X in (X, X_csr):\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,\n init=centers.copy(),\n random_state=42, n_init=1)\n mb_k_means.fit(this_X)\n clusters_before = mb_k_means.cluster_centers_\n # Turn on verbosity to smoke test the display code\n _mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),\n mb_k_means.cluster_centers_,\n mb_k_means.counts_,\n np.zeros(X.shape[1], np.double),\n False, distances=np.zeros(X.shape[0]),\n random_reassign=True, random_state=42,\n reassignment_ratio=1e-15)\n assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)\n\n\ndef test_minibatch_with_many_reassignments():\n # Test for the case that the number of clusters to reassign is bigger\n # than the batch_size\n n_samples = 550\n rnd = np.random.RandomState(42)\n X = rnd.uniform(size=(n_samples, 10))\n # Check that the fit works if n_clusters is bigger than the batch_size.\n # Run the test with 550 clusters and 550 samples, because it turned out\n # that this values ensure that the number of clusters to reassign\n # is always bigger than the batch_size\n n_clusters = 550\n MiniBatchKMeans(n_clusters=n_clusters,\n batch_size=100,\n init_size=n_samples,\n random_state=42).fit(X)\n\n\ndef test_sparse_mb_k_means_callable_init():\n\n def test_init(X, k, random_state):\n return centers\n\n # Small test to check that giving the wrong number of centers\n # raises a meaningful error\n msg = \"does not match the number of clusters\"\n assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,\n random_state=42).fit,\n X_csr)\n\n # Now check that the fit actually works\n mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,\n random_state=42).fit(X_csr)\n _check_fitted_model(mb_k_means)\n\n\ndef test_mini_batch_k_means_random_init_partial_fit():\n km = MiniBatchKMeans(n_clusters=n_clusters, init=\"random\", random_state=42)\n\n # use the partial_fit API for online learning\n for X_minibatch in np.array_split(X, 10):\n km.partial_fit(X_minibatch)\n\n # compute the labeling on the complete dataset\n labels = km.predict(X)\n assert_equal(v_measure_score(true_labels, labels), 1.0)\n\n\ndef 
test_minibatch_default_init_size():\n mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,\n batch_size=10, random_state=42,\n n_init=1).fit(X)\n assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_tol():\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,\n random_state=42, tol=.01).fit(X)\n _check_fitted_model(mb_k_means)\n\n\ndef test_minibatch_set_init_size():\n mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,\n init_size=666, random_state=42,\n n_init=1).fit(X)\n assert_equal(mb_k_means.init_size, 666)\n assert_equal(mb_k_means.init_size_, n_samples)\n _check_fitted_model(mb_k_means)\n\n\ndef test_k_means_invalid_init():\n km = KMeans(init=\"invalid\", n_init=1, n_clusters=n_clusters)\n assert_raises(ValueError, km.fit, X)\n\n\ndef test_mini_match_k_means_invalid_init():\n km = MiniBatchKMeans(init=\"invalid\", n_init=1, n_clusters=n_clusters)\n assert_raises(ValueError, km.fit, X)\n\n\ndef test_k_means_copyx():\n # Check if copy_x=False returns nearly equal X after de-centering.\n my_X = X.copy()\n km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)\n km.fit(my_X)\n _check_fitted_model(km)\n\n # check if my_X is centered\n assert_array_almost_equal(my_X, X)\n\n\ndef test_k_means_non_collapsed():\n # Check k_means with a bad initialization does not yield a singleton\n # Starting with bad centers that are quickly ignored should not\n # result in a repositioning of the centers to the center of mass that\n # would lead to collapsed centers which in turns make the clustering\n # dependent of the numerical unstabilities.\n my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])\n array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])\n km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)\n km.fit(my_X)\n\n # centers must not been collapsed\n assert_equal(len(np.unique(km.labels_)), 3)\n\n centers = km.cluster_centers_\n assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)\n assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)\n assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)\n\n\ndef test_predict():\n km = KMeans(n_clusters=n_clusters, random_state=42)\n\n km.fit(X)\n\n # sanity check: predict centroid labels\n pred = km.predict(km.cluster_centers_)\n assert_array_equal(pred, np.arange(n_clusters))\n\n # sanity check: re-predict labeling for training set samples\n pred = km.predict(X)\n assert_array_equal(pred, km.labels_)\n\n # re-predict labels for training set using fit_predict\n pred = km.fit_predict(X)\n assert_array_equal(pred, km.labels_)\n\n\ndef test_score():\n\n km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)\n s1 = km1.fit(X).score(X)\n km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)\n s2 = km2.fit(X).score(X)\n assert_greater(s2, s1)\n\n km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,\n algorithm='elkan')\n s1 = km1.fit(X).score(X)\n km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,\n algorithm='elkan')\n s2 = km2.fit(X).score(X)\n assert_greater(s2, s1)\n\n\ndef test_predict_minibatch_dense_input():\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)\n\n # sanity check: predict centroid labels\n pred = mb_k_means.predict(mb_k_means.cluster_centers_)\n assert_array_equal(pred, np.arange(n_clusters))\n\n # sanity check: re-predict labeling for 
training set samples\n pred = mb_k_means.predict(X)\n assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)\n\n\ndef test_predict_minibatch_kmeanspp_init_sparse_input():\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',\n n_init=10).fit(X_csr)\n\n # sanity check: re-predict labeling for training set samples\n assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)\n\n # sanity check: predict centroid labels\n pred = mb_k_means.predict(mb_k_means.cluster_centers_)\n assert_array_equal(pred, np.arange(n_clusters))\n\n # check that models trained on sparse input also works for dense input at\n # predict time\n assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)\n\n\ndef test_predict_minibatch_random_init_sparse_input():\n mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',\n n_init=10).fit(X_csr)\n\n # sanity check: re-predict labeling for training set samples\n assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)\n\n # sanity check: predict centroid labels\n pred = mb_k_means.predict(mb_k_means.cluster_centers_)\n assert_array_equal(pred, np.arange(n_clusters))\n\n # check that models trained on sparse input also works for dense input at\n # predict time\n assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)\n\n\ndef test_int_input():\n X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]\n for dtype in [np.int32, np.int64]:\n X_int = np.array(X_list, dtype=dtype)\n X_int_csr = sp.csr_matrix(X_int)\n init_int = X_int[:2]\n\n fitted_models = [\n KMeans(n_clusters=2).fit(X_int),\n KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),\n # mini batch kmeans is very unstable on such a small dataset hence\n # we use many inits\n MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),\n MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(\n X_int_csr),\n MiniBatchKMeans(n_clusters=2, batch_size=2,\n init=init_int, n_init=1).fit(X_int),\n MiniBatchKMeans(n_clusters=2, batch_size=2,\n init=init_int, n_init=1).fit(X_int_csr),\n ]\n\n for km in fitted_models:\n assert_equal(km.cluster_centers_.dtype, np.float64)\n\n expected_labels = [0, 1, 1, 0, 0, 1]\n scores = np.array([v_measure_score(expected_labels, km.labels_)\n for km in fitted_models])\n assert_array_almost_equal(scores, np.ones(scores.shape[0]))\n\n\ndef test_transform():\n km = KMeans(n_clusters=n_clusters)\n km.fit(X)\n X_new = km.transform(km.cluster_centers_)\n\n for c in range(n_clusters):\n assert_equal(X_new[c, c], 0)\n for c2 in range(n_clusters):\n if c != c2:\n assert_greater(X_new[c, c2], 0)\n\n\ndef test_fit_transform():\n X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)\n X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)\n assert_array_almost_equal(X1, X2)\n\n\ndef test_predict_equal_labels():\n km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,\n algorithm='full')\n km.fit(X)\n assert_array_equal(km.predict(X), km.labels_)\n\n km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,\n algorithm='elkan')\n km.fit(X)\n assert_array_equal(km.predict(X), km.labels_)\n\n\ndef test_full_vs_elkan():\n\n km1 = KMeans(algorithm='full', random_state=13)\n km2 = KMeans(algorithm='elkan', random_state=13)\n\n km1.fit(X)\n km2.fit(X)\n\n homogeneity_score(km1.predict(X), km2.predict(X)) == 1.0\n\n\ndef test_n_init():\n # Check that increasing the number of init increases the quality\n n_runs = 5\n n_init_range = [1, 5, 10]\n inertia = np.zeros((len(n_init_range), n_runs))\n for i, n_init 
in enumerate(n_init_range):\n for j in range(n_runs):\n km = KMeans(n_clusters=n_clusters, init=\"random\", n_init=n_init,\n random_state=j).fit(X)\n inertia[i, j] = km.inertia_\n\n inertia = inertia.mean(axis=1)\n failure_msg = (\"Inertia %r should be decreasing\"\n \" when n_init is increasing.\") % list(inertia)\n for i in range(len(n_init_range) - 1):\n assert_true(inertia[i] >= inertia[i + 1], failure_msg)\n\n\ndef test_k_means_function():\n # test calling the k_means function directly\n # catch output\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,\n sample_weight=None,\n verbose=True)\n finally:\n sys.stdout = old_stdout\n centers = cluster_centers\n assert_equal(centers.shape, (n_clusters, n_features))\n\n labels = labels\n assert_equal(np.unique(labels).shape[0], n_clusters)\n\n # check that the labels assignment are perfect (up to a permutation)\n assert_equal(v_measure_score(true_labels, labels), 1.0)\n assert_greater(inertia, 0.0)\n\n # check warning when centers are passed\n assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,\n sample_weight=None, init=centers)\n\n # to many clusters desired\n assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1,\n sample_weight=None)\n\n # kmeans for algorithm='elkan' raises TypeError on sparse matrix\n assert_raise_message(TypeError, \"algorithm='elkan' not supported for \"\n \"sparse input X\", k_means, X=X_csr, n_clusters=2,\n sample_weight=None, algorithm=\"elkan\")\n\n\ndef test_x_squared_norms_init_centroids():\n \"\"\"Test that x_squared_norms can be None in _init_centroids\"\"\"\n from sklearn.cluster.k_means_ import _init_centroids\n\n X_norms = np.sum(X**2, axis=1)\n precompute = _init_centroids(\n X, 3, \"k-means++\", random_state=0, x_squared_norms=X_norms)\n assert_array_almost_equal(\n precompute,\n _init_centroids(X, 3, \"k-means++\", random_state=0))\n\n\ndef test_max_iter_error():\n\n km = KMeans(max_iter=-1)\n assert_raise_message(ValueError, 'Number of iterations should be',\n km.fit, X)\n\n\ndef test_float_precision():\n km = KMeans(n_init=1, random_state=30)\n mb_km = MiniBatchKMeans(n_init=1, random_state=30)\n\n inertia = {}\n X_new = {}\n centers = {}\n\n for estimator in [km, mb_km]:\n for is_sparse in [False, True]:\n for dtype in [np.float64, np.float32]:\n if is_sparse:\n X_test = sp.csr_matrix(X_csr, dtype=dtype)\n else:\n X_test = X.astype(dtype)\n estimator.fit(X_test)\n # dtype of cluster centers has to be the dtype of the input\n # data\n assert_equal(estimator.cluster_centers_.dtype, dtype)\n inertia[dtype] = estimator.inertia_\n X_new[dtype] = estimator.transform(X_test)\n centers[dtype] = estimator.cluster_centers_\n # ensure the extracted row is a 2d array\n assert_equal(estimator.predict(X_test[:1]),\n estimator.labels_[0])\n if hasattr(estimator, 'partial_fit'):\n estimator.partial_fit(X_test[0:3])\n # dtype of cluster centers has to stay the same after\n # partial_fit\n assert_equal(estimator.cluster_centers_.dtype, dtype)\n\n # compare arrays with low precision since the difference between\n # 32 and 64 bit sometimes makes a difference up to the 4th decimal\n # place\n assert_array_almost_equal(inertia[np.float32], inertia[np.float64],\n decimal=4)\n assert_array_almost_equal(X_new[np.float32], X_new[np.float64],\n decimal=4)\n assert_array_almost_equal(centers[np.float32], centers[np.float64],\n decimal=4)\n\n\ndef test_k_means_init_centers():\n # This test is used to check KMeans won't mutate the 
user provided input\n # array silently even if input data and init centers have the same type\n X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])\n init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])\n for dtype in [np.int32, np.int64, np.float32, np.float64]:\n X_test = dtype(X_small)\n init_centers_test = dtype(init_centers)\n assert_array_equal(init_centers, init_centers_test)\n km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)\n km.fit(X_test)\n assert_equal(False, np.may_share_memory(km.cluster_centers_,\n init_centers))\n\n\ndef test_sparse_k_means_init_centers():\n from sklearn.datasets import load_iris\n\n iris = load_iris()\n X = iris.data\n\n # Get a local optimum\n centers = KMeans(n_clusters=3).fit(X).cluster_centers_\n\n # Fit starting from a local optimum shouldn't change the solution\n np.testing.assert_allclose(\n centers,\n KMeans(n_clusters=3,\n init=centers,\n n_init=1).fit(X).cluster_centers_\n )\n\n # The same should be true when X is sparse\n X_sparse = sp.csr_matrix(X)\n np.testing.assert_allclose(\n centers,\n KMeans(n_clusters=3,\n init=centers,\n n_init=1).fit(X_sparse).cluster_centers_\n )\n\n\ndef test_sparse_validate_centers():\n from sklearn.datasets import load_iris\n\n iris = load_iris()\n X = iris.data\n\n # Get a local optimum\n centers = KMeans(n_clusters=4).fit(X).cluster_centers_\n\n # Test that a ValueError is raised for validate_center_shape\n classifier = KMeans(n_clusters=3, init=centers, n_init=1)\n\n msg = \"The shape of the initial centers \\(\\(4L?, 4L?\\)\\) \" \\\n \"does not match the number of clusters 3\"\n assert_raises_regex(ValueError, msg, classifier.fit, X)\n\n\ndef test_less_centers_than_unique_points():\n X = np.asarray([[0, 0],\n [0, 1],\n [1, 0],\n [1, 0]]) # last point is duplicated\n\n km = KMeans(n_clusters=4).fit(X)\n\n # only three distinct points, so only three clusters\n # can have points assigned to them\n assert_equal(set(km.labels_), set(range(3)))\n\n # k_means should warn that fewer labels than cluster\n # centers have been used\n msg = (\"Number of distinct clusters (3) found smaller than \"\n \"n_clusters (4). 
Possibly due to duplicate points in X.\")\n assert_warns_message(ConvergenceWarning, msg, k_means, X,\n sample_weight=None, n_clusters=4)\n\n\ndef _sort_centers(centers):\n return np.sort(centers, axis=0)\n\n\ndef test_weighted_vs_repeated():\n # a sample weight of N should yield the same result as an N-fold\n # repetition of the sample\n sample_weight = np.random.randint(1, 5, size=n_samples)\n X_repeat = np.repeat(X, sample_weight, axis=0)\n estimators = [KMeans(init=\"k-means++\", n_clusters=n_clusters,\n random_state=42),\n KMeans(init=\"random\", n_clusters=n_clusters,\n random_state=42),\n KMeans(init=centers.copy(), n_clusters=n_clusters,\n random_state=42),\n MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,\n random_state=42)]\n for estimator in estimators:\n est_weighted = clone(estimator).fit(X, sample_weight=sample_weight)\n est_repeated = clone(estimator).fit(X_repeat)\n repeated_labels = np.repeat(est_weighted.labels_, sample_weight)\n assert_almost_equal(v_measure_score(est_repeated.labels_,\n repeated_labels), 1.0)\n if not isinstance(estimator, MiniBatchKMeans):\n assert_almost_equal(_sort_centers(est_weighted.cluster_centers_),\n _sort_centers(est_repeated.cluster_centers_))\n\n\ndef test_unit_weights_vs_no_weights():\n # not passing any sample weights should be equivalent\n # to all weights equal to one\n sample_weight = np.ones(n_samples)\n for estimator in [KMeans(n_clusters=n_clusters, random_state=42),\n MiniBatchKMeans(n_clusters=n_clusters, random_state=42)]:\n est_1 = clone(estimator).fit(X)\n est_2 = clone(estimator).fit(X, sample_weight=sample_weight)\n assert_almost_equal(v_measure_score(est_1.labels_, est_2.labels_), 1.0)\n assert_almost_equal(_sort_centers(est_1.cluster_centers_),\n _sort_centers(est_2.cluster_centers_))\n\n\ndef test_scaled_weights():\n # scaling all sample weights by a common factor\n # shouldn't change the result\n sample_weight = np.ones(n_samples)\n for estimator in [KMeans(n_clusters=n_clusters, random_state=42),\n MiniBatchKMeans(n_clusters=n_clusters, random_state=42)]:\n est_1 = clone(estimator).fit(X)\n est_2 = clone(estimator).fit(X, sample_weight=0.5*sample_weight)\n assert_almost_equal(v_measure_score(est_1.labels_, est_2.labels_), 1.0)\n assert_almost_equal(_sort_centers(est_1.cluster_centers_),\n _sort_centers(est_2.cluster_centers_))\n\n\ndef test_sample_weight_length():\n # check that an error is raised when passing sample weights\n # with an incompatible shape\n km = KMeans(n_clusters=n_clusters, random_state=42)\n assert_raises_regex(ValueError, 'len\\(sample_weight\\)', km.fit, X,\n sample_weight=np.ones(2))\n\n\ndef test_check_sample_weight():\n from sklearn.cluster.k_means_ import _check_sample_weight\n sample_weight = None\n checked_sample_weight = _check_sample_weight(X, sample_weight)\n assert_equal(_num_samples(X), _num_samples(checked_sample_weight))\n assert_almost_equal(checked_sample_weight.sum(), _num_samples(X))\n assert_equal(X.dtype, checked_sample_weight.dtype)\n",
"\"\"\" Principal Component Analysis\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Michael Eickenberg <[email protected]>\n# Giorgio Patrini <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom math import log, sqrt\nimport numbers\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.special import gammaln\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import svds\n\nfrom ..externals import six\n\nfrom .base import _BasePCA\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import deprecated\nfrom ..utils import check_random_state, as_float_array\nfrom ..utils import check_array\nfrom ..utils.extmath import fast_logdet, randomized_svd, svd_flip\nfrom ..utils.extmath import stable_cumsum\nfrom ..utils.validation import check_is_fitted\n\n\ndef _assess_dimension_(spectrum, rank, n_samples, n_features):\n \"\"\"Compute the likelihood of a rank ``rank`` dataset\n\n The dataset is assumed to be embedded in gaussian noise of shape(n,\n dimf) having spectrum ``spectrum``.\n\n Parameters\n ----------\n spectrum : array of shape (n)\n Data spectrum.\n rank : int\n Tested rank value.\n n_samples : int\n Number of samples.\n n_features : int\n Number of features.\n\n Returns\n -------\n ll : float,\n The log-likelihood\n\n Notes\n -----\n This implements the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n \"\"\"\n if rank > len(spectrum):\n raise ValueError(\"The tested rank cannot exceed the rank of the\"\n \" dataset\")\n\n pu = -rank * log(2.)\n for i in range(rank):\n pu += (gammaln((n_features - i) / 2.) -\n log(np.pi) * (n_features - i) / 2.)\n\n pl = np.sum(np.log(spectrum[:rank]))\n pl = -pl * n_samples / 2.\n\n if rank == n_features:\n pv = 0\n v = 1\n else:\n v = np.sum(spectrum[rank:]) / (n_features - rank)\n pv = -np.log(v) * n_samples * (n_features - rank) / 2.\n\n m = n_features * rank - rank * (rank + 1.) / 2.\n pp = log(2. * np.pi) * (m + rank + 1.) / 2.\n\n pa = 0.\n spectrum_ = spectrum.copy()\n spectrum_[rank:n_features] = v\n for i in range(rank):\n for j in range(i + 1, len(spectrum)):\n pa += log((spectrum[i] - spectrum[j]) *\n (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)\n\n ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.\n\n return ll\n\n\ndef _infer_dimension_(spectrum, n_samples, n_features):\n \"\"\"Infers the dimension of a dataset of shape (n_samples, n_features)\n\n The dataset is described by its spectrum `spectrum`.\n \"\"\"\n n_spectrum = len(spectrum)\n ll = np.empty(n_spectrum)\n for rank in range(n_spectrum):\n ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)\n return ll.argmax()\n\n\nclass PCA(_BasePCA):\n \"\"\"Principal component analysis (PCA)\n\n Linear dimensionality reduction using Singular Value Decomposition of the\n data to project it to a lower dimensional space.\n\n It uses the LAPACK implementation of the full SVD or a randomized truncated\n SVD by the method of Halko et al. 2009, depending on the shape of the input\n data and the number of components to extract.\n\n It can also use the scipy.sparse.linalg ARPACK implementation of the\n truncated SVD.\n\n Notice that this class does not support sparse input. 
See\n :class:`TruncatedSVD` for an alternative with sparse data.\n\n Read more in the :ref:`User Guide <PCA>`.\n\n Parameters\n ----------\n n_components : int, float, None or string\n Number of components to keep.\n if n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\n If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka\\'s\n MLE is used to guess the dimension. Use of ``n_components == 'mle'``\n will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\n If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\n number of components such that the amount of variance that needs to be\n explained is greater than the percentage specified by n_components.\n\n If ``svd_solver == 'arpack'``, the number of components must be\n strictly less than the minimum of n_features and n_samples.\n\n Hence, the None case results in::\n\n n_components == min(n_samples, n_features) - 1\n\n copy : bool (default True)\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n whiten : bool, optional (default False)\n When True (False by default) the `components_` vectors are multiplied\n by the square root of n_samples and then divided by the singular values\n to ensure uncorrelated outputs with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n\n svd_solver : string {'auto', 'full', 'arpack', 'randomized'}\n auto :\n the solver is selected by a default policy based on `X.shape` and\n `n_components`: if the input data is larger than 500x500 and the\n number of components to extract is lower than 80% of the smallest\n dimension of the data, then the more efficient 'randomized'\n method is enabled. Otherwise the exact full SVD is computed and\n optionally truncated afterwards.\n full :\n run exact full SVD calling the standard LAPACK solver via\n `scipy.linalg.svd` and select the components by postprocessing\n arpack :\n run SVD truncated to n_components calling ARPACK solver via\n `scipy.sparse.linalg.svds`. It requires strictly\n 0 < n_components < min(X.shape)\n randomized :\n run randomized SVD by the method of Halko et al.\n\n .. versionadded:: 0.18.0\n\n tol : float >= 0, optional (default .0)\n Tolerance for singular values computed by svd_solver == 'arpack'.\n\n .. versionadded:: 0.18.0\n\n iterated_power : int >= 0, or 'auto', (default 'auto')\n Number of iterations for the power method computed by\n svd_solver == 'randomized'.\n\n .. versionadded:: 0.18.0\n\n random_state : int, RandomState instance or None, optional (default None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`. Used when ``svd_solver`` == 'arpack' or 'randomized'.\n\n .. versionadded:: 0.18.0\n\n Attributes\n ----------\n components_ : array, shape (n_components, n_features)\n Principal axes in feature space, representing the directions of\n maximum variance in the data. 
The components are sorted by\n ``explained_variance_``.\n\n explained_variance_ : array, shape (n_components,)\n The amount of variance explained by each of the selected components.\n\n Equal to n_components largest eigenvalues\n of the covariance matrix of X.\n\n .. versionadded:: 0.18\n\n explained_variance_ratio_ : array, shape (n_components,)\n Percentage of variance explained by each of the selected components.\n\n If ``n_components`` is not set then all components are stored and the\n sum of the ratios is equal to 1.0.\n\n singular_values_ : array, shape (n_components,)\n The singular values corresponding to each of the selected components.\n The singular values are equal to the 2-norms of the ``n_components``\n variables in the lower-dimensional space.\n\n mean_ : array, shape (n_features,)\n Per-feature empirical mean, estimated from the training set.\n\n Equal to `X.mean(axis=0)`.\n\n n_components_ : int\n The estimated number of components. When n_components is set\n to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this\n number is estimated from input data. Otherwise it equals the parameter\n n_components, or the lesser value of n_features and n_samples\n if n_components is None.\n\n noise_variance_ : float\n The estimated noise covariance following the Probabilistic PCA model\n from Tipping and Bishop 1999. See \"Pattern Recognition and\n Machine Learning\" by C. Bishop, 12.2.1 p. 574 or\n http://www.miketipping.com/papers/met-mppca.pdf. It is required to\n compute the estimated data covariance and score samples.\n\n Equal to the average of (min(n_features, n_samples) - n_components)\n smallest eigenvalues of the covariance matrix of X.\n\n References\n ----------\n For n_components == 'mle', this class uses the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n\n Implements the probabilistic PCA model from:\n M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,\n Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622\n via the score and score_samples methods.\n See http://www.miketipping.com/papers/met-mppca.pdf\n\n For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.\n\n For svd_solver == 'randomized', see:\n `Finding structure with randomness: Stochastic algorithms\n for constructing approximate matrix decompositions Halko, et al., 2009\n (arXiv:909)`\n `A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`\n\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.decomposition import PCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = PCA(n_components=2)\n >>> pca.fit(X)\n PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,\n svd_solver='auto', tol=0.0, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [0.9924... 0.0075...]\n >>> print(pca.singular_values_) # doctest: +ELLIPSIS\n [6.30061... 0.54980...]\n\n >>> pca = PCA(n_components=2, svd_solver='full')\n >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,\n svd_solver='full', tol=0.0, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [0.9924... 0.00755...]\n >>> print(pca.singular_values_) # doctest: +ELLIPSIS\n [6.30061... 
0.54980...]\n\n >>> pca = PCA(n_components=1, svd_solver='arpack')\n >>> pca.fit(X)\n PCA(copy=True, iterated_power='auto', n_components=1, random_state=None,\n svd_solver='arpack', tol=0.0, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [0.99244...]\n >>> print(pca.singular_values_) # doctest: +ELLIPSIS\n [6.30061...]\n\n See also\n --------\n KernelPCA\n SparsePCA\n TruncatedSVD\n IncrementalPCA\n \"\"\"\n\n def __init__(self, n_components=None, copy=True, whiten=False,\n svd_solver='auto', tol=0.0, iterated_power='auto',\n random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(X)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n U, S, V = self._fit(X)\n U = U[:, :self.n_components_]\n\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0] - 1)\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n return U\n\n def _fit(self, X):\n \"\"\"Dispatch to the right submethod depending on the chosen solver.\"\"\"\n\n # Raise an error for sparse input.\n # This is more informative than the generic one raised by check_array.\n if issparse(X):\n raise TypeError('PCA does not support sparse input. 
See '\n 'TruncatedSVD for a possible alternative.')\n\n X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,\n copy=self.copy)\n\n # Handle n_components==None\n if self.n_components is None:\n if self.svd_solver != 'arpack':\n n_components = min(X.shape)\n else:\n n_components = min(X.shape) - 1\n else:\n n_components = self.n_components\n\n # Handle svd_solver\n svd_solver = self.svd_solver\n if svd_solver == 'auto':\n # Small problem or n_components == 'mle', just call full PCA\n if max(X.shape) <= 500 or n_components == 'mle':\n svd_solver = 'full'\n elif n_components >= 1 and n_components < .8 * min(X.shape):\n svd_solver = 'randomized'\n # This is also the case of n_components in (0,1)\n else:\n svd_solver = 'full'\n\n # Call different fits for either full or truncated SVD\n if svd_solver == 'full':\n return self._fit_full(X, n_components)\n elif svd_solver in ['arpack', 'randomized']:\n return self._fit_truncated(X, n_components, svd_solver)\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(svd_solver))\n\n def _fit_full(self, X, n_components):\n \"\"\"Fit the model by computing full SVD on X\"\"\"\n n_samples, n_features = X.shape\n\n if n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n elif not 0 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 0 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='full'\"\n % (n_components, min(n_samples, n_features)))\n elif n_components >= 1:\n if not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, \"\n \"was of type=%r\"\n % (n_components, type(n_components)))\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n\n U, S, V = linalg.svd(X, full_matrices=False)\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U, V)\n\n components_ = V\n\n # Get variance explained by singular values\n explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / total_var\n singular_values_ = S.copy() # Store the singular values.\n\n # Postprocess the number of components required\n if n_components == 'mle':\n n_components = \\\n _infer_dimension_(explained_variance_, n_samples, n_features)\n elif 0 < n_components < 1.0:\n # number of components for which the cumulated explained\n # variance percentage is superior to the desired threshold\n ratio_cumsum = stable_cumsum(explained_variance_ratio_)\n n_components = np.searchsorted(ratio_cumsum, n_components) + 1\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 
12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = singular_values_[:n_components]\n\n return U, S, V\n\n def _fit_truncated(self, X, n_components, svd_solver):\n \"\"\"Fit the model by computing truncated SVD (by ARPACK or randomized)\n on X\n \"\"\"\n n_samples, n_features = X.shape\n\n if isinstance(n_components, six.string_types):\n raise ValueError(\"n_components=%r cannot be a string \"\n \"with svd_solver='%s'\"\n % (n_components, svd_solver))\n elif not 1 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 1 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='%s'\"\n % (n_components, min(n_samples, n_features),\n svd_solver))\n elif not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, was of type=%r\"\n % (n_components, type(n_components)))\n elif svd_solver == 'arpack' and n_components == min(n_samples,\n n_features):\n raise ValueError(\"n_components=%r must be strictly less than \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='%s'\"\n % (n_components, min(n_samples, n_features),\n svd_solver))\n\n random_state = check_random_state(self.random_state)\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n\n if svd_solver == 'arpack':\n # random init solution, as ARPACK does it internally\n v0 = random_state.uniform(-1, 1, size=min(X.shape))\n U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)\n # svds doesn't abide by scipy.linalg.svd/randomized_svd\n # conventions, so reverse its outputs.\n S = S[::-1]\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U[:, ::-1], V[::-1])\n\n elif svd_solver == 'randomized':\n # sign flipping is done inside\n U, S, V = randomized_svd(X, n_components=n_components,\n n_iter=self.iterated_power,\n flip_sign=True,\n random_state=random_state)\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = V\n self.n_components_ = n_components\n\n # Get variance explained by singular values\n self.explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = np.var(X, ddof=1, axis=0)\n self.explained_variance_ratio_ = \\\n self.explained_variance_ / total_var.sum()\n self.singular_values_ = S.copy() # Store the singular values.\n\n if self.n_components_ < min(n_features, n_samples):\n self.noise_variance_ = (total_var.sum() -\n self.explained_variance_.sum())\n self.noise_variance_ /= min(n_features, n_samples) - n_components\n else:\n self.noise_variance_ = 0.\n\n return U, S, V\n\n def score_samples(self, X):\n \"\"\"Return the log-likelihood of each sample.\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 
574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : array, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll : array, shape (n_samples,)\n Log-likelihood of each sample under the current model\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n Xr = X - self.mean_\n n_features = X.shape[1]\n precision = self.get_precision()\n log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)\n log_like -= .5 * (n_features * log(2. * np.pi) -\n fast_logdet(precision))\n return log_like\n\n def score(self, X, y=None):\n \"\"\"Return the average log-likelihood of all samples.\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : array, shape(n_samples, n_features)\n The data.\n\n y : Ignored\n\n Returns\n -------\n ll : float\n Average log-likelihood of the samples under the current model\n \"\"\"\n return np.mean(self.score_samples(X))\n\n\n@deprecated(\"RandomizedPCA was deprecated in 0.18 and will be removed in \"\n \"0.20. \"\n \"Use PCA(svd_solver='randomized') instead. The new implementation \"\n \"DOES NOT store whiten ``components_``. Apply transform to get \"\n \"them.\")\nclass RandomizedPCA(BaseEstimator, TransformerMixin):\n \"\"\"Principal component analysis (PCA) using randomized SVD\n\n .. deprecated:: 0.18\n This class will be removed in 0.20.\n Use :class:`PCA` with parameter svd_solver 'randomized' instead.\n The new implementation DOES NOT store whiten ``components_``.\n Apply transform to get them.\n\n Linear dimensionality reduction using approximated Singular Value\n Decomposition of the data and keeping only the most significant\n singular vectors to project the data to a lower dimensional space.\n\n Read more in the :ref:`User Guide <RandomizedPCA>`.\n\n Parameters\n ----------\n n_components : int, optional\n Maximum number of components to keep. When not given or None, this\n is set to n_features (the second dimension of the training data).\n\n copy : bool\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n iterated_power : int, default=2\n Number of iterations for the power method.\n\n .. 
versionchanged:: 0.18\n\n whiten : bool, optional\n When True (False by default) the `components_` vectors are multiplied\n by the square root of (n_samples) and divided by the singular values to\n ensure uncorrelated outputs with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n\n random_state : int, RandomState instance or None, optional, default=None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Attributes\n ----------\n components_ : array, shape (n_components, n_features)\n Components with maximum variance.\n\n explained_variance_ratio_ : array, shape (n_components,)\n Percentage of variance explained by each of the selected components.\n If k is not set then all components are stored and the sum of explained\n variances is equal to 1.0.\n\n singular_values_ : array, shape (n_components,)\n The singular values corresponding to each of the selected components.\n The singular values are equal to the 2-norms of the ``n_components``\n variables in the lower-dimensional space.\n\n mean_ : array, shape (n_features,)\n Per-feature empirical mean, estimated from the training set.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.decomposition import RandomizedPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = RandomizedPCA(n_components=2)\n >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n RandomizedPCA(copy=True, iterated_power=2, n_components=2,\n random_state=None, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [0.9924... 0.007557...]\n >>> print(pca.singular_values_) # doctest: +ELLIPSIS\n [6.30061... 0.54980...]\n\n See also\n --------\n PCA\n TruncatedSVD\n\n References\n ----------\n\n .. [Halko2009] `Finding structure with randomness: Stochastic algorithms\n for constructing approximate matrix decompositions Halko, et al., 2009\n (arXiv:909)`\n\n .. 
[MRT] `A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`\n\n \"\"\"\n\n def __init__(self, n_components=None, copy=True, iterated_power=2,\n whiten=False, random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.iterated_power = iterated_power\n self.whiten = whiten\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X by extracting the first principal components.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(check_array(X))\n return self\n\n def _fit(self, X):\n \"\"\"Fit the model to the data X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_features)\n The input data, copied, centered and whitened when requested.\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = np.atleast_2d(as_float_array(X, copy=self.copy))\n\n n_samples = X.shape[0]\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n\n U, S, V = randomized_svd(X, n_components,\n n_iter=self.iterated_power,\n random_state=random_state)\n\n self.explained_variance_ = exp_var = (S ** 2) / (n_samples - 1)\n full_var = np.var(X, ddof=1, axis=0).sum()\n self.explained_variance_ratio_ = exp_var / full_var\n self.singular_values_ = S # Store the singular values.\n\n if self.whiten:\n self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)\n else:\n self.components_ = V\n\n return X\n\n def transform(self, X):\n \"\"\"Apply dimensionality reduction on X.\n\n X is projected on the first principal components previous extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n\n X = np.dot(X, self.components_.T)\n return X\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n X = check_array(X)\n X = self._fit(X)\n return np.dot(X, self.components_.T)\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space.\n\n Returns an array X_original whose transform would be X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples in the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n\n Notes\n -----\n If whitening is enabled, inverse_transform does not compute the\n exact inverse operation of transform.\n \"\"\"\n 
check_is_fitted(self, 'mean_')\n\n X_original = np.dot(X, self.components_)\n if self.mean_ is not None:\n X_original = X_original + self.mean_\n return X_original\n"
] |
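The PCA module in the code cell above documents an 'auto' solver policy and Minka's MLE for choosing the number of components when svd_solver='full'. A minimal usage sketch of that public interface, assuming a scikit-learn installation exposing sklearn.decomposition.PCA; the demo array X_demo is hypothetical and only illustrates the call pattern:

import numpy as np
from sklearn.decomposition import PCA

# Hypothetical demo data: 100 samples in 5 features with ~2 informative directions.
rng = np.random.RandomState(0)
X_demo = rng.randn(100, 2) @ rng.randn(2, 5) + 0.01 * rng.randn(100, 5)

# 'mle' triggers Minka's dimensionality estimate; it requires the full SVD path.
pca = PCA(n_components='mle', svd_solver='full')
pca.fit(X_demo)
print(pca.n_components_)              # estimated number of components
print(pca.explained_variance_ratio_)  # variance explained per retained component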
[
[
"numpy.product",
"numpy.asscalar",
"scipy.sparse.issparse",
"numpy.ma.getmaskarray",
"numpy.asarray",
"numpy.isnan",
"numpy.compress",
"numpy.percentile",
"scipy.sparse.csr_matrix",
"numpy.asmatrix",
"numpy.diff",
"numpy.isscalar",
"numpy.array",
"numpy.zeros",
"numpy.divide"
],
[
"scipy.stats.scoreatpercentile",
"numpy.maximum",
"numpy.mean",
"numpy.ones"
],
[
"numpy.linspace"
],
[
"sklearn.utils.testing.assert_raises",
"sklearn.utils.testing.clean_warning_registry",
"sklearn.base.clone",
"numpy.max",
"sklearn.utils.testing.assert_true",
"sklearn.svm.LinearSVC",
"sklearn.utils.testing.ignore_warnings",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.feature_extraction.text.strip_accents_ascii",
"sklearn.utils.testing.SkipTest",
"numpy.unique",
"sklearn.utils.testing.assert_greater",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.utils.testing.assert_warns_message",
"numpy.argmax",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.utils.testing.assert_raise_message",
"numpy.testing.assert_array_almost_equal",
"sklearn.utils.testing.assert_false",
"numpy.min",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.HashingVectorizer",
"numpy.random.RandomState",
"numpy.array",
"numpy.sum",
"sklearn.utils.testing.assert_equal",
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.cross_val_score",
"sklearn.utils.testing.assert_less",
"numpy.sort",
"numpy.linalg.norm",
"numpy.testing.assert_array_equal",
"sklearn.utils.testing.assert_not_equal",
"sklearn.feature_extraction.text.strip_accents_unicode"
],
[
"numpy.diag",
"numpy.dot",
"sklearn.utils.testing.assert_array_almost_equal",
"numpy.sqrt",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.externals.six.moves.cStringIO",
"sklearn.datasets.samples_generator.make_spd_matrix",
"numpy.round",
"numpy.max",
"sklearn.utils.testing.assert_true",
"numpy.mean",
"numpy.var",
"sklearn.utils.testing.ignore_warnings",
"numpy.trace",
"sklearn.mixture.gaussian_mixture._estimate_gaussian_covariances_full",
"numpy.eye",
"sklearn.utils.testing.assert_greater",
"sklearn.utils.testing.assert_warns_message",
"sklearn.mixture.gaussian_mixture._estimate_gaussian_covariances_diag",
"sklearn.mixture.gaussian_mixture._estimate_gaussian_covariances_tied",
"scipy.linalg.inv",
"numpy.zeros",
"sklearn.utils.testing.assert_raise_message",
"sklearn.utils.testing.assert_allclose",
"numpy.log",
"numpy.min",
"scipy.linalg.det",
"sklearn.mixture.gaussian_mixture.GaussianMixture",
"numpy.cov",
"sklearn.utils.testing.assert_array_equal",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"sklearn.utils.testing.assert_equal",
"sklearn.mixture.gaussian_mixture._estimate_log_gaussian_prob",
"sklearn.covariance.EmpiricalCovariance",
"sklearn.utils.testing.assert_greater_equal",
"numpy.sort",
"numpy.ones",
"scipy.stats.norm.logpdf",
"sklearn.metrics.cluster.adjusted_rand_score",
"numpy.prod",
"sklearn.mixture.gaussian_mixture._estimate_gaussian_covariances_spherical",
"sklearn.mixture.gaussian_mixture._compute_precision_cholesky",
"sklearn.mixture.base._check_X",
"numpy.vstack"
],
[
"sklearn.utils.testing.assert_array_almost_equal",
"sklearn.cluster.k_means_._mini_batch_step",
"sklearn.utils.testing.assert_raises_regex",
"sklearn.cluster.KMeans",
"sklearn.utils.testing.assert_almost_equal",
"numpy.asarray",
"numpy.minimum",
"sklearn.utils.testing.assert_raises",
"sklearn.externals.six.moves.cStringIO",
"sklearn.base.clone",
"sklearn.utils.testing.assert_true",
"sklearn.utils.testing.assert_warns",
"numpy.random.randint",
"scipy.sparse.coo_matrix",
"sklearn.utils.testing.SkipTest",
"numpy.may_share_memory",
"numpy.unique",
"numpy.arange",
"sklearn.utils.testing.assert_greater",
"sklearn.utils.testing.assert_warns_message",
"numpy.repeat",
"numpy.zeros",
"sklearn.utils.testing.assert_raise_message",
"numpy.asfortranarray",
"sklearn.datasets.load_iris",
"scipy.sparse.csr_matrix",
"sklearn.cluster.k_means_._labels_inertia",
"sklearn.cluster.k_means_._check_sample_weight",
"sklearn.utils.testing.assert_array_equal",
"sklearn.cluster.k_means_._init_centroids",
"sklearn.datasets.samples_generator.make_blobs",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"sklearn.utils.testing.assert_equal",
"sklearn.metrics.cluster.v_measure_score",
"sklearn.utils.validation._num_samples",
"sklearn.utils.testing.assert_less",
"numpy.sort",
"numpy.ones",
"numpy.testing.assert_array_equal",
"sklearn.cluster.k_means",
"numpy.linalg.norm",
"sklearn.cluster.MiniBatchKMeans",
"numpy.array_split",
"numpy.empty",
"sklearn.utils.extmath.row_norms"
],
[
"numpy.dot",
"numpy.log",
"scipy.linalg.svd",
"scipy.sparse.issparse",
"scipy.sparse.linalg.svds",
"numpy.mean",
"numpy.searchsorted",
"scipy.special.gammaln",
"numpy.var",
"numpy.sum",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MichelDeudon/neural-combinatorial-rl-tensorflow
|
[
"b736384c31d4ea642f0b890c1c5df31d235b6938"
] |
[
"Self_Net_TSP/actor.py"
] |
[
"import tensorflow as tf\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom dataset import DataGenerator\r\nfrom encoder import Attentive_encoder\r\nfrom decoder import Pointer_decoder\r\nfrom critic import Critic\r\nfrom config import get_config, print_config\r\n\r\n\r\n\r\n# Tensor summaries for TensorBoard visualization\r\ndef variable_summaries(name,var, with_max_min=False):\r\n with tf.name_scope(name):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n with tf.name_scope('stddev'):\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n tf.summary.scalar('stddev', stddev)\r\n if with_max_min == True:\r\n tf.summary.scalar('max', tf.reduce_max(var))\r\n tf.summary.scalar('min', tf.reduce_min(var))\r\n\r\n\r\n\r\nclass Actor(object):\r\n\r\n\r\n def __init__(self, config):\r\n self.config=config\r\n\r\n # Data config\r\n self.batch_size = config.batch_size # batch size\r\n self.max_length = config.max_length # input sequence length (number of cities)\r\n self.input_dimension = config.input_dimension # dimension of a city (coordinates)\r\n\r\n # Reward config\r\n self.avg_baseline = tf.Variable(config.init_baseline, trainable=False, name=\"moving_avg_baseline\") # moving baseline for Reinforce\r\n self.alpha = config.alpha # moving average update\r\n\r\n # Training config (actor)\r\n self.global_step= tf.Variable(0, trainable=False, name=\"global_step\") # global step\r\n self.lr1_start = config.lr1_start # initial learning rate\r\n self.lr1_decay_rate= config.lr1_decay_rate # learning rate decay rate\r\n self.lr1_decay_step= config.lr1_decay_step # learning rate decay step\r\n\r\n # Training config (critic)\r\n self.global_step2 = tf.Variable(0, trainable=False, name=\"global_step2\") # global step\r\n self.lr2_start = config.lr1_start # initial learning rate\r\n self.lr2_decay_rate= config.lr1_decay_rate # learning rate decay rate\r\n self.lr2_decay_step= config.lr1_decay_step # learning rate decay step\r\n\r\n # Tensor block holding the input sequences [Batch Size, Sequence Length, Features]\r\n self.input_ = tf.placeholder(tf.float32, [self.batch_size, self.max_length, self.input_dimension], name=\"input_coordinates\")\r\n\r\n self.build_permutation()\r\n self.build_critic()\r\n self.build_reward()\r\n self.build_optim()\r\n self.merged = tf.summary.merge_all()\r\n\r\n\r\n def build_permutation(self):\r\n\r\n with tf.variable_scope(\"encoder\"):\r\n\r\n Encoder = Attentive_encoder(self.config)\r\n encoder_output = Encoder.encode(self.input_)\r\n\r\n with tf.variable_scope('decoder'):\r\n # Ptr-net returns permutations (self.positions), with their log-probability for backprop\r\n self.ptr = Pointer_decoder(encoder_output, self.config)\r\n self.positions, self.log_softmax = self.ptr.loop_decode()\r\n variable_summaries('log_softmax',self.log_softmax, with_max_min = True)\r\n \r\n\r\n def build_critic(self):\r\n\r\n with tf.variable_scope(\"critic\"):\r\n # Critic predicts reward (parametric baseline for REINFORCE)\r\n self.critic = Critic(self.config)\r\n self.critic.predict_rewards(self.input_)\r\n variable_summaries('predictions',self.critic.predictions, with_max_min = True)\r\n\r\n\r\n def build_reward(self):\r\n\r\n with tf.name_scope('permutations'):\r\n\r\n # Reorder input % tour\r\n self.ordered_input_ = []\r\n for input_, path in zip(tf.unstack(self.input_,axis=0), tf.unstack(self.positions,axis=0)): # Unstack % batch axis\r\n self.ordered_input_.append(tf.gather_nd(input_,tf.expand_dims(path,1)))\r\n 
self.ordered_input_ = tf.transpose(tf.stack(self.ordered_input_,0),[2,1,0]) # [batch size, seq length +1 , features] to [features, seq length +1, batch_size] Rq: +1 because end = start = first_city\r\n\r\n # Ordered coordinates\r\n ordered_x_ = self.ordered_input_[0] # [seq length +1, batch_size]\r\n delta_x2 = tf.transpose(tf.square(ordered_x_[1:]-ordered_x_[:-1]),[1,0]) # [batch_size, seq length] delta_x**2\r\n ordered_y_ = self.ordered_input_[1] # [seq length +1, batch_size]\r\n delta_y2 = tf.transpose(tf.square(ordered_y_[1:]-ordered_y_[:-1]),[1,0]) # [batch_size, seq length] delta_y**2\r\n\r\n with tf.name_scope('environment'):\r\n\r\n # Get tour length (euclidean distance)\r\n inter_city_distances = tf.sqrt(delta_x2+delta_y2) # sqrt(delta_x**2 + delta_y**2) this is the euclidean distance between each city: depot --> ... ---> depot [batch_size, seq length]\r\n self.distances = tf.reduce_sum(inter_city_distances, axis=1) # [batch_size]\r\n #variable_summaries('tour_length',self.distances, with_max_min = True)\r\n\r\n # Define reward from tour length\r\n self.reward = tf.cast(self.distances,tf.float32)\r\n variable_summaries('reward',self.reward, with_max_min = True)\r\n\r\n\r\n def build_optim(self):\r\n # Update moving_mean and moving_variance for batch normalization layers\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n\r\n with tf.name_scope('baseline'):\r\n # Update baseline\r\n reward_mean, reward_var = tf.nn.moments(self.reward,axes=[0])\r\n self.base_op = tf.assign(self.avg_baseline, self.alpha*self.avg_baseline+(1.0-self.alpha)*reward_mean)\r\n tf.summary.scalar('average baseline',self.avg_baseline)\r\n\r\n with tf.name_scope('reinforce'):\r\n # Actor learning rate\r\n self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step, self.lr1_decay_step,self.lr1_decay_rate, staircase=False, name=\"learning_rate1\")\r\n # Optimizer\r\n self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1,beta1=0.9,beta2=0.99, epsilon=0.0000001)\r\n # Discounted reward\r\n self.reward_baseline = tf.stop_gradient(self.reward - self.avg_baseline - self.critic.predictions) # [Batch size, 1] \r\n variable_summaries('reward_baseline',self.reward_baseline, with_max_min = True)\r\n # Loss\r\n self.loss1 = tf.reduce_mean(self.reward_baseline*self.log_softmax,0)\r\n tf.summary.scalar('loss1', self.loss1)\r\n # Minimize step\r\n gvs = self.opt1.compute_gradients(self.loss1)\r\n capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None] # L2 clip\r\n self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)\r\n\r\n with tf.name_scope('state_value'):\r\n # Critic learning rate\r\n self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2, self.lr2_decay_step,self.lr2_decay_rate, staircase=False, name=\"learning_rate1\")\r\n # Optimizer\r\n self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2,beta1=0.9,beta2=0.99, epsilon=0.0000001)\r\n # Loss\r\n weights_ = 1.0 #weights_ = tf.exp(self.log_softmax-tf.reduce_max(self.log_softmax)) # probs / max_prob\r\n self.loss2 = tf.losses.mean_squared_error(self.reward - self.avg_baseline, self.critic.predictions, weights = weights_)\r\n tf.summary.scalar('loss2', self.loss1)\r\n # Minimize step\r\n gvs2 = self.opt2.compute_gradients(self.loss2)\r\n capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None] # L2 clip\r\n self.train_step2 = self.opt1.apply_gradients(capped_gvs2, 
global_step=self.global_step2)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # get config\r\n config, _ = get_config()\r\n\r\n # Build Model and Reward from config\r\n actor = Actor(config)\r\n\r\n print(\"Starting training...\")\r\n with tf.Session() as sess:\r\n tf.global_variables_initializer().run()\r\n print_config()\r\n\r\n solver = [] #Solver(actor.max_length)\r\n training_set = DataGenerator(solver)\r\n\r\n nb_epoch=2\r\n for i in tqdm(range(nb_epoch)): # epoch i\r\n\r\n # Get feed_dict\r\n input_batch = training_set.train_batch(actor.batch_size, actor.max_length, actor.input_dimension)\r\n feed = {actor.input_: input_batch}\r\n #print(' Input \\n', input_batch)\r\n\r\n permutation, distances = sess.run([actor.positions, actor.distances], feed_dict=feed) \r\n print(' Permutation \\n',permutation)\r\n print(' Tour length \\n',distances)\r\n\r\n\r\n variables_names = [v.name for v in tf.global_variables() if 'Adam' not in v.name]\r\n values = sess.run(variables_names)\r\n for k, v in zip(variables_names, values):\r\n print(\"Variable: \", k, \"Shape: \", v.shape)"
] |
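In the actor code above, build_reward defines the reward as the euclidean length of the tour obtained by reordering the city coordinates with the decoder's permutation (with the tour closed back to its first city). A minimal NumPy sketch of the same quantity outside the TensorFlow graph; the function and variable names here are illustrative, not part of the repository:

import numpy as np

def tour_length(coords, permutation):
    # coords: (n_cities, 2) x/y positions; permutation: visiting order of the cities.
    ordered = coords[permutation]
    # Close the loop so the tour returns to its starting city, as the actor does.
    ordered = np.concatenate([ordered, ordered[:1]], axis=0)
    deltas = np.diff(ordered, axis=0)                # per-step displacement vectors
    return np.sqrt((deltas ** 2).sum(axis=1)).sum()  # sum of euclidean step lengths

coords = np.random.rand(10, 2)
print(tour_length(coords, np.random.permutation(10)))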
[
[
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.get_collection",
"tensorflow.nn.moments",
"tensorflow.train.exponential_decay",
"tensorflow.stop_gradient",
"tensorflow.clip_by_norm",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.unstack",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.reduce_max",
"tensorflow.losses.mean_squared_error",
"tensorflow.reduce_mean",
"tensorflow.assign",
"tensorflow.expand_dims",
"tensorflow.reduce_min",
"tensorflow.variable_scope",
"tensorflow.sqrt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
fkluger/torchgeometry
|
[
"5f1a4dc8ff3647a60901b79aa90a4e799829a7a2",
"5f1a4dc8ff3647a60901b79aa90a4e799829a7a2"
] |
[
"test/integration/test_focal.py",
"torchgeometry/losses/tversky.py"
] |
[
"import logging\nimport pytest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchgeometry as tgm\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestIntegrationFocalLoss:\n # optimization\n thresh = 1e-1\n lr = 1e-3\n num_iterations = 1000\n num_classes = 2\n\n # focal loss\n alpha = 2.0\n gamma = 2.0\n\n def generate_sample(self, base_target, std_val=0.1):\n target = base_target.float() / base_target.max()\n noise = std_val * torch.rand(1, 1, 6, 5)\n return target + noise\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight)\n\n def test_conv2d_relu(self):\n\n # we generate base sample\n target = torch.LongTensor(1, 6, 5).fill_(0)\n for i in range(1, self.num_classes):\n target[..., i:-i, i:-i] = i\n\n m = nn.Sequential(\n nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),\n nn.ReLU(True),\n )\n m.apply(self.init_weights)\n\n optimizer = optim.Adam(m.parameters(), lr=self.lr)\n\n criterion = tgm.losses.FocalLoss(\n alpha=self.alpha, gamma=self.gamma, reduction='mean')\n # NOTE: uncomment to compare against vanilla cross entropy\n # criterion = nn.CrossEntropyLoss()\n\n for iter_id in range(self.num_iterations):\n sample = self.generate_sample(target)\n output = m(sample)\n loss = criterion(output, target)\n logger.debug(\"Loss: {}\".format(loss.item()))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n sample = self.generate_sample(target)\n output_argmax = torch.argmax(m(sample), dim=1)\n logger.debug(\"Output argmax: \\n{}\".format(output_argmax))\n\n # TODO(edgar): replace by IoU or find a more stable solution\n # for this test. The issue is that depending on\n # the seed to initialize the weights affects the\n # final results and slows down the convergence of\n # the algorithm.\n val = F.mse_loss(output_argmax.float(), target.float())\n if not val.item() < self.thresh:\n pytest.xfail(\"Wrong seed or initial weight values.\")\n",
"from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .one_hot import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 ≤ targets[i] ≤ C−1`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = tgm.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha, beta) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.eps = 1e-6\n\n def forward(\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {}\"\n .format(input.shape, input.shape))\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {}\" .format(\n input.device, target.device))\n # compute softmax over the classes axis\n input_soft = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot = one_hot(target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (1. - target_one_hot), dims)\n fns = torch.sum((1. - input_soft) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + self.alpha * fps + self.beta * fns\n tversky_loss = numerator / (denominator + self.eps)\n return torch.mean(1. - tversky_loss)\n\n\n######################\n# functional interface\n######################\n\n\ndef tversky_loss(\n input: torch.Tensor,\n target: torch.Tensor,\n alpha,\n beta) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~torchgeometry.losses.TverskyLoss` for details.\n \"\"\"\n return TverskyLoss(alpha, beta)(input, target)\n"
] |
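The TverskyLoss docstring above notes that alpha = beta = 0.5 reduces the Tversky coefficient to the Dice coefficient. A small usage sketch along the lines of that docstring example (tgm refers to torchgeometry, imported as in the test file above; the tensors are random placeholders):

import torch
import torchgeometry as tgm

N = 5  # number of classes
loss_fn = tgm.losses.TverskyLoss(alpha=0.5, beta=0.5)  # alpha = beta = 0.5 -> Dice-style loss

logits = torch.randn(1, N, 3, 5, requires_grad=True)        # (B, C, H, W) raw class scores
target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)  # (B, H, W) class indices
loss = loss_fn(logits, target)
loss.backward()
print(loss.item())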
[
[
"torch.LongTensor",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU"
],
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.sum",
"torch.is_tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rish-16/pytorch-lightning-bolts
|
[
"dfbef7acbaf8992b1921dfa3b7846e77fb38fa22"
] |
[
"pl_bolts/models/rl/vanilla_policy_gradient_model.py"
] |
[
"import argparse\nfrom collections import OrderedDict\nfrom typing import Tuple, List\nfrom warnings import warn\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.optim as optim\nfrom pytorch_lightning import seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom torch.nn.functional import log_softmax, softmax\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom pl_bolts.datamodules import ExperienceSourceDataset\nfrom pl_bolts.models.rl.common.agents import PolicyAgent\nfrom pl_bolts.models.rl.common.networks import MLP\ntry:\n import gym\nexcept ModuleNotFoundError:\n warn('You want to use `gym` which is not installed yet, install it with `pip install gym`.') # pragma: no-cover\n _GYM_AVAILABLE = False\nelse:\n _GYM_AVAILABLE = True\n\n\nclass VanillaPolicyGradient(pl.LightningModule):\n def __init__(\n self,\n env: str,\n gamma: float = 0.99,\n lr: float = 0.01,\n batch_size: int = 8,\n n_steps: int = 10,\n avg_reward_len: int = 100,\n entropy_beta: float = 0.01,\n epoch_len: int = 1000,\n **kwargs\n ) -> None:\n \"\"\"\n PyTorch Lightning implementation of `Vanilla Policy Gradient\n <https://papers.nips.cc/paper/\n 1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf>`_\n Paper authors: Richard S. Sutton, David McAllester, Satinder Singh, Yishay Mansour\n Model implemented by:\n\n - `Donal Byrne <https://github.com/djbyrne>`\n\n Example:\n >>> from pl_bolts.models.rl.vanilla_policy_gradient_model import VanillaPolicyGradient\n ...\n >>> model = VanillaPolicyGradient(\"CartPole-v0\")\n\n Train::\n trainer = Trainer()\n trainer.fit(model)\n\n Args:\n env: gym environment tag\n gamma: discount factor\n lr: learning rate\n batch_size: size of minibatch pulled from the DataLoader\n batch_episodes: how many episodes to rollout for each batch of training\n entropy_beta: dictates the level of entropy per batch\n avg_reward_len: how many episodes to take into account when calculating the avg reward\n\n Note:\n This example is based on:\n https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/Chapter11/04_cartpole_pg.py\n\n Note:\n Currently only supports CPU and single GPU training with `distributed_backend=dp`\n \"\"\"\n super().__init__()\n\n if not _GYM_AVAILABLE:\n raise ModuleNotFoundError('This Module requires gym environment which is not installed yet.')\n\n # Hyperparameters\n self.lr = lr\n self.batch_size = batch_size\n self.batches_per_epoch = self.batch_size * epoch_len\n self.entropy_beta = entropy_beta\n self.gamma = gamma\n self.n_steps = n_steps\n\n self.save_hyperparameters()\n\n # Model components\n self.env = gym.make(env)\n self.net = MLP(self.env.observation_space.shape, self.env.action_space.n)\n self.agent = PolicyAgent(self.net)\n\n # Tracking metrics\n self.total_rewards = []\n self.episode_rewards = []\n self.done_episodes = 0\n self.avg_rewards = 0\n self.avg_reward_len = avg_reward_len\n self.eps = np.finfo(np.float32).eps.item()\n self.batch_states = []\n self.batch_actions = []\n\n self.state = self.env.reset()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Passes in a state x through the network and gets the q_values of each action as an output\n Args:\n x: environment state\n Returns:\n q values\n \"\"\"\n output = self.net(x)\n return output\n\n def train_batch(\n self,\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:\n \"\"\"\n Contains the 
logic for generating a new batch of data to be passed to the DataLoader\n Returns:\n yields a tuple of Lists containing tensors for states, actions and rewards of the batch.\n \"\"\"\n\n while True:\n\n action = self.agent(self.state, self.device)\n\n next_state, reward, done, _ = self.env.step(action[0])\n\n self.episode_rewards.append(reward)\n self.batch_actions.append(action)\n self.batch_states.append(self.state)\n self.state = next_state\n\n if done:\n self.done_episodes += 1\n self.state = self.env.reset()\n self.total_rewards.append(sum(self.episode_rewards))\n self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len:]))\n\n returns = self.compute_returns(self.episode_rewards)\n\n for idx in range(len(self.batch_actions)):\n yield self.batch_states[idx], self.batch_actions[idx], returns[idx]\n\n self.batch_states = []\n self.batch_actions = []\n self.episode_rewards = []\n\n def compute_returns(self, rewards):\n \"\"\"\n Calculate the discounted rewards of the batched rewards\n\n Args:\n rewards: list of batched rewards\n\n Returns:\n list of discounted rewards\n \"\"\"\n reward = 0\n returns = []\n\n for r in rewards[::-1]:\n reward = r + self.gamma * reward\n returns.insert(0, reward)\n\n returns = torch.tensor(returns)\n returns = (returns - returns.mean()) / (returns.std() + self.eps)\n\n return returns\n\n def loss(self, states, actions, scaled_rewards) -> torch.Tensor:\n \"\"\"\n Calculates the loss for VPG\n\n Args:\n states: batched states\n actions: batch actions\n scaled_rewards: batche Q values\n\n Returns:\n loss for the current batch\n \"\"\"\n\n logits = self.net(states)\n\n # policy loss\n log_prob = log_softmax(logits, dim=1)\n log_prob_actions = scaled_rewards * log_prob[range(self.batch_size), actions[0]]\n policy_loss = -log_prob_actions.mean()\n\n # entropy loss\n prob = softmax(logits, dim=1)\n entropy = -(prob * log_prob).sum(dim=1).mean()\n entropy_loss = -self.entropy_beta * entropy\n\n # total loss\n loss = policy_loss + entropy_loss\n\n return loss\n\n def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedDict:\n \"\"\"\n Carries out a single step through the environment to update the replay buffer.\n Then calculates loss based on the minibatch recieved\n Args:\n batch: current mini batch of replay data\n _: batch number, not used\n Returns:\n Training loss and log metrics\n \"\"\"\n states, actions, scaled_rewards = batch\n\n loss = self.loss(states, actions, scaled_rewards)\n\n log = {\n \"episodes\": self.done_episodes,\n \"reward\": self.total_rewards[-1],\n \"avg_reward\": self.avg_rewards,\n }\n return OrderedDict(\n {\n \"loss\": loss,\n \"avg_reward\": self.avg_rewards,\n \"log\": log,\n \"progress_bar\": log,\n }\n )\n\n def configure_optimizers(self) -> List[Optimizer]:\n \"\"\" Initialize Adam optimizer\"\"\"\n optimizer = optim.Adam(self.net.parameters(), lr=self.lr)\n return [optimizer]\n\n def _dataloader(self) -> DataLoader:\n \"\"\"Initialize the Replay Buffer dataset used for retrieving experiences\"\"\"\n dataset = ExperienceSourceDataset(self.train_batch)\n dataloader = DataLoader(dataset=dataset, batch_size=self.batch_size)\n return dataloader\n\n def train_dataloader(self) -> DataLoader:\n \"\"\"Get train loader\"\"\"\n return self._dataloader()\n\n def get_device(self, batch) -> str:\n \"\"\"Retrieve device currently being used by minibatch\"\"\"\n return batch[0][0][0].device.index if self.on_gpu else \"cpu\"\n\n @staticmethod\n def add_model_specific_args(arg_parser) -> 
argparse.ArgumentParser:\n \"\"\"\n Adds arguments for DQN model\n Note: these params are fine tuned for Pong env\n Args:\n arg_parser: the current argument parser to add to\n Returns:\n arg_parser with model specific cargs added\n \"\"\"\n\n arg_parser.add_argument(\"--entropy_beta\", type=float, default=0.01, help=\"entropy value\")\n arg_parser.add_argument(\"--batches_per_epoch\", type=int, default=10000, help=\"number of batches in an epoch\")\n arg_parser.add_argument(\"--batch_size\", type=int, default=32, help=\"size of the batches\")\n arg_parser.add_argument(\"--lr\", type=float, default=1e-3, help=\"learning rate\")\n arg_parser.add_argument(\"--env\", type=str, required=True, help=\"gym environment tag\")\n arg_parser.add_argument(\"--gamma\", type=float, default=0.99, help=\"discount factor\")\n arg_parser.add_argument(\"--seed\", type=int, default=123, help=\"seed for training run\")\n\n arg_parser.add_argument(\n \"--avg_reward_len\",\n type=int,\n default=100,\n help=\"how many episodes to include in avg reward\",\n )\n\n return arg_parser\n\n\ndef cli_main():\n parser = argparse.ArgumentParser(add_help=False)\n\n # trainer args\n parser = pl.Trainer.add_argparse_args(parser)\n\n # model args\n parser = VanillaPolicyGradient.add_model_specific_args(parser)\n args = parser.parse_args()\n\n model = VanillaPolicyGradient(**args.__dict__)\n\n # save checkpoints based on avg_reward\n checkpoint_callback = ModelCheckpoint(\n save_top_k=1, monitor=\"avg_reward\", mode=\"max\", period=1, verbose=True\n )\n\n seed_everything(123)\n trainer = pl.Trainer.from_argparse_args(\n args, deterministic=True, checkpoint_callback=checkpoint_callback\n )\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n cli_main()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.utils.data.DataLoader",
"numpy.finfo",
"torch.tensor",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
robcalon/EMAworkbench
|
[
"c9e81d330c0f3b1f8ed5d0184500bd9b367a7326",
"c9e81d330c0f3b1f8ed5d0184500bd9b367a7326"
] |
[
"ema_workbench/connectors/vensimDLLwrapper.py",
"ema_workbench/examples/feature_scoring_flu_overtime.py"
] |
[
"\"\"\"\n\nthis is a first draft for wrapping the vensim dll in a pythonic way\n\nby default it is assumed the dll is readily available. If this generates an\nVensimError, you have to find the location of the dll and either copy it to\nC:\\Windows\\System32 and/or C:\\Windows\\SysWOW64, or use::\n\n vensim = ctypes.windll.LoadLibrary('location of dll')\n\nTypically, the dll can be found in ../AppData/Local/Vensim/vendll32.dll\n\n\n\"\"\"\nimport ctypes\nimport struct\nimport sys\n\nimport numpy as np\n\nfrom ..util import EMAError, EMAWarning, get_module_logger\n\ntry:\n WindowsError # @UndefinedVariable\nexcept NameError:\n WindowsError = None\n\n# Created on 21 okt. 2010\n#\n# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>\n_logger = get_module_logger(__name__)\n\n\nclass VensimWarning(EMAWarning):\n \"\"\"\n base vensim warning\n \"\"\"\n pass\n\n\nclass VensimError(EMAError):\n \"\"\"\n base Vensim error\n \"\"\"\n pass\n\n\ntry:\n vensim_single = ctypes.windll.vendll32\nexcept AttributeError:\n vensim_single = None\nexcept WindowsError:\n vensim_single = None\n\ntry:\n vensim_double = ctypes.windll.LoadLibrary(\n 'C:\\Windows\\SysWOW64\\VdpDLL32.dll')\nexcept AttributeError:\n vensim_double = None\nexcept WindowsError:\n vensim_double = None\n\ntry:\n vensim_64 = ctypes.windll.vendll64\nexcept AttributeError:\n vensim_64 = None\nexcept WindowsError:\n vensim_64 = None\n\nif struct.calcsize(\"P\") * 8 == 64:\n if vensim_64:\n vensim = vensim_64\n _logger.info('using 64 bit vensim')\n\n else:\n raise ImportError(\"vensim dll not found\")\n # 64 bit python\nelse:\n\n if vensim_single and vensim_double:\n vensim = vensim_single\n _logger.info(\n \"both single and double precision vensim available, using single\")\n elif vensim_single:\n vensim = vensim_single\n _logger.info('using single precision vensim')\n elif vensim_double:\n vensim = vensim_double\n _logger.info('using double precision vensim')\n\ndel sys, struct\n\n\ndef be_quiet(quietflag):\n \"\"\"\n this allows you to turn off the work in progress dialog that Vensim\n displays during simulation and other activities, and also prevent the\n appearance of yes or no dialogs.\n\n use 0 for normal interaction, 1 to prevent the appearance of any work\n in progress windows, and 2 to also prevent the appearance of any\n interrogative dialogs'\n \"\"\"\n if quietflag > 2:\n raise VensimError(\"incorrect value for quietflag\")\n\n return vensim.vensim_be_quiet(quietflag)\n\n\ndef check_status():\n \"\"\"check status is used to check the current status of the Vensim DLL, for\n details on the return values check DSS reference chapter 12\"\"\"\n\n return vensim.vensim_check_status()\n\n\ndef command(command):\n \"\"\"execute a command, for details see chapter 5 of the vensim DSS manual\"\"\"\n\n return_val = vensim.vensim_command(command.encode('utf-8'))\n if return_val == 0:\n raise VensimWarning(\"command failed \" + command)\n return return_val\n\n\ndef continue_simulation(num_inter):\n \"\"\"This method continues the simulation for num_inter Time steps.\n\n Parameters\n ----------\n num_inter : int\n the number of TIME_STEP iterations that should be executed\n during the continuation\n \"\"\"\n\n return_val = vensim.vensim_continue_simulation(num_inter)\n if return_val == -1:\n raise VensimWarning(\"floating point error has occurred\")\n\n return return_val\n\n\ndef finish_simulation():\n \"\"\"completes a simulation started with start simulation\"\"\"\n\n return_val = vensim.vensim_finish_simulation()\n if return_val == 
0:\n raise VensimWarning(\"failure to finish simulation\")\n return return_val\n\n\ndef get_data(filename, variable_name, tname=\"Time\"):\n \"\"\"\n Retrieves data from simulation runs or imported data sets. In contrast\n to the Vensim DLL, this method retrieves all the data, and not only the\n data for the specified length.\n\n Parameters\n ----------\n filename : str\n the name of the .vdf file that contains the data\n variable_name : str\n the name of the variable to retrieve data on\n tname : str\n the name of the time axis against which to pull the data,\n by default this is Time\n\n Returns\n -------\n a tuple with an for an array for varname and and array for tname.\n\n \"\"\"\n vval = (ctypes.c_float * 1)()\n tval = (ctypes.c_float * 1)()\n maxn = ctypes.c_int(0)\n\n filename = filename.encode('utf-8')\n varname = variable_name.encode('utf-8')\n tname = tname.encode('utf-8')\n\n return_val = vensim.vensim_get_data(filename,\n varname,\n tname,\n vval,\n tval,\n maxn)\n\n if return_val == 0:\n raise VensimWarning(\n \"variable \" +\n variable_name +\n \" not found in dataset\")\n\n vval = (ctypes.c_float * int(return_val))()\n tval = (ctypes.c_float * int(return_val))()\n maxn = ctypes.c_int(int(return_val))\n\n return_val = vensim.vensim_get_data(filename,\n varname,\n tname,\n vval,\n tval,\n maxn)\n\n vval = np.ctypeslib.as_array(vval)\n tval = np.ctypeslib.as_array(tval)\n\n return vval, tval\n\n\ndef get_dpval(name, varval):\n \"\"\"\n use this to get the value of a variable during a simulation, as a game\n is progressing, or during simulation setup. This function is only useful\n if you are using the double precision Vensim DLL\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef get_dpvecvals(vecoff, dpvals, veclen):\n \"\"\"\n This is the same as get_vecvals except it takes a double vector to store\n values. 
This method is only meaningful in case of the double precision DLL\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef get_info(infowanted):\n \"\"\"\n Use this function to get information about vensim, for details see DSS\n reference chapter 12\n\n Parameters\n ----------\n infowanted : int\n field that specifies the info wanted\n \"\"\"\n\n buf = ctypes.create_string_buffer(b\"\", 512)\n maxBuf = ctypes.c_int(512)\n a = vensim.vensim_get_info(infowanted, buf, maxBuf)\n buf = ctypes.create_string_buffer(b\"\", int(a))\n maxBuf = ctypes.c_int(int(a))\n vensim.vensim_get_info(infowanted, buf, maxBuf)\n\n result = repr(buf.raw)\n result = result.strip()\n result = result.rstrip(\"'\")\n result = result.lstrip(\"'\")\n result = result.split(r\"\\x00\")\n result = result[0:-2]\n return result\n\n\ndef get_sens_at_time(filename, varname, timename, attime, vals, maxn):\n \"\"\"\n Get results from a sensitivity run at a specific type and across\n sensitivity runs.\n\n currently not implemented\n \"\"\"\n raise NotImplementedError\n\n\ndef get_substring():\n \"\"\"\n Utility function that is designed to make it easier to work with\n get_varnames, get_info, and get_varattribs.\n\n currently not implemented\n \"\"\"\n raise NotImplementedError\n\n\ndef get_val(name):\n \"\"\"\n This function returns the value of a variable during a simulation, as a\n game is progressing, or during simulation setup\n\n Parameters\n ----------\n name : str\n the name of variable for which one wants to retrieve the value.\n\n \"\"\"\n value = ctypes.c_float(0)\n return_val = vensim.vensim_get_val(\n name.encode('utf-8'), ctypes.byref(value))\n if return_val == 0:\n raise VensimWarning(\"variable not found\")\n\n return value.value\n\n\ndef get_varattrib(varname, attribute):\n \"\"\"\n This function can be used to access the attributes of a variable.\n\n Parameters\n ----------\n varname : str\n name for which you want attribute\n attribute : int\n attribute you want\n\n Notes\n -----\n\n ====== =============\n number meaning\n ====== =============\n 1 Units,\n 2 the comment,\n 3 the equation,\n 4 causes,\n 5 uses,\n 6 initial causes only,\n 7 active causes only,\n 8 the subscripts the variable has,\n 9 all combinations those subscripts create,\n 10 the combination of subscripts that would be used by a graph tool,\n 11 the minimum value set in the equation editor,\n 12 the maximum and\n 13 the range,\n 14 the variable type (returned as \"Level\" etc) and\n 15 the main group of a variable\n ====== =============\n\n \"\"\"\n buf = ctypes.create_string_buffer(\"\", 10)\n maxBuf = ctypes.c_int(10)\n\n bufferlength = vensim.vensim_get_varattrib(varname.encode('utf-8'),\n attribute,\n buf,\n maxBuf)\n if bufferlength == -1:\n raise VensimWarning(\"variable not found\")\n\n buf = ctypes.create_string_buffer(\"\", int(bufferlength))\n maxBuf = ctypes.c_int(int(bufferlength))\n vensim.vensim_get_varattrib(\n varname.encode('utf-8'), attribute, buf, maxBuf)\n\n result = repr(buf.raw)\n result = result.strip()\n result = result.rstrip(\"'\")\n result = result.lstrip(\"'\")\n result = result.split(r\"\\x00\")\n result = [varname for varname in result if len(varname) != 0]\n\n return result\n\n\ndef get_varnames(filter='*', vartype=0): # @ReservedAssignment\n \"\"\"\n This function returns variable names in the model a filter can be specified\n in the same way as Vensim variable Selection filter (use * for all),\n vartype is an integer that specifies the types of variables you want to\n see.\n (see DSS 
reference chapter 12 for details)\n\n Parameters\n ----------\n filter : str\n selection filter, use \\* for all.\n vartype : int\n variable type to retrieve. See table\n\n\n Returns\n -------\n a list with the variable names\n\n Notes\n -----\n ====== =============\n number meaning\n ====== =============\n 0 all\n 1 levels\n 2 auxiliaries\n 3 data\n 4 initial\n 5 constant\n 6 lookup\n 7 group\n 8 subscript\n 9 constraint\n 10 test input\n 11 time base\n 12 gaming\n ====== =============\n\n \"\"\"\n\n filter = ctypes.c_char_p(filter) # @ReservedAssignment\n vartype = ctypes.c_int(vartype)\n buf = ctypes.create_string_buffer(\"\", 512)\n maxBuf = ctypes.c_int(512)\n\n a = vensim.vensim_get_varnames(filter, vartype, buf, maxBuf)\n buf = ctypes.create_string_buffer(\"\", int(a))\n maxBuf = ctypes.c_int(int(a))\n vensim.vensim_get_varnames(filter, vartype, buf, maxBuf)\n\n varnames = repr(buf.raw)\n varnames = varnames.strip()\n varnames = varnames.rstrip(\"'\")\n varnames = varnames.lstrip(\"'\")\n varnames = varnames.split(r\"\\x00\")\n varnames = [varname for varname in varnames if len(varname) != 0]\n\n return varnames\n\n\ndef get_varoff(varname):\n \"\"\"\n This function is intended for use with get_vecvals. By filling up a\n vector of offsets you can speed the retrieval of multiple values\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef get_vecvals(vecoff, vals, nvals):\n \"\"\"gets a vector of values at the current simulation time.\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef set_parent_window(window, r1, r2):\n \"\"\"\n This is used to set a window that will be the owner of an dialogs or\n message boxes that Vensim presents.\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef show_sketch(sketchnum, wantscroll, zoompercent, pwindow):\n \"\"\"\n Use this function to display a model diagram\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef start_simulation(loadfirst, game, overwrite):\n \"\"\"\n Start a simulation that will be performed a bit at a time.\n\n Parameters\n ----------\n loadfirst : bool\n if True the run resulting from the simulationshould be loaded\n first in the list of runs\n game : int\n if 0 treat simulation as a normal simulation, if 1, start a new\n game, if 2, continue with a game\n overwrite : bool\n if True, automatically overwrite existing files when simulation\n starts\n\n \"\"\"\n\n return_val = vensim.vensim_start_simulation(loadfirst, game, overwrite)\n if return_val == 0:\n raise VensimWarning(\"simulation not started\")\n\n return return_val\n\n\ndef synthesim_vals(offset, tval, varval):\n \"\"\"\n This is a specialized function that uses memory managed by Vensim\n to give access to values while SyntheSim is active.\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef tool_command(command, window, aswiptool):\n \"\"\"\n Perform a command that will cause output to be created, or the printing or\n exporting of the contents of a currently displayed item.\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef contextAdd(wantcleanup):\n \"\"\"\n creates a new context for the server version of Vensim\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef contextDrop(context):\n \"\"\"\n drops a context that was created by contextAdd\n\n currently not implemented\n \"\"\"\n\n raise NotImplementedError\n\n\ndef use_double_precision():\n \"\"\"\n convenience function for changing 
reference to dll to dll for double\n precision.\n\n\n In order to ensure that double precision is used when running in parallel,\n call this function at the top of the module in which you define the model\n interface.\n\n \"\"\"\n\n global vensim\n try:\n vensim = ctypes.windll.LoadLibrary(r'C:\\Windows\\SysWOW64\\VdpDLL32.dll')\n except WindowsError:\n raise EMAError(\"double precision vensim dll not found\")\n",
"'''\nCreated on 30 Oct 2018\n\n@author: jhkwakkel\n'''\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom ema_workbench import ema_logging, load_results\nfrom ema_workbench.analysis import (get_ex_feature_scores,\n RuleInductionType)\n\nema_logging.log_to_stderr(level=ema_logging.INFO)\n\n# load data\nfn = r'./data/1000 flu cases no policy.tar.gz'\nx, outcomes = load_results(fn)\n\nx = x.drop(['model', 'policy'], axis=1)\ny = outcomes['deceased population region 1']\n\n#\n# 'infected fraction R1'\n\nall_scores = []\nfor i in range(0, y.shape[1], 2):\n data = y[:, i]\n scores = get_ex_feature_scores(x, data,\n mode=RuleInductionType.REGRESSION)[0]\n\n all_scores.append(scores)\nall_scores = pd.concat(all_scores, axis=1, sort=False)\nall_scores[all_scores < 0.075] = 0 # cleans up results\nnormalized = all_scores.divide(all_scores.sum(axis=1), axis=0)\nnormalized = all_scores.divide(all_scores.sum(axis=1), axis=0)\n\nlabels = normalized.index.values\nprint(labels)\ny = normalized.values\n\nfig, ax = plt.subplots()\nax.stackplot(range(len(labels)), y.T, labels=labels)\n# ax.legend()\nplt.show()\n"
] |
[
[
"numpy.ctypeslib.as_array"
],
[
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
moyix/pytorch-cifar
|
[
"21a7c34104b95194bb9fedbefb640cb3f0fc2a0f"
] |
[
"main.py"
] |
[
"'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport numpy as np\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom models import *\nfrom utils import progress_bar\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--kc', default=64, type=int, help='model size')\nparser.add_argument('--epoch', default=400, type=int, help='total training epochs')\nparser.add_argument('--resume', '-r', default=None, type=str, help='resume from checkpoint')\nparser.add_argument('--noise', default=0, type=int, help='label noise %')\nparser.add_argument('--eval', action='store_true', help='only do evaluation')\nparser.add_argument('--quiet', '-q', action='store_true', help='be quiet')\nargs = parser.parse_args()\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nif not args.quiet: print('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ndo_download = not os.path.exists('./data')\n\nif args.resume:\n # Load checkpoint.\n if not args.quiet: print('==> Resuming from checkpoint..')\n checkpoint = torch.load(args.resume)\n args.kc = checkpoint['kc']\n args.noise = checkpoint['noise']\n args.epoch = checkpoint['end_epoch']\n\n# Training data with optional noise\ndef flip_random_label(x):\n image, label = x\n wrong = list(range(10))\n del wrong[label]\n label = np.random.choice(wrong)\n x = image, label\n\n return x\n\nnoise_indices = []\nnoise_labels = []\nif not args.eval:\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=do_download, transform=transform_train)\n\n if args.noise != 0:\n # If resuming we want the label flips to be the same\n if args.resume:\n noise_indices = checkpoint['noise_indices']\n noise_labels = checkpoint['noise_labels']\n else:\n noise_frac = args.noise / 100\n num_noise_samples = int(noise_frac * len(trainset))\n if not args.quiet: print(f'Flipping {args.noise}% of labels ({num_noise_samples} samples)')\n noise_indices = np.random.choice(np.arange(len(trainset)), size=num_noise_samples, replace=False)\n noisy_data = [x for x in trainset]\n if args.resume:\n for label,index in zip(noise_labels, noise_indices):\n noisy_data[index] = (noisy_data[index][0], label)\n else:\n for i in noise_indices:\n noisy_data[i] = flip_random_label(noisy_data[i])\n noise_labels = [noisy_data[i][1] for i in noise_indices]\n trainset = noisy_data\n\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=128, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=do_download, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=2)\n\n# Model\nif not args.quiet: print('==> Building model..')\nnet = PreActResNet18(args.kc)\nnet = net.to(device)\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\nif 
args.resume:\n net.load_state_dict(checkpoint['net'])\n start_epoch = checkpoint['epoch'] + 1\n\ncriterion = nn.CrossEntropyLoss()\n# Adam with LR=0.0001\noptimizer = optim.Adam(net.parameters(), lr=0.0001)\n\n# Training\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if not args.quiet:\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\ndef test(epoch):\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if not args.quiet:\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n acc = 100.*correct/total\n # Save checkpoint.\n if epoch % 10 == 0 and not args.eval:\n if not args.quiet: print('Saving..')\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n 'kc': args.kc,\n 'noise': args.noise,\n 'noise_indices': noise_indices,\n 'noise_labels': noise_labels,\n 'end_epoch': args.epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, f'./checkpoint/noise{args.noise}_kc{args.kc}_epoch{epoch}_ckpt.pth')\n return acc\n\nif args.eval:\n if not args.resume:\n parser.error(\"--eval requires --resume CHECKPOINT\")\n print(args.kc, args.noise, test(0))\nelse:\n for epoch in range(start_epoch, args.epoch+1):\n train(epoch)\n test(epoch)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.random.choice",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gxbjtu/stock
|
[
"2279fa1f3ab029123066f4c4d74db47d4c23e6a4",
"2279fa1f3ab029123066f4c4d74db47d4c23e6a4"
] |
[
"tf/minst_serving/mnist_input_data.py",
"libs/common.py"
] |
[
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Functions for downloading and reading MNIST data.\"\"\"\n\nfrom __future__ import print_function\n\nimport gzip\nimport os\n\nimport numpy\nfrom six.moves import urllib\n\n# CVDF mirror of http://yann.lecun.com/exdb/mnist/\nSOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'\nTRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\nTRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\nTEST_IMAGES = 't10k-images-idx3-ubyte.gz'\nTEST_LABELS = 't10k-labels-idx1-ubyte.gz'\nVALIDATION_SIZE = 5000\n\n\ndef maybe_download(filename, work_directory):\n \"\"\"Download the data from Yann's website, unless it's already here.\"\"\"\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size))\n return filepath\n\n\ndef _read32(bytestream):\n dt = numpy.dtype(numpy.uint32).newbyteorder('>')\n return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef extract_images(filename):\n \"\"\"Extract the images into a 4D uint8 numpy array [index, y, x, depth].\"\"\"\n print('Extracting %s' % filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data\n\n\ndef dense_to_one_hot(labels_dense, num_classes=10):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\ndef extract_labels(filename, one_hot=False):\n \"\"\"Extract the labels into a 1D uint8 numpy array [index].\"\"\"\n print('Extracting %s' % filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels\n\n\nclass DataSet(object):\n \"\"\"Class encompassing test, validation and training MNIST data set.\"\"\"\n\n def __init__(self, images, labels, fake_data=False, one_hot=False):\n 
\"\"\"Construct a DataSet. one_hot arg is used only if fake_data is true.\"\"\"\n\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert images.shape[0] == labels.shape[0], (\n 'images.shape: %s labels.shape: %s' % (images.shape,\n labels.shape))\n self._num_examples = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n assert images.shape[3] == 1\n images = images.reshape(images.shape[0],\n images.shape[1] * images.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n images = images.astype(numpy.float32)\n images = numpy.multiply(images, 1.0 / 255.0)\n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n\n @property\n def images(self):\n return self._images\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, fake_data=False):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]\n\n\ndef read_data_sets(train_dir, fake_data=False, one_hot=False):\n \"\"\"Return training, validation and testing data sets.\"\"\"\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=one_hot)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=one_hot)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=one_hot)\n return data_sets\n\n local_file = maybe_download(TRAIN_IMAGES, train_dir)\n train_images = extract_images(local_file)\n\n local_file = maybe_download(TRAIN_LABELS, train_dir)\n train_labels = extract_labels(local_file, one_hot=one_hot)\n\n local_file = maybe_download(TEST_IMAGES, train_dir)\n test_images = extract_images(local_file)\n\n local_file = maybe_download(TEST_LABELS, train_dir)\n test_labels = extract_labels(local_file, one_hot=one_hot)\n\n validation_images = train_images[:VALIDATION_SIZE]\n validation_labels = train_labels[:VALIDATION_SIZE]\n train_images = train_images[VALIDATION_SIZE:]\n train_labels = train_labels[VALIDATION_SIZE:]\n\n data_sets.train = DataSet(train_images, train_labels)\n data_sets.validation = DataSet(validation_images, validation_labels)\n data_sets.test = DataSet(test_images, test_labels)\n\n return data_sets\n",
"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n# apk add py-mysqldb or\n\nimport platform\nimport datetime\nimport time\nimport sys\nimport os\nimport MySQLdb\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import NVARCHAR\nfrom sqlalchemy import inspect\nimport tushare as ts\nimport pandas as pd\nimport traceback\n\n# 使用环境变量获得数据库。兼容开发模式可docker模式。\nMYSQL_HOST = os.environ.get('MYSQL_HOST') if (os.environ.get('MYSQL_HOST') != None) else \"mariadb\"\nMYSQL_USER = os.environ.get('MYSQL_USER') if (os.environ.get('MYSQL_USER') != None) else \"root\"\nMYSQL_PWD = os.environ.get('MYSQL_PWD') if (os.environ.get('MYSQL_PWD') != None) else \"mariadb\"\nMYSQL_DB = os.environ.get('MYSQL_DB') if (os.environ.get('MYSQL_DB') != None) else \"stock_data\"\n\nprint(\"MYSQL_HOST :\", MYSQL_HOST, \",MYSQL_USER :\", MYSQL_USER, \",MYSQL_DB :\", MYSQL_DB)\nMYSQL_CONN_URL = \"mysql+mysqldb://\" + MYSQL_USER + \":\" + MYSQL_PWD + \"@\" + MYSQL_HOST + \"/\" + MYSQL_DB + \"?charset=utf8\"\nprint(\"MYSQL_CONN_URL :\", MYSQL_CONN_URL)\n\n\ndef engine():\n engine = create_engine(\n MYSQL_CONN_URL,\n encoding='utf8', convert_unicode=True)\n return engine\n\n\ndef conn():\n db = MySQLdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PWD, MYSQL_DB, charset=\"utf8\")\n # db.autocommit(on=True)\n return db\n\n\n# 定义通用方法函数,插入数据库表,并创建数据库主键,保证重跑数据的时候索引唯一。\ndef insert_db(data, table_name, write_index, primary_keys):\n # 定义engine\n engine_mysql = engine()\n # 使用 http://docs.sqlalchemy.org/en/latest/core/reflection.html\n # 使用检查检查数据库表是否有主键。\n insp = inspect(engine_mysql)\n col_name_list = data.columns.tolist()\n # 如果有索引,把索引增加到varchar上面。\n if write_index:\n # 插入到第一个位置:\n col_name_list.insert(0, data.index.name)\n print(col_name_list)\n data.to_sql(name=table_name, con=engine_mysql, schema=MYSQL_DB, if_exists='append',\n dtype={col_name: NVARCHAR(length=255) for col_name in col_name_list}, index=write_index)\n # 判断是否存在主键\n if insp.get_primary_keys(table_name) == []:\n with engine_mysql.connect() as con:\n # 执行数据库插入数据。\n try:\n con.execute('ALTER TABLE `%s` ADD PRIMARY KEY (%s);' % (table_name, primary_keys))\n except Exception as e:\n print(\"################## ADD PRIMARY KEY ERROR :\", e)\n\n\n# 插入数据。\ndef insert(sql, params=()):\n with conn() as db:\n print(\"insert sql:\" + sql)\n try:\n db.execute(sql, params)\n except Exception as e:\n print(\"error :\", e)\n\n\n# 查询数据\ndef select(sql, params=()):\n with conn() as db:\n print(\"select sql:\" + sql)\n try:\n db.execute(sql, params)\n except Exception as e:\n print(\"error :\", e)\n result = db.fetchall()\n return result\n\n\n# 计算数量\ndef select_count(sql, params=()):\n with conn() as db:\n print(\"select sql:\" + sql)\n try:\n db.execute(sql, params)\n except Exception as e:\n print(\"error :\", e)\n result = db.fetchall()\n # 只有一个数组中的第一个数据\n if len(result) == 1:\n return int(result[0][0])\n else:\n return 0\n\n\n# 通用函数。获得日期参数。\ndef run_with_args(run_fun):\n tmp_datetime_show = datetime.datetime.now() # 修改成默认是当日执行 + datetime.timedelta()\n tmp_datetime_str = tmp_datetime_show.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n str_db = \"MYSQL_HOST :\" + MYSQL_HOST + \", MYSQL_USER :\" + MYSQL_USER + \", MYSQL_DB :\" + MYSQL_DB\n print(\"\\n######################### \" + str_db + \" ######################### \")\n print(\"\\n######################### begin run %s %s #########################\" % (run_fun, tmp_datetime_str))\n start = time.time()\n # 要支持数据重跑机制,将日期传入。循环次数\n if len(sys.argv) == 3:\n # python xxx.py 2017-07-01 10\n tmp_year, tmp_month, tmp_day = 
sys.argv[1].split(\"-\")\n loop = int(sys.argv[2])\n tmp_datetime = datetime.datetime(int(tmp_year), int(tmp_month), int(tmp_day))\n for i in range(0, loop):\n # 循环插入多次数据,重复跑历史数据使用。\n # time.sleep(5)\n tmp_datetime_new = tmp_datetime + datetime.timedelta(days=i)\n try:\n run_fun(tmp_datetime_new)\n except Exception as e:\n print(\"error :\", e)\n traceback.print_exc()\n elif len(sys.argv) == 2:\n # python xxx.py 2017-07-01\n tmp_year, tmp_month, tmp_day = sys.argv[1].split(\"-\")\n tmp_datetime = datetime.datetime(int(tmp_year), int(tmp_month), int(tmp_day))\n try:\n run_fun(tmp_datetime)\n except Exception as e:\n print(\"error :\", e)\n traceback.print_exc()\n else:\n # tmp_datetime = datetime.datetime.now() + datetime.timedelta(days=-1)\n try:\n run_fun(tmp_datetime_show) # 使用当前时间\n except Exception as e:\n print(\"error :\", e)\n traceback.print_exc()\n print(\"######################### finish %s , use time: %s #########################\" % (\n tmp_datetime_str, time.time() - start))\n\n\n# 设置基础目录,每次加载使用。\nbash_stock_tmp = \"/data/cache/hist_data_cache/%s/%s/\"\nif not os.path.exists(bash_stock_tmp):\n os.makedirs(bash_stock_tmp) # 创建多个文件夹结构。\n print(\"######################### init tmp dir #########################\")\n\n\n# 增加读取股票缓存方法。加快处理速度。\ndef get_hist_data_cache(code, date_start, date_end):\n cache_dir = bash_stock_tmp % (date_end[0:7], date_end)\n # 如果没有文件夹创建一个。月文件夹和日文件夹。方便删除。\n # print(\"cache_dir:\", cache_dir)\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n cache_file = cache_dir + \"%s^%s.gzip.pickle\" % (date_end, code)\n # 如果缓存存在就直接返回缓存数据。压缩方式。\n if os.path.isfile(cache_file):\n print(\"######### read from cache #########\", cache_file)\n return pd.read_pickle(cache_file, compression=\"gzip\")\n else:\n print(\"######### get data, write cache #########\", code, date_start, date_end)\n stock = ts.get_hist_data(code, start=date_start, end=date_end)\n if stock is None:\n return None\n stock = stock.sort_index(0) # 将数据按照日期排序下。\n stock.to_pickle(cache_file, compression=\"gzip\")\n return stock\n"
] |
[
[
"numpy.multiply",
"numpy.arange",
"numpy.dtype",
"numpy.random.shuffle",
"numpy.frombuffer",
"numpy.zeros"
],
[
"pandas.read_pickle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Srijanb97/gcn_assignment
|
[
"a1df8a180eebb53b66c04c00eb3f88fa357fe382"
] |
[
"src/modeling/tasks/graph_classification.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass GraphClassifier(nn.Module):\n def __init__(self, hidden_dim: int, num_classes: int, pooling_op: str):\n super(GraphClassifier, self).__init__()\n # TODO: Define the graph classifier\n # graph classifier can be an MLP\n self.linear = nn.Linear(hidden_dim, hidden_dim)\n self.graph_classifier = nn.Linear(hidden_dim, num_classes)\n self.pooling_op = pooling_op\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Given node features x, applies two operations:\n 1. Pools the node representations using the pooling operation specified\n 2. Applies a classifier to the pooled representation\n \"\"\"\n # TODO: Implement the forward pass for the graph classifier\n \n pooled_rep = self.pool(x, self.pooling_op)\n out = self.linear(pooled_rep)\n classifier_logits = self.graph_classifier(out)\n return classifier_logits\n \n def pool(self, x: torch.Tensor, operation: str = \"last\") -> torch.Tensor:\n \"\"\"Given node features x, applies a pooling operation to return a \n single aggregated feature vector.\n\n Args:\n x (torch.Tensor): [The node features]\n operation (str, optional): [description]. Defaults to \"last\".\n\n Raises:\n NotImplementedError: [description]\n\n Returns:\n torch.Tensor: [A single feature vector for the graph]\n \"\"\"\n if operation == \"mean\":\n return x.mean(dim=0)\n elif operation == \"max\":\n return x.max(dim=0)[0]\n elif operation == \"last\":\n return x[-1]\n else:\n raise NotImplementedError()\n"
] |
[
[
"torch.nn.Linear"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TanguyUrvoy/normalizing-flows
|
[
"e485fe0875c117517353a9ab40e19ff951561cfc",
"e485fe0875c117517353a9ab40e19ff951561cfc"
] |
[
"normalizing_flows/flows/glow/glow_flow.py",
"normalizing_flows/flows/glow/gaussianize.py"
] |
[
"import tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nfrom normalizing_flows.flows import Flow, Transform\nfrom normalizing_flows.flows.affine import BatchNorm\nfrom . import (\n InvertibleConv,\n ActNorm,\n Squeeze,\n Split,\n AffineCoupling,\n Parameterize,\n Gaussianize,\n coupling_nn_glow\n)\n\ndef glow_step(layer_index, coupling_nn_ctor=coupling_nn_glow(), act_norm=True, name='glow_step'):\n norm = ActNorm(name=f'{name}_act_norm') if act_norm else BatchNorm(name=f'{name}_batch_norm')\n invertible_conv = InvertibleConv(name=f'{name}_inv_conv')\n affine_coupling = AffineCoupling(layer_index, nn_ctor=coupling_nn_ctor, name=f'{name}_affine_coupling')\n flow_steps = [norm, invertible_conv, affine_coupling]\n return Flow(flow_steps)\n\ndef glow_layer(layer_index,\n parameterize: Parameterize,\n depth=4,\n coupling_nn_ctor=coupling_nn_glow(),\n split_axis=-1,\n act_norm=True,\n name='glow_layer'):\n squeeze = Squeeze(name=f'{name}_squeeze')\n steps = Flow.uniform(depth, lambda i: glow_step(layer_index,\n coupling_nn_ctor=coupling_nn_ctor,\n act_norm=act_norm,\n name=f'{name}_step{i}'))\n layer_steps = [squeeze, steps]\n if split_axis is not None:\n layer_steps.append(Split(parameterize, split_axis=split_axis, name=f'{name}_split'))\n return Flow(layer_steps)\n\nclass GlowFlow(Transform):\n \"\"\"\n Glow normalizing flow (Kingma et al, 2018).\n Note that all Glow ops define forward as x -> z (data to encoding)\n rather than the canonical interpretation of z -> x. Conversely,\n inverse is defined as z -> x (encoding to data). The implementations\n provided by this module are written to be consistent with the\n terminology as defined by the Glow authors. Note that this is inconsistent\n with the 'flows' module in general, which specifies 'forward' as z -> x\n and vice versa. 
This can be corrected easily using the flows.Invert transform.\n \"\"\"\n def __init__(self,\n input_shape=None,\n num_layers=1,\n depth=4,\n cond_shape=None,\n parameterize_ctor=Gaussianize,\n coupling_nn_ctor=coupling_nn_glow(),\n act_norm=True,\n name='glow_flow',\n *args, **kwargs):\n \"\"\"\n Creates a new Glow normalizing flow with the given configuration.\n\n input_shape : shape of input; can be provided here or at a later time to 'initialize'\n num_layers : number of \"layers\" in the multi-scale Glow architecture\n depth_per_layer : number of glow steps per layer\n \n parameterize_ctor : a function () -> Paramterize (see consructor docs for Split)\n coupling_nn_ctor : function that constructs a Keras model for affine coupling steps\n act_norm : if true, use act norm in Glow layers; otherwise, use batch norm\n \"\"\"\n def _layer(i):\n \"\"\"Builds layer i; omits split op for final layer\"\"\"\n assert i < num_layers, f'expected i < {num_layers}; got {i}'\n return glow_layer(i,\n parameterize_ctor(name=f'{name}_layer{i}_param'),\n depth=depth,\n coupling_nn_ctor=coupling_nn_ctor,\n act_norm=act_norm,\n split_axis=None if i == num_layers - 1 else -1,\n name=f'{name}_layer{i}')\n super().__init__(*args, name=name, **kwargs)\n self.num_layers = num_layers\n self.depth = depth\n self.cond_shape = cond_shape\n self.layers = [_layer(i) for i in range(num_layers)]\n self.parameterize = parameterize_ctor()\n if input_shape is not None:\n self.initialize(input_shape)\n \n def _build_cond_fn(self, cond_shape, z_shape):\n from tensorflow.keras import Model\n from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Conv2D, Concatenate\n x_in = Input(z_shape[1:])\n cond_in = Input(cond_shape)\n y = Dense(tf.math.reduce_prod(z_shape[1:]), name=f'{self.name}_cond_dense')(Flatten()(cond_in))\n y = Reshape(z_shape[1:])(y)\n y = Concatenate(axis=-1)([x_in, y])\n y = Conv2D(z_shape[-1], 3, padding='same')(y)\n return Model(inputs=[x_in, cond_in], outputs=y)\n \n def _forward_shape(self, input_shape):\n for layer in self.layers:\n input_shape = layer._forward_shape(input_shape)\n return input_shape\n \n def _inverse_shape(self, input_shape):\n for layer in reversed(self.layers):\n input_shape = layer._inverse_shape(input_shape)\n return input_shape\n \n def _initialize(self, input_shape):\n for layer in self.layers:\n layer.initialize(input_shape)\n input_shape = layer._forward_shape(input_shape)\n self.parameterize.initialize(input_shape)\n if self.cond_shape is not None:\n self.cond_fn = self._build_cond_fn(self.cond_shape, input_shape)\n \n def _flatten_zs(self, zs):\n zs_reshaped = []\n for z in zs:\n zs_reshaped.append(tf.reshape(z, (tf.shape(z)[0], -1)))\n return tf.concat(zs_reshaped, axis=-1)\n \n def _unflatten_z(self, z):\n assert self.input_shape is not None\n batch_size = tf.shape(z)[0]\n output_shape = self._forward_shape(self.input_shape)\n st = np.prod(output_shape[1:])\n z_k = tf.reshape(z[:,-st:], (batch_size, *output_shape[1:]))\n zs = [z_k]\n for i in range(self.num_layers-1):\n layer_i = self.layers[self.num_layers-i-1]\n output_shape = layer_i._inverse_shape(output_shape)\n size_i = np.prod(output_shape[1:])\n z_i = z[:,-st-size_i:-st]\n zs.insert(0, tf.reshape(z_i, (batch_size, *output_shape[1:])))\n st += size_i\n return zs\n \n def _forward(self, x, return_zs=False, **kwargs):\n assert self.cond_shape is None or 'y_cond' in kwargs, 'y_cond must be supplied for conditional flow'\n zs = []\n x_i = x\n fldj = 0.0\n for i in range(self.num_layers-1):\n layer = 
self.layers[i]\n (x_i, z_i), fldj_i = layer.forward(x_i)\n fldj += fldj_i\n zs.append(z_i)\n # final layer\n x_i, fldj_i = self.layers[-1].forward(x_i)\n fldj += fldj_i\n # Gaussianize (parameterize) final x_i\n h = tf.zeros_like(x_i)\n if self.cond_shape is not None:\n h = self.cond_fn([h, kwargs['y_cond']])\n z_i, fldj_i = self.parameterize.forward(h, x_i)\n fldj += fldj_i\n zs.append(z_i)\n if return_zs:\n return zs, fldj\n z = self._flatten_zs(zs)\n return tf.reshape(z, tf.shape(x)), fldj\n \n def _inverse(self, z, input_zs=False, **kwargs):\n assert self.cond_shape is None or 'y_cond' in kwargs, 'y_cond must be supplied for conditional flow'\n zs = z if input_zs else self._unflatten_z(tf.reshape(z, (tf.shape(z)[0], -1)))\n assert len(zs) == self.num_layers, 'number of latent space inputs should match number of layers'\n h = tf.zeros_like(zs[-1])\n if self.cond_shape is not None:\n h = self.cond_fn([h, kwargs['y_cond']])\n ildj = 0.0\n x_i, ildj_i = self.parameterize.inverse(h, zs[-1])\n ildj += ildj_i\n x_i, ildj_i = self.layers[-1].inverse(x_i)\n ildj += ildj_i\n for i in range(self.num_layers-1):\n layer = self.layers[self.num_layers-i-2]\n x_i, ildj_i = layer.inverse(x_i, zs[-i-2])\n ildj += ildj_i\n return x_i, ildj\n \n def _regularization_loss(self):\n return tf.math.add_n([layer._regularization_loss() for layer in self.layers])\n",
"import tensorflow as tf\nimport tensorflow_probability as tfp\nfrom normalizing_flows.flows import Transform\nfrom . import Parameterize\n\ndef gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):\n if inverse:\n z = tf.math.exp(log_sigmas)*x + mus\n ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3])\n else:\n z = (x - mus)*tf.math.exp(-log_sigmas)\n ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3])\n return z, ldj\n \nclass Gaussianize(Parameterize):\n \"\"\"\n Implementation of parameterize for a Gaussian prior. Corresponds to the \"Gaussianization\" step in Glow (Kingma et al, 2018).\n \"\"\"\n def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):\n super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs)\n \n def _forward(self, x1, x2, **kwargs):\n params = self.parameterizer(x1)\n mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]\n z2, fldj = gaussianize(x2, mus, log_sigmas)\n return z2, fldj\n \n def _inverse(self, x1, z2, **kwargs):\n params = self.parameterizer(x1)\n mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]\n x2, ildj = gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))\n return x2, ildj\n \ndef log_gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):\n \"\"\"\n Standardize log normal random variable x using mus and log_sigmas.\n \"\"\"\n if inverse:\n scales = tf.math.exp(log_sigmas)\n log_x = tf.math.log(x)\n ldj = log_x\n log_y = log_x*scales + mus\n ldj += log_sigmas\n z = tf.math.exp(log_y)\n return z, ldj\n else:\n scales = tf.math.exp(-log_sigmas)\n log_x = tf.math.log(x)\n ldj = -log_x\n log_y = (log_x - mus)*scales\n ldj -= log_sigmas\n z = tf.math.exp(log_y)\n return z, ldj\n\nclass LogGaussianize(Parameterize):\n \"\"\"\n Implementation of Parameterize for a log-Gaussian prior.\n \"\"\"\n def __init__(self, input_shape=None, epsilon=1.0E-3, name='log_gaussianize', *args, **kwargs):\n super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs)\n self.epsilon = epsilon\n \n def _forward(self, x1, x2, **kwargs):\n \"\"\"\n A log normal RV X = exp(mu + sigma*Z) where Z ~ N(0,I).\n The forward pass scales to a standard log normal with mu=0, sigma=1 by computing:\n exp(Z) = (X / exp(mu))^(1/sigma)\n \"\"\"\n params = self.parameterizer(x1)\n mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]\n # compute softplus activation\n z2, ldj = log_gaussianize(x2, mus, log_sigmas)\n z2 = tf.where(x2 > self.epsilon, z2, x2)\n ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj))\n return z2, tf.math.reduce_sum(ldj, axis=[1,2,3])\n \n def _inverse(self, x1, z2, **kwargs):\n params = self.parameterizer(x1)\n mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]\n x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))\n x2 = tf.where(z2 > self.epsilon, x2, z2)\n ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))\n return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])\n \ndef half_gaussianize(x, log_sigmas, inverse=tf.constant(False)):\n if inverse:\n z = tf.math.exp(log_sigmas)*x\n ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3])\n else:\n z = x*tf.math.exp(-log_sigmas)\n ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3])\n return z, ldj\n\nclass HalfGaussianize(Parameterize):\n \"\"\"\n Implementation of parameterize for a half-Gaussian prior.\n \"\"\"\n def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):\n super().__init__(*args, num_parameters=1, input_shape=input_shape, name=name, **kwargs)\n \n 
def _forward(self, x1, x2, **kwargs):\n log_sigmas = self.parameterizer(x1)\n z2, fldj = half_gaussianize(x2, log_sigmas)\n return z2, fldj\n \n def _inverse(self, x1, z2, **kwargs):\n log_sigmas = self.parameterizer(x1)\n x2, ildj = half_gaussianize(z2, log_sigmas, inverse=tf.constant(True))\n return x2, ildj\n \ndef exponentiate(x, log_lambdas, inverse=tf.constant(False)):\n if not inverse:\n z = tf.math.exp(log_lambdas)*x\n ldj = tf.math.reduce_sum(log_lambdas, axis=[1,2,3])\n else:\n z = x*tf.math.exp(-log_lambdas)\n ldj = -tf.math.reduce_sum(log_lambdas, axis=[1,2,3])\n return z, ldj\n\nclass Exponentiate(Parameterize):\n \"\"\"\n Implementation of parameterize for an exponetial prior.\n \"\"\"\n def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):\n super().__init__(*args, num_parameters=1, input_shape=input_shape, name=name, **kwargs)\n \n def _forward(self, x1, x2, **kwargs):\n log_lambdas = self.parameterizer(x1)\n z2, fldj = exponentiate(x2, log_lambdas)\n return z2, fldj\n \n def _inverse(self, x1, z2, **kwargs):\n log_lambdas = self.parameterizer(x1)\n x2, ildj = exponentiate(z2, log_lambdas, inverse=tf.constant(True))\n return x2, ildj\n"
] |
[
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.zeros_like",
"numpy.prod",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Flatten",
"tensorflow.math.reduce_prod",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.constant",
"tensorflow.math.log",
"tensorflow.math.exp",
"tensorflow.zeros_like",
"tensorflow.math.reduce_sum",
"tensorflow.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
atztogo/phono3py
|
[
"37ac5b19e0cfa462d32d31cadd5fcb4f2ff6b477"
] |
[
"setup.py"
] |
[
"\"\"\"Phono3py setup.py.\"\"\"\nimport os\n\nimport numpy\nimport setuptools\n\n# Ensure that 'site.cfg' exists.\nif not os.path.exists(\"site.cfg\"):\n msg_list = [\n '\"site.cfg\" file is needed to run setup.py.',\n \"See about installation at https://phonopy.github.io/phono3py/install.html.\",\n \"A minimum setting of site.cfg to build with openmp support is:\",\n \"# ------------------------------\",\n \"[phono3py]\",\n \"extra_compile_args = -fopenmp\",\n \"# ------------------------------\",\n \"Please create an emply site.cfg (no-openmp support) to run setup.py\",\n \"unless any custom setting is needed, although this is considered unusual.\",\n ]\n\n raise FileNotFoundError(\"\\n\".join(msg_list))\n\n# Retrieve the default flags from the numpy installation\n# This also means one can override this with a site.cfg\n# configuration file\nfrom numpy.distutils.system_info import dict_append, get_info, system_info\n\ngit_num = None\n\n# use flags defined in numpy\nall_info_d = get_info(\"ALL\")\nlapack_info_d = get_info(\"lapack_opt\")\n\n\nclass phono3py_info(system_info):\n \"\"\"See system_info in numpy.\"\"\"\n\n section = \"phono3py\"\n\n def calc_info(self):\n \"\"\"Read in *all* options in the [phono3py] section of site.cfg.\"\"\"\n info = self.calc_libraries_info()\n dict_append(info, **self.calc_extra_info())\n dict_append(info, include_dirs=self.get_include_dirs())\n self.set_info(**info)\n\n\nmacros = []\n\n# in numpy>=1.16.0, silence build warnings about deprecated API usage\nmacros.append((\"NPY_NO_DEPRECATED_API\", \"0\"))\n\n# Avoid divergence in tetrahedron method by ensuring denominator > 1e-10.\n# macros.append((\"THM_EPSILON\", \"1e-10\"))\n\nwith_threaded_blas = False\nwith_mkl = False\n\n# define options\n# these are the basic definitions for all extensions\nopts = lapack_info_d.copy()\nif \"mkl\" in opts.get(\"libraries\", \"\"):\n with_mkl = True\n\nif with_mkl:\n with_threaded_blas = True\n # generally this should not be needed since the numpy distutils\n # finding of MKL creates the SCIPY_MKL_H flag\n macros.append((\"MKL_LAPACKE\", None))\n\nif with_threaded_blas:\n macros.append((\"MULTITHREADED_BLAS\", None))\n\n# Create the dictionary for compiling the codes\ndict_append(opts, **all_info_d)\ndict_append(opts, include_dirs=[\"c\"])\ndict_append(opts, define_macros=macros)\n# Add numpy's headers\ninclude_dirs = numpy.get_include()\nif include_dirs is not None:\n dict_append(opts, include_dirs=[include_dirs])\n\n# Add any phono3py manual flags from here\nadd_opts = phono3py_info().get_info()\ndict_append(opts, **add_opts)\n\n# Different extensions\nextensions = []\n\n# Define the modules\nsources_phono3py = [\n \"c/_phono3py.c\",\n \"c/bzgrid.c\",\n \"c/collision_matrix.c\",\n \"c/fc3.c\",\n \"c/grgrid.c\",\n \"c/imag_self_energy_with_g.c\",\n \"c/interaction.c\",\n \"c/isotope.c\",\n \"c/lagrid.c\",\n \"c/lapack_wrapper.c\",\n \"c/phono3py.c\",\n \"c/phonoc_utils.c\",\n \"c/pp_collision.c\",\n \"c/real_self_energy.c\",\n \"c/real_to_reciprocal.c\",\n \"c/reciprocal_to_normal.c\",\n \"c/snf3x3.c\",\n \"c/tetrahedron_method.c\",\n \"c/triplet.c\",\n \"c/triplet_grid.c\",\n \"c/triplet_iw.c\",\n]\nextensions.append(\n setuptools.Extension(\"phono3py._phono3py\", sources=sources_phono3py, **opts)\n)\n\n\nsources_phononmod = [\n \"c/_phononmod.c\",\n \"c/dynmat.c\",\n \"c/lapack_wrapper.c\",\n \"c/phonon.c\",\n \"c/phononmod.c\",\n]\nextensions.append(\n setuptools.Extension(\"phono3py._phononmod\", sources=sources_phononmod, **opts)\n)\n\nsources_lapackepy = 
[\"c/_lapackepy.c\", \"c/lapack_wrapper.c\"]\nextensions.append(\n setuptools.Extension(\"phono3py._lapackepy\", sources=sources_lapackepy, **opts)\n)\n\npackages_phono3py = [\n \"phono3py\",\n \"phono3py.conductivity\",\n \"phono3py.cui\",\n \"phono3py.interface\",\n \"phono3py.other\",\n \"phono3py.phonon\",\n \"phono3py.phonon3\",\n \"phono3py.sscha\",\n]\nscripts_phono3py = [\n \"scripts/phono3py\",\n \"scripts/phono3py-load\",\n \"scripts/phono3py-kaccum\",\n \"scripts/phono3py-kdeplot\",\n \"scripts/phono3py-coleigplot\",\n]\n\nif __name__ == \"__main__\":\n version_nums = [None, None, None]\n with open(\"phono3py/version.py\") as w:\n for line in w:\n if \"__version__\" in line:\n for i, num in enumerate(line.split()[2].strip('\"').split(\".\")):\n version_nums[i] = num\n break\n\n # To deploy to pypi by travis-CI\n if os.path.isfile(\"__nanoversion__.txt\"):\n nanoversion = 0\n with open(\"__nanoversion__.txt\") as nv:\n try:\n for line in nv:\n nanoversion = int(line.strip())\n break\n except ValueError:\n nanoversion = 0\n if nanoversion != 0:\n version_nums.append(nanoversion)\n elif git_num:\n version_nums.append(git_num)\n\n if None in version_nums:\n print(\"Failed to get version number in setup.py.\")\n raise\n\n version = \".\".join([\"%s\" % n for n in version_nums[:3]])\n if len(version_nums) > 3:\n version += \"-%s\" % version_nums[3]\n\n setuptools.setup(\n name=\"phono3py\",\n version=version,\n description=\"This is the phono3py module.\",\n author=\"Atsushi Togo\",\n author_email=\"[email protected]\",\n url=\"http://phonopy.github.io/phono3py/\",\n packages=packages_phono3py,\n python_requires=\">=3.7\",\n install_requires=[\n \"numpy>=1.15.0\",\n \"scipy\",\n \"PyYAML\",\n \"matplotlib>=2.2.2\",\n \"h5py\",\n \"spglib\",\n \"phonopy>=2.14,<2.15\",\n ],\n provides=[\"phono3py\"],\n scripts=scripts_phono3py,\n ext_modules=extensions,\n )\n"
] |
[
[
"numpy.distutils.system_info.dict_append",
"numpy.distutils.system_info.get_info",
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhixuanli/SLN-Amodal
|
[
"266c648f6838e37f3ada7223f212b21f730ffb64"
] |
[
"data/amodalImage.py"
] |
[
"import numpy as np\nimport random\nimport cv2,os,pickle\nfrom skimage import morphology\n\nclass amodalImage:\n def __init__(self, image_file=None):\n with open(image_file, 'rb') as fp:\n self.Image_anns = pickle.load(fp)\n\n # use number to label areas, 2**i = visual areas index for amodal i ,2**(32+i) = invisual mask index for amodal i\n\n def reLayerMask(self,mask_amodal, mask_invis):\n mask_zeros = np.zeros(mask_amodal[0].shape).astype('bool')\n labal = np.zeros(mask_amodal[0].shape).astype('uint64')\n for i in range(len(mask_amodal)):\n if i >= 32:\n continue\n if len(mask_invis[i]):\n invis = mask_invis[i] > 0\n labal[invis] |= 1 << (i + 32)\n mask_vis = mask_amodal[i] - mask_invis[i]\n else:\n mask_vis = mask_amodal[i]\n\n labal[mask_vis > 0] |= 1 << i\n\n labal = self.remove_small_path(labal, min_size=64)\n return labal\n\n def remove_small_path(self, labal, min_size=64):\n color = np.unique(labal)\n for i in range(len(color)):\n mask = (labal == color[i])\n mask_new = morphology.remove_small_objects(mask, min_size=min_size)\n if not mask_new.max():\n labal[mask] = 0\n return labal\n\n def get_image_labals(self,labal):\n labal_ids = np.unique(labal)\n if labal_ids[0] == 0:\n labal_ids = np.delete(labal_ids, 0)\n return labal_ids\n\n # id start from 0\n def objectID_to_masks(self,labal, id, labal_ids=None):\n if labal_ids is None:\n labal_ids = self.get_image_labals(labal)\n\n mask_vis, mask_invis = [], []\n index_vis = ((labal_ids >> id) & 1 == 1).nonzero()\n index_invis = ((labal_ids >> (id + 32)) & 1 == 1).nonzero()\n\n for items in index_vis:\n if items.size > 0:\n mask_vis.append(labal == labal_ids[items[0]])\n for items in index_invis:\n if items.size > 0:\n mask_invis.append(labal == labal_ids[items[0]])\n\n return (mask_vis, index_vis, mask_invis, index_invis)\n\n # id start from 0, id<0 return all masks\n def maskID_to_mask(self,labal, id, labal_ids=None):\n if labal_ids is None:\n labal_ids = self.get_image_labals(labal)\n\n mask = []\n if id < 0:\n for items in labal_ids:\n mask.append(labal == items)\n else:\n mask.append(labal == labal_ids[id])\n return mask\n\n def number_to_index(self,labal_id):\n bin_index, objectID = 0, []\n while labal_id:\n if labal_id & np.uint64(1):\n objectID.append(bin_index)\n bin_index += 1\n labal_id = labal_id >> np.uint64(1)\n return objectID\n\n def remove_last_one(self,number, depth):\n while number and depth:\n number = number & (number - 1)\n depth -= 1\n return number\n\n # id start from 0\n # return vis object id invis layer 1 - n\n def maskID_to_objectIDs(self,labal, id, labal_ids=None):\n if labal_ids is None:\n labal_ids = self.get_image_labals(labal)\n labal_id = labal_ids[id]\n\n vis = (labal_id << np.uint64(32)) >> np.uint64(32) # remove highest 32 bit\n invis = labal_id >> np.uint64(32) ## remove lowest 32 bit\n\n object_id_vis = self.number_to_index(vis)\n object_id_invis = self.number_to_index(invis)\n object_id_vis.extend(object_id_invis)\n return object_id_vis\n\n def layer_to_mask(self,labal, depth, labal_ids=None):\n if labal_ids is None:\n labal_ids = self.get_image_labals(labal)\n mask, objectID = [], []\n if 0 == depth:\n vis = (labal_ids << np.uint64(32)) >> np.uint64(32)\n for i in range(len(vis)):\n mask.append(self.maskID_to_mask(labal, i))\n objectID.append(self.number_to_index(vis[i]))\n return (mask, objectID)\n\n else:\n # find (depth)th 1 from left to right, is exist have depth layer else not\n depth -= 1\n invis = labal_ids >> np.uint64(32)\n for i in range(len(invis)):\n new_labal = 
self.remove_last_one(invis[i], depth)\n if new_labal:\n mask.append(self.maskID_to_mask(labal, i))\n objectID.append(self.number_to_index(invis[i]))\n return (mask, objectID)"
] |
[
[
"numpy.delete",
"numpy.uint64",
"numpy.zeros",
"numpy.unique"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jorgepadilla19/gdsfactory
|
[
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165",
"68e1c18257a75d4418279851baea417c8899a165"
] |
[
"gdsfactory/simulation/simphony/components/gc.py",
"gdsfactory/functions.py",
"gdsfactory/autoplacer/yaml_placer.py",
"gdsfactory/simulation/plot.py",
"fixme/p4/omegaconf_error.py",
"gdsfactory/components/awg.py",
"gdsfactory/tests/test_min_exclusion.py"
] |
[
"from gdsfactory.config import sparameters_path\nfrom gdsfactory.simulation.simphony.model_from_sparameters import model_from_filepath\n\n\ndef gc1550te(filepath=sparameters_path / \"gc2dte\" / \"gc1550.dat\", numports=2):\n \"\"\"Returns Sparameter model for 1550nm TE grating_coupler.\n\n .. plot::\n :include-source:\n\n import gdsfactory.simulation.simphony as gs\n import gdsfactory.simulation.simphony.components as gc\n\n c = gc.gc1550te()\n gs.plot_model(c)\n \"\"\"\n return model_from_filepath(filepath=filepath, numports=numports)\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import numpy as np\n\n wav = np.linspace(1520, 1570, 1024) * 1e-9\n f = 3e8 / wav\n c = gc1550te()\n s = c.s_parameters(freq=f)\n\n plt.plot(wav, np.abs(s[:, 1] ** 2))\n print(c.pins)\n plt.legend()\n plt.show()\n",
"\"\"\"All functions return a Component so you can easily pipe or compose them.\n\nThere are two types of functions:\n\n- decorators: return the original component\n- containers: return a new component\n\n\"\"\"\nfrom functools import lru_cache\n\nimport numpy as np\nfrom omegaconf import OmegaConf\nfrom pydantic import validate_arguments\n\nfrom gdsfactory.cell import cell\nfrom gdsfactory.component import Component\nfrom gdsfactory.components.text_rectangular import text_rectangular_multi_layer\nfrom gdsfactory.functools_ import partial\nfrom gdsfactory.port import auto_rename_ports\nfrom gdsfactory.types import (\n Anchor,\n Axis,\n ComponentFactory,\n ComponentOrFactory,\n Float2,\n Layer,\n List,\n Optional,\n Strs,\n)\n\ncache = lru_cache(maxsize=None)\n\n\ndef add_port(component: Component, **kwargs) -> Component:\n \"\"\"Return Component with a new port.\"\"\"\n component.add_port(**kwargs)\n return component\n\n\n@cell\ndef add_text(\n component: ComponentOrFactory,\n text: str = \"\",\n text_offset: Float2 = (0, 0),\n text_anchor: Anchor = \"cc\",\n text_factory: ComponentFactory = text_rectangular_multi_layer,\n) -> Component:\n \"\"\"Return component inside a new component with text geometry.\n\n Args:\n component:\n text: text string.\n text_offset: relative to component anchor. Defaults to center (cc).\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n \"\"\"\n component = component() if callable(component) else component\n component_new = Component()\n component_new.component = component\n ref = component_new.add_ref(component)\n\n t = component_new << text_factory(text)\n t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))\n\n component_new.add_ports(ref.ports)\n component_new.copy_child_info(component)\n return component_new\n\n\ndef add_texts(\n components: List[ComponentOrFactory],\n prefix: str = \"\",\n index0: int = 0,\n **kwargs,\n) -> List[Component]:\n \"\"\"Return a list of Component with text labels.\n\n Args:\n components: list of components\n prefix: Optional prefix for the labels\n index0: defaults to 0 (0, for first component, 1 for second ...)\n\n keyword Args:\n text_offset: relative to component size info anchor. 
Defaults to center.\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n \"\"\"\n return [\n add_text(component, text=f\"{prefix}{i+index0}\", **kwargs)\n for i, component in enumerate(components)\n ]\n\n\n@cell\ndef rotate(\n component: ComponentOrFactory,\n angle: float = 90,\n) -> Component:\n \"\"\"Return rotated component inside a new component.\n\n Most times you just need to place a reference and rotate it.\n This rotate function just encapsulates the rotated reference into a new component.\n\n Args:\n component:\n angle: in degrees\n \"\"\"\n component = component() if callable(component) else component\n component_new = Component()\n component_new.component = component\n ref = component_new.add_ref(component)\n ref.rotate(angle)\n component_new.add_ports(ref.ports)\n component_new.copy_child_info(component)\n return component_new\n\n\nrotate90 = partial(rotate, angle=90)\nrotate90n = partial(rotate, angle=-90)\nrotate180 = partial(rotate, angle=180)\n\n\n@cell\ndef mirror(component: Component, p1: Float2 = (0, 1), p2: Float2 = (0, 0)) -> Component:\n \"\"\"Return new Component with a mirrored reference.\n\n Args:\n p1: first point to define mirror axis\n p2: second point to define mirror axis\n \"\"\"\n component_new = Component()\n component_new.component = component\n ref = component_new.add_ref(component)\n ref.mirror(p1=p1, p2=p2)\n component_new.add_ports(ref.ports)\n component_new.copy_child_info(component)\n return component_new\n\n\n@cell\ndef move(\n component: Component,\n origin=(0, 0),\n destination=None,\n axis: Optional[Axis] = None,\n) -> Component:\n \"\"\"Return new Component with a moved reference to the original component.\n\n Args:\n origin: of component\n destination:\n axis: x or y axis\n \"\"\"\n component_new = Component()\n component_new.component = component\n ref = component_new.add_ref(component)\n ref.move(origin=origin, destination=destination, axis=axis)\n component_new.add_ports(ref.ports)\n component_new.copy_child_info(component)\n return component_new\n\n\ndef move_port_to_zero(component: Component, port_name: str = \"o1\"):\n \"\"\"Return a container that contains a reference to the original component.\n where the new component has port_name in (0, 0)\n \"\"\"\n if port_name not in component.ports:\n raise ValueError(\n f\"port_name = {port_name!r} not in {list(component.ports.keys())}\"\n )\n return move(component, -component.ports[port_name].midpoint)\n\n\ndef update_info(component: Component, **kwargs) -> Component:\n \"\"\"Return Component with updated info.\"\"\"\n component.info.update(**kwargs)\n return component\n\n\n@validate_arguments\ndef add_settings_label(\n component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None\n) -> Component:\n \"\"\"Add a settings label to a component.\n\n Args:\n component:\n layer_label:\n settings: tuple or list of settings. 
if None, adds all changed settings\n\n \"\"\"\n d = (\n {setting: component.get_setting(setting) for setting in settings}\n if settings\n else component.info.changed\n )\n\n component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)\n return component\n\n\n__all__ = (\n \"add_port\",\n \"add_text\",\n \"add_settings_label\",\n \"auto_rename_ports\",\n \"cache\",\n \"mirror\",\n \"move\",\n \"move_port_to_zero\",\n \"rotate\",\n \"update_info\",\n)\n\n\nif __name__ == \"__main__\":\n import gdsfactory as gf\n\n c = gf.components.mmi1x2(\n length_mmi=10,\n decorator=gf.partial(add_settings_label, settings=[\"name\", \"length_mmi\"]),\n )\n # c.show()\n\n cr = c.rotate()\n cr.pprint()\n cr.show()\n\n # cm = move(c, destination=(20, 20))\n # cm.show()\n\n # cm = mirror(c)\n # cm.show()\n\n # cm = c.mirror()\n # cm.show()\n\n # cm2 = move_port_to_zero(cm)\n # cm2.show()\n\n # cm3 = add_text(c, \"hi\")\n # cm3.show()\n\n # cr = rotate(component=c)\n # cr.show()\n # print(component_rotated)\n\n # component_rotated.pprint\n # component_netlist = component.get_netlist()\n # component.pprint_netlist()\n",
"\"\"\" You can define both DOEs and placer information in a YAML file\nAll the placer information need to be nested inside a placer section\n\n```yaml\niso_lines_coarse1:\n component: ISO_COARS_OPT1\n settings:\n dx: [50.]\n\n placer:\n type: pack_row/ pack_col/ grid / fixed_coords\n pack_grid:\n x0: 0\n y0: 0\n align_x: W\n align_y: S\n next_to: iso_lines_coarse1\n```\n\n\"\"\"\nimport collections\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport klayout.db as pya\nimport numpy as np\nfrom klayout.dbcore import Cell, CellInstArray, Layout\nfrom numpy import float64\nfrom omegaconf import OmegaConf\n\nfrom gdsfactory.autoplacer import text\nfrom gdsfactory.autoplacer.helpers import CELLS, import_cell, load_gds\nfrom gdsfactory.config import CONFIG, logger\nfrom gdsfactory.types import NSEW\n\nUM_TO_GRID = 1e3\nDEFAULT_BBOX_LAYER_IGNORE = [(8484, 8484)]\n\n\ndef to_grid(x: float64, um_to_grid: int = UM_TO_GRID) -> int:\n return int(x * um_to_grid)\n\n\nclass SizeInfo:\n def __init__(\n self,\n cell: Cell,\n layout: Optional[Layout] = None,\n ignore_layers: List[Tuple[int, int]] = DEFAULT_BBOX_LAYER_IGNORE,\n um_to_grid: int = UM_TO_GRID,\n ) -> None:\n \"\"\"\n layout is required if cell is a cell reference instead of a cell\n \"\"\"\n if isinstance(cell, pya.CellInstArray):\n bbox = cell.bbox(layout)\n else:\n parent_layout = cell.layout()\n layer_indexes = parent_layout.layer_indexes()\n\n for process_id, purpose_id in ignore_layers:\n layer_id = parent_layout.find_layer(process_id, purpose_id)\n if layer_id in layer_indexes:\n layer_indexes.remove(layer_id)\n\n bbox = None\n for layer_id in layer_indexes:\n per_layer_bbox = cell.bbox_per_layer(layer_id)\n if bbox is None:\n bbox = per_layer_bbox\n else:\n bbox = bbox + per_layer_bbox\n\n self.box = bbox\n\n self.west = self.box.left / um_to_grid\n self.east = self.box.right / um_to_grid\n\n self.south = self.box.bottom / um_to_grid\n self.north = self.box.top / um_to_grid\n\n self.width = self.east - self.west\n self.height = self.north - self.south\n\n xc = int(0.5 * (self.east + self.west))\n yc = int(0.5 * (self.north + self.south))\n\n self.sw = np.array([self.west, self.south])\n self.se = np.array([self.east, self.south])\n self.nw = np.array([self.west, self.north])\n self.ne = np.array([self.east, self.north])\n\n self.cw = np.array([self.west, yc])\n self.ce = np.array([self.east, yc])\n self.nc = np.array([xc, self.north])\n self.sc = np.array([xc, self.south])\n self.cc = self.center = np.array([xc, yc])\n\n @property\n def rect(self):\n w, e, s, n = self.west, self.east, self.south, self.north\n return [(w, s), (e, s), (e, n), (w, n)]\n\n def __str__(self):\n return \"w: {}\\ne: {}\\ns: {}\\nn: {}\\n\".format(\n self.west, self.east, self.south, self.north\n )\n\n\ndef placer_grid_cell_refs(\n cells,\n cols: int = 1,\n rows: int = 1,\n dx: float = 10.0,\n dy: float = 10.0,\n x0: float = 0,\n y0: float = 0,\n um_to_grid: float = UM_TO_GRID,\n **settings,\n):\n \"\"\"cells: list of cells - order matters for placing\"\"\"\n\n indices = [(i, j) for j in range(cols) for i in range(rows)]\n\n if rows * cols < len(cells):\n raise ValueError(\n \"Shape ({}, {}): Not enough emplacements ({}) for all these components\"\n \" ({}).\".format(rows, cols, len(indices), len(cells))\n )\n components = []\n for cell, (i, j) in zip(cells, indices):\n _x = int((x0 + j * dx) * um_to_grid)\n _y = int((y0 + i * dy) * um_to_grid)\n\n transform = pya.Trans(_x, _y)\n c_ref = 
pya.CellInstArray(cell.cell_index(), transform)\n components += [c_ref]\n\n return components\n\n\ndef pack_row(\n cells: List[Cell],\n row_ids: Optional[List[int]] = None,\n nb_cols: Optional[int] = None,\n x0: Union[float, int] = 0,\n y0: Union[float, int] = 0,\n align_x: NSEW = \"W\",\n align_y: NSEW = \"S\",\n margin: Union[float, int] = 20,\n margin_x: Optional[Union[float, int]] = None,\n margin_y: Optional[Union[float, int]] = None,\n um_to_grid: int = UM_TO_GRID,\n period_x: Optional[float] = None,\n period_y: Optional[float] = None,\n rotation: int = 0,\n) -> List[CellInstArray]:\n \"\"\"Pack row.\n\n Args:\n cells: a list of cells (size n)\n row_ids: a list of row ids (size n)\n where each id represents the row where the cell should be placed\n None by default => all cells in the same row\n nb_cols: number of columns\n period_x, period_y: not used by default,\n if set, use this period instead of computing the component spacing\n from the margin and the component dimension\n\n Returns: list of cell references\n \"\"\"\n si_list = [SizeInfo(c, um_to_grid=um_to_grid) for c in cells]\n heights = [si.height for si in si_list]\n margin_y = margin_y if margin_y is not None else margin\n margin_x = margin_x if margin_x is not None else margin\n\n if row_ids is None:\n row_ids = []\n nb_cells = len(cells)\n if nb_cols is None:\n nb_cols = len(cells)\n nb_full_rows = nb_cells // nb_cols\n nb_cols_last_row = nb_cells % nb_cols\n for row_id in range(nb_full_rows):\n row_ids += [row_id] * nb_cols\n\n last_row_index = row_id + 1\n row_ids += [last_row_index] * nb_cols_last_row\n\n if len(cells) != len(row_ids):\n raise ValueError(\n \"Each cell should be assigned a row id. \"\n f\"Got {len(cells)} cells for {len(row_ids)} row ids\"\n )\n\n # Find the height of each row to fit the cells\n # Also group the cells by row\n\n unique_row_ids = list(set(row_ids))\n unique_row_ids.sort()\n _row_to_heights = {r: [] for r in set(row_ids)}\n row_to_cells = {r: [] for r in unique_row_ids}\n for row, h, cell in zip(row_ids, heights, cells):\n _row_to_heights[row] += [h]\n row_to_cells[row] += [cell]\n\n row_to_height = {k: max(v) for k, v in _row_to_heights.items()}\n\n components = []\n\n # Do the packing per row\n y = y0\n for row in unique_row_ids:\n cells = row_to_cells[row]\n x = x0\n\n for c in cells:\n si = SizeInfo(c, um_to_grid=um_to_grid)\n if align_x == \"W\" and align_y == \"S\":\n component_origin = si.sw\n elif align_x == \"E\" and align_y == \"S\":\n component_origin = si.se\n elif align_x == \"E\" and align_y == \"N\":\n component_origin = si.ne\n elif align_x == \"W\" and align_y == \"N\":\n component_origin = si.nw\n try:\n _x = to_grid(x - component_origin[0], um_to_grid)\n _y = to_grid(y - component_origin[1], um_to_grid)\n\n transform = pya.Trans(rotation / 2, 0, _x, _y)\n c_ref = pya.CellInstArray(c.cell_index(), transform)\n components += [c_ref]\n\n except BaseException:\n logger.error(x, component_origin[0], um_to_grid)\n logger.error(\"ISSUE PLACING AT\", _x, _y)\n if align_x not in [\"W\", \"E\"]:\n logger.error(\"align_x should be `W`, `E` or a float\")\n if align_y not in [\"N\", \"S\"]:\n logger.error(\"align_y should be `N`, `S` or a float\")\n # raise\n\n dx = si.width + margin_x if period_x is None else period_x\n if align_x == \"W\":\n x += dx\n else:\n x -= dx\n\n dy = row_to_height[row] + margin_y if period_y is None else period_y\n\n if align_y == \"S\":\n y += dy\n else:\n y -= dy\n\n return components\n\n\ndef pack_col(\n cells: List[Cell],\n col_ids: None = None,\n 
nb_rows: Optional[int] = None,\n x0: float = 0,\n y0: float = 0,\n align_x: NSEW = \"W\",\n align_y: NSEW = \"S\",\n margin: int = 20,\n margin_x: Optional[int] = None,\n margin_y: Optional[int] = None,\n um_to_grid: int = UM_TO_GRID,\n period_x: Optional[float] = None,\n period_y: Optional[float] = None,\n rotation: int = 0,\n) -> List[CellInstArray]:\n \"\"\"\n\n Args:\n cells: a list of cells (size n)\n col_ids: a list of column ids (size n)\n where each id represents the row where the cell should be placed\n None by default => all cells are packed in the same column\n\n Returns:\n list of cell references\n \"\"\"\n widths = [SizeInfo(c, um_to_grid=um_to_grid).width for c in cells]\n margin_y = margin_y if margin_y is not None else margin\n margin_x = margin_x if margin_x is not None else margin\n\n if col_ids is None:\n col_ids = []\n nb_cells = len(cells)\n if nb_rows is None:\n nb_rows = len(cells)\n nb_full_cols = nb_cells // nb_rows\n nb_rows_last_col = nb_cells % nb_rows\n for col_id in range(nb_full_cols):\n col_ids += [col_id] * nb_rows\n\n last_col_index = col_id + 1\n col_ids += [last_col_index] * nb_rows_last_col\n\n if len(cells) != len(col_ids):\n raise ValueError(\n \"Each cell should be assigned a row id. \"\n f\"Got {len(cells)} cells for {len(col_ids)} col ids\"\n )\n\n # Find the width of each column to fit the cells\n # Also group the cells by column\n\n unique_col_ids = list(set(col_ids))\n unique_col_ids.sort()\n _col_to_widths = {r: [] for r in set(col_ids)}\n col_to_cells = {r: [] for r in unique_col_ids}\n for col, w, cell in zip(col_ids, widths, cells):\n _col_to_widths[col] += [w]\n col_to_cells[col] += [cell]\n\n col_to_width = {k: max(v) for k, v in _col_to_widths.items()}\n\n components = []\n\n # Do the packing per column\n x = x0\n for col in unique_col_ids:\n cells = col_to_cells[col]\n y = y0\n\n for c in cells:\n si = SizeInfo(c, um_to_grid=um_to_grid)\n if align_x == \"W\" and align_y == \"S\":\n component_origin = si.sw\n elif align_x == \"E\" and align_y == \"S\":\n component_origin = si.se\n elif align_x == \"E\" and align_y == \"N\":\n component_origin = si.ne\n elif align_x == \"W\" and align_y == \"N\":\n component_origin = si.nw\n\n _x = to_grid(x - component_origin[0], um_to_grid=um_to_grid)\n _y = to_grid(y - component_origin[1], um_to_grid=um_to_grid)\n\n try:\n transform = pya.Trans(rotation / 2, 0, _x, _y)\n # transform = pya.Trans(_x, _y)\n c_ref = pya.CellInstArray(c.cell_index(), transform)\n components += [c_ref]\n except BaseException:\n print(x, component_origin[0], um_to_grid)\n print(y, component_origin[1], um_to_grid)\n print(\"ISSUE PLACING AT\", _x, _y)\n print(\"ISSUE PLACING at\", _x, _y)\n\n dy = si.height + margin_y if period_y is None else period_y\n if align_y == \"S\":\n y += dy\n else:\n y -= dy\n\n dx = col_to_width[col] + margin_x if period_x is None else period_x\n if align_x == \"W\":\n x += dx\n else:\n x -= dx\n\n return components\n\n\ndef placer_fixed_coords(\n cells,\n x,\n y,\n x0: float = 0,\n y0: float = 0,\n do_permutation: bool = False,\n um_to_grid=UM_TO_GRID,\n **kwargs,\n):\n \"\"\"place cells using a list of coordinates\"\"\"\n\n # List all coordinates\n if do_permutation:\n coords = [(_x, _y) for _x in x for _y in y]\n else:\n coords = [(_x, _y) for _x, _y in zip(x, y)]\n\n # Update origin\n coords = [(c[0] + x0, c[1] + y0) for c in coords]\n\n # Generate cell list\n if len(cells) == 1:\n cells = cells * len(coords)\n\n # update coordinates from um to grid\n coords = [(int(c[0] * um_to_grid), int(c[1] * 
um_to_grid)) for c in coords]\n\n # Generate transforms\n transforms = [pya.Trans(*c) for c in coords]\n\n return [pya.CellInstArray(c.cell_index(), t) for c, t in zip(cells, transforms)]\n\n\ndef load_yaml(filepath: Path) -> Any:\n \"\"\"load placer settings\n\n Args:\n filepath: a yaml file containing the does and placer information\n\n Returns:\n a dictionnary of DOEs with:\n {\n doe_name1: {...}\n doe_name2: {...}\n ...\n }\n\n \"\"\"\n\n does = {}\n data = OmegaConf.load(filepath)\n data = OmegaConf.to_container(data)\n mask = data.pop(\"mask\")\n\n if \"layer_doe_label\" not in mask:\n mask[\"layer_doe_label\"] = (102, 6)\n\n for doe_name, doe in data.items():\n _doe = {}\n _doe.update(doe)\n does[doe_name] = _doe\n return does, mask\n\n\nDOE_CELLS = {}\n\n\ndef load_doe(doe_name: str, doe_root: Path) -> List[Layout]:\n \"\"\"\n Load all components for this DOE from the cache\n \"\"\"\n doe_dir = os.path.join(doe_root, doe_name)\n content_file = os.path.join(doe_dir, \"content.txt\")\n\n if os.path.isfile(content_file):\n with open(content_file) as f:\n lines = f.read().split(\"\\n\")\n line = lines[0]\n\n if line.startswith(\"TEMPLATE:\"):\n # If using a template, load the GDS from DOE folder used as a template\n template_name = line.split(\":\")[1].strip()\n return load_doe(template_name, doe_root)\n\n else:\n # Otherwise load the GDS from the current folder\n component_names = line.split(\" , \")\n gdspaths = [\n os.path.join(doe_dir, name + \".gds\") for name in component_names\n ]\n cells = [load_gds(gdspath) for gdspath in gdspaths]\n\n # print(\"LOAD DOE\")\n # for _c in cells:\n # print(_c.top_cell().name)\n # print()\n\n return cells\n\n\nPLACER_NAME2FUNC = {\n \"grid\": placer_grid_cell_refs,\n \"pack_row\": pack_row,\n \"pack_col\": pack_col,\n \"fixed_coords\": placer_fixed_coords,\n}\n\n\ndef separate_does_from_templates(dicts: Dict[str, Any]) -> Any:\n templates = {}\n does = {}\n for name, d in dicts.items():\n if \"type\" in d.keys(): # and d[\"type\"] == \"template\":\n templates[name] = d\n else:\n does[name] = d\n\n # We do not want to propagate \"type\": template to the does => removing it here\n for d in templates.values():\n d.pop(\"type\")\n\n return does, templates\n\n\ndef update_dicts_recurse(\n target_dict: Dict[str, Any], default_dict: Dict[str, Any]\n) -> Dict[str, Any]:\n target_dict = target_dict.copy()\n default_dict = default_dict.copy()\n for k, v in default_dict.items():\n if k not in target_dict:\n vtype = type(v)\n if vtype == dict or vtype == collections.OrderedDict:\n target_dict[k] = v.copy() # To avoid issues when popping\n else:\n target_dict[k] = v\n else:\n vtype = type(target_dict[k])\n if vtype == dict or vtype == collections.OrderedDict:\n target_dict[k] = update_dicts_recurse(target_dict[k], default_dict[k])\n return target_dict\n\n\ndef place_from_yaml(\n filepath_yaml: Path,\n root_does: Path = CONFIG[\"cache_doe_directory\"],\n precision: float = 1e-9,\n fontpath: Path = text.FONT_PATH,\n default_align_x: NSEW = \"W\",\n default_align_y: NSEW = \"S\",\n default_margin: int = 10,\n default_x0: NSEW = \"E\",\n default_y0: NSEW = \"S\",\n) -> Cell:\n \"\"\"Returns a gds cell composed of DOEs/components given in a yaml file\n allows for each DOE to have its own x and y spacing (more flexible than method1)\n\n Args:\n filepath_yaml:\n root_does: used for cache, requires content.txt\n \"\"\"\n transform_identity = pya.Trans(0, 0)\n dicts, mask_settings = load_yaml(filepath_yaml)\n\n does, templates = separate_does_from_templates(dicts)\n\n 
placed_doe = None\n placed_does = {}\n top_level_name = mask_settings.get(\"name\", \"TOP_LEVEL\")\n layer_doe_label = mask_settings[\"layer_doe_label\"]\n top_level_layout = pya.Layout()\n\n # Set database units according to precision\n top_level_layout.dbu = precision / 1e-6\n dbu = top_level_layout.dbu\n um_to_grid = int(1 / dbu)\n\n top_level = top_level_layout.create_cell(top_level_name)\n global CELLS\n CELLS[top_level_name] = top_level_layout\n\n default_doe_settings = {\n \"add_doe_label\": False,\n \"add_doe_visual_label\": False,\n \"dx_visual_label\": 0,\n \"dy_visual_label\": 0,\n }\n\n for doe_name, doe in does.items():\n\n # If a template is specified, apply it\n if \"template\" in doe:\n doe_templates = doe[\"template\"]\n if type(doe_templates) != list:\n doe_templates = [doe_templates]\n for doe_template in doe_templates:\n try:\n doe = update_dicts_recurse(doe, templates[doe_template])\n\n except BaseException:\n print(doe_template, \"does not exist\")\n raise\n doe = update_dicts_recurse(doe, default_doe_settings)\n\n # Get all the components\n components = load_doe(doe_name, root_does)\n\n # Check that the high level components are all unique\n # For now this is mostly to circumvent a bug\n # But the design manual also specifies that DOE components should have\n # unique names. So one instance per cell\n\n if components:\n if len(components) != len(set([_c.top_cell().name for _c in components])):\n __dict_component_debug = {}\n for _c in components:\n _name = _c.top_cell().name\n if _name not in __dict_component_debug:\n __dict_component_debug[_name] = 0\n __dict_component_debug[_name] += 1\n duplicates_components = [\n _name\n for _name, _count in __dict_component_debug.items()\n if _count > 1\n ]\n print(\"Please remove duplicate components at DOE entry level: \")\n print(duplicates_components)\n\n components = [\n import_cell(top_level_layout, _c.top_cell()) for _c in components\n ]\n\n default_placer_settings = {\n \"align_x\": default_align_x,\n \"align_y\": default_align_y,\n \"margin\": default_margin,\n \"x0\": default_x0,\n \"y0\": default_y0,\n }\n settings = default_placer_settings.copy()\n placer = doe.get(\"placer\")\n\n if placer:\n placer_type = placer.pop(\"type\", \"pack_col\")\n settings.update(doe[\"placer\"])\n else:\n placer_type = \"pack_col\"\n\n if placer_type not in PLACER_NAME2FUNC:\n raise ValueError(\n f\"{placer_type} is not an available placer, Choose:\"\n f\" {list(PLACER_NAME2FUNC.keys())}\"\n )\n _placer = PLACER_NAME2FUNC[placer_type]\n\n # All other attributes are assumed to be settings for the placer\n\n # Check if the cell should be attached to a specific parent cell\n if \"parent\" in settings:\n parent_name = settings.pop(\"parent\")\n if parent_name not in CELLS:\n # Create parent cell in layout and insert it under top level\n parent_cell = top_level_layout.create_cell(parent_name)\n CELLS[parent_name] = parent_cell\n parent_cell_instance = pya.CellInstArray(\n parent_cell.cell_index(), transform_identity\n )\n top_level.insert(parent_cell_instance)\n doe_parent_cell = CELLS[parent_name]\n else:\n # If no parent specified, insert the DOE at top level\n doe_parent_cell = top_level\n\n # Check if we should create a DOE cell which regroups the DOEs\n if \"with_doe_cell\" in settings:\n with_doe_cell = settings.pop(\"with_doe_cell\")\n else:\n with_doe_cell = True\n\n # x0, y0 can either be float or string\n x0 = settings.pop(\"x0\")\n y0 = settings.pop(\"y0\")\n\n # Check whether we are doing relative or absolute placement\n # if (x0 
in [\"E\", \"W\"] or y0 in [\"N\", \"S\"]) and not placed_doe:\n # raise ValueError(\n # \"At least one DOE must be placed to use relative placement\"\n # )\n\n # For relative placement (to previous DOE)\n if \"margin_x\" not in settings:\n settings[\"margin_x\"] = settings[\"margin\"]\n if \"margin_y\" not in settings:\n settings[\"margin_y\"] = settings[\"margin\"]\n\n if \"inter_margin_x\" not in settings:\n inter_margin_x = settings[\"margin_x\"]\n else:\n inter_margin_x = settings.pop(\"inter_margin_x\")\n\n if \"inter_margin_y\" not in settings:\n inter_margin_y = settings[\"margin_y\"]\n else:\n inter_margin_y = settings.pop(\"inter_margin_y\")\n\n align_x = settings[\"align_x\"]\n align_y = settings[\"align_y\"]\n\n # Make sure that the alignment is sensible depending on how we stack\n\n # If we specify a DOE to place next to, use it\n if \"next_to\" in settings:\n placed_doe = placed_does[settings.pop(\"next_to\")]\n\n # print(placed_doe)\n # print(placed_does)\n\n # Otherwise, use previously placed DOE as starting point\n doe_si = (\n SizeInfo(placed_doe, top_level_layout, um_to_grid=um_to_grid)\n if placed_doe is not None\n else None\n )\n if x0 == \"E\":\n x0 = doe_si.east\n if align_x == \"W\":\n x0 += inter_margin_x\n\n if x0 == \"W\":\n x0 = doe_si.west\n if align_x == \"E\":\n x0 -= inter_margin_x\n\n if y0 == \"N\":\n y0 = doe_si.north\n if align_y == \"S\":\n y0 += inter_margin_y\n\n if y0 == \"S\":\n y0 = doe_si.south\n if align_y == \"N\":\n y0 -= inter_margin_y\n\n # Add x0, y0 in settings as float\n settings[\"x0\"] = x0\n settings[\"y0\"] = y0\n\n settings[\"um_to_grid\"] = um_to_grid\n\n placed_components = _placer(components, **settings)\n\n # Place components within a cell having the DOE name\n\n if with_doe_cell or len(placed_components) > 1:\n doe_cell = top_level_layout.create_cell(doe_name)\n CELLS[doe_name] = doe_cell\n for instance in placed_components:\n doe_cell.insert(instance)\n placed_does[doe_name] = doe_cell\n placed_doe = doe_cell\n doe_instance = pya.CellInstArray(doe_cell.cell_index(), transform_identity)\n else:\n # If only single cell and we want to skip the sweep cell\n doe_instance = placed_components[0]\n placed_does[doe_name] = doe_instance\n placed_doe = doe_instance\n\n add_doe_label = doe[\"add_doe_label\"]\n add_doe_visual_label = doe[\"add_doe_visual_label\"]\n\n if add_doe_label:\n layer_label_index, layer_label_datatype = layer_doe_label\n layer_index = top_level.layout().insert_layer(\n pya.LayerInfo(layer_label_index, layer_label_datatype)\n )\n # Add the name of the DOE at the center of the cell\n _p = doe_instance.bbox(top_level_layout).center()\n _text = pya.Text(doe_name, _p.x, _p.y)\n top_level.shapes(layer_index).insert(_text)\n\n if add_doe_visual_label:\n _bbox = doe_instance.bbox(top_level_layout)\n\n idbu = 1 / top_level.layout().dbu\n x_text = _bbox.center().x + doe[\"dx_visual_label\"] * idbu\n y_text = _bbox.bottom + (15.0 + doe[\"dy_visual_label\"]) * idbu\n _text = text.add_text(\n top_level, doe_name, position=(x_text, y_text), fontpath=fontpath\n )\n # _transform = pya.DTrans(x_text, y_text)\n # top_level.insert(pya.CellInstArray(_text.cell_index(), _transform))\n\n doe_parent_cell.insert(doe_instance)\n\n return top_level\n\n\ndef place_and_write(\n filepath_yaml, root_does=CONFIG[\"cache_doe_directory\"], filepath_gds=\"top_level.gds\"\n):\n c = place_from_yaml(filepath_yaml, root_does)\n logger.info(\"writing...\")\n c.write(filepath_gds)\n\n\ndef assemble_subdies_from_yaml(filepath, subdies_directory, 
mask_directory=None):\n data = OmegaConf.load(filepath)\n data = OmegaConf.to_container(data)\n\n mask = data.pop(\"mask\")\n mask_name = mask[\"name\"]\n\n # Remaining entries are subdies\n dict_subdies = {\n k: (v[\"x\"], v[\"y\"], v[\"R\"] if \"R\" in v else 0) for k, v in data.items()\n }\n\n return assemble_subdies(mask_name, dict_subdies, subdies_directory, mask_directory)\n\n\ndef assemble_subdies(\n mask_name,\n dict_subdies,\n subdies_directory,\n mask_directory=None,\n um_to_grid=UM_TO_GRID,\n):\n \"\"\"\n Args:\n dict_subdies: {subdie_name: (x, y, rotation) in (um, um, deg)}\n subdies_directory: directory where the subdies should be looked for\n \"\"\"\n top_level_layout = pya.Layout()\n top_level = top_level_layout.create_cell(mask_name)\n if mask_directory is None:\n mask_directory = subdies_directory\n\n for subdie_name, (x_um, y_um, R) in dict_subdies.items():\n gdspath = os.path.join(subdies_directory, subdie_name + \".gds\")\n subdie = load_gds(gdspath).top_cell()\n\n _subdie = import_cell(top_level_layout, subdie)\n\n t = pya.Trans(R / 2, 0, int(x_um * um_to_grid), int(y_um * um_to_grid))\n # t = pya.Trans(0, 0)\n subdie_instance = pya.CellInstArray(_subdie.cell_index(), t)\n top_level.insert(subdie_instance)\n\n top_level.write(os.path.join(mask_directory, mask_name + \".gds\"))\n return top_level\n\n\ndef _demo():\n import gdsfactory as gf\n\n c = gf.components.straight()\n gdspath = c.write_gds_with_metadata()\n\n layout1 = load_gds(gdspath)\n cell1 = layout1.top_cell()\n cell1_instance1 = pya.CellInstArray(cell1.cell_index(), pya.Trans(10, 0))\n\n layout2 = pya.Layout()\n layout2.create_cell(\"TOP_LEVEL\")\n\n layout2.cell(\"TOP_LEVEL\").insert(cell1_instance1)\n layout2.write(\"test.gds\")\n\n\nif __name__ == \"__main__\":\n _demo()\n print(CELLS)\n",
"from typing import Optional, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pandas import DataFrame\n\nimport gdsfactory as gf\n\n\ndef plot_sparameters(\n df: DataFrame,\n logscale: bool = True,\n keys: Optional[Tuple[str, ...]] = None,\n **sim_settings,\n):\n \"\"\"Plots Sparameters from a pandas DataFrame.\n\n Args:\n df: Sparameters pandas DataFrame\n logscale: plots 20*log10(S)\n keys: list of keys to plot, plots all by default.\n\n Keyword Args:\n sim_settings: simulation settings for the write_sparameters_function\n\n \"\"\"\n\n w = df[\"wavelengths\"] * 1e3\n keys = keys or [\n key for key in df.keys() if key.lower().startswith(\"s\") and key.endswith(\"m\")\n ]\n\n for key in keys:\n if key in df:\n y = df[key]\n y = 20 * np.log10(y) if logscale else y\n plt.plot(w, y, label=key[:-1])\n else:\n raise ValueError(f\"{key} not in {df.keys()}\")\n plt.legend()\n plt.xlabel(\"wavelength (nm)\")\n plt.ylabel(\"|S| (dB)\") if logscale else plt.ylabel(\"|S|\")\n plt.show()\n\n\ndef plot_imbalance2x2(df: DataFrame, port1: str = \"s13m\", port2: str = \"s14m\") -> None:\n \"\"\"Plots imbalance in % for 2x2 coupler\"\"\"\n y1 = df[port1].values\n y2 = df[port2].values\n imbalance = y1 / y2\n x = df[\"wavelengths\"] * 1e3\n plt.plot(x, 100 * abs(imbalance))\n plt.xlabel(\"wavelength (nm)\")\n plt.ylabel(\"imbalance (%)\")\n plt.grid()\n\n\ndef plot_loss2x2(df: DataFrame, port1: str = \"s13m\", port2: str = \"s14m\") -> None:\n \"\"\"Plots imbalance in % for 2x2 coupler\"\"\"\n y1 = df[port1].values\n y2 = df[port2].values\n x = df[\"wavelengths\"] * 1e3\n plt.plot(x, abs(10 * np.log10(y1 ** 2 + y2 ** 2)))\n plt.xlabel(\"wavelength (nm)\")\n plt.ylabel(\"excess loss (dB)\")\n\n\nplot_loss1x2 = gf.partial(plot_loss2x2, port1=\"s13m\", port2=\"s12m\")\nplot_imbalance1x2 = gf.partial(plot_imbalance2x2, port1=\"s13m\", port2=\"s12m\")\n\n\nif __name__ == \"__main__\":\n import gdsfactory.simulation as sim\n\n df = sim.get_sparameters_data_lumerical(component=gf.components.mmi1x2)\n plot_sparameters(df, logscale=True)\n plt.show()\n",
"\"\"\"Waiting for https://github.com/omry/omegaconf/issues/725\n\"\"\"\nimport numpy as np\nimport gdsfactory as gf\n\n\[email protected]\ndef straight(\n length: float = 10.0,\n) -> gf.Component:\n c = gf.Component()\n c.info[\"width\"] = np.float64(3.2)\n return c\n\n\nif __name__ == \"__main__\":\n c = straight()\n",
"\"\"\"Sample AWG.\"\"\"\nimport numpy as np\n\nimport gdsfactory as gf\nfrom gdsfactory.component import Component\nfrom gdsfactory.cross_section import strip\nfrom gdsfactory.types import CrossSectionFactory\n\n\[email protected]\ndef free_propagation_region(\n width1: float = 2.0,\n width2: float = 20.0,\n length: float = 20.0,\n wg_width: float = 0.5,\n inputs: int = 1,\n outputs: int = 10,\n cross_section: CrossSectionFactory = strip,\n **kwargs,\n) -> Component:\n r\"\"\"\n\n .. code::\n\n length\n <-->\n /|\n / |\n width1| | width2\n \\ |\n \\|\n \"\"\"\n y1 = width1 / 2\n y2 = width2 / 2\n x = cross_section(**kwargs)\n o = x.info[\"cladding_offset\"]\n layers_cladding = x.info[\"layers_cladding\"] or []\n layer = x.info[\"layer\"]\n\n xpts = [0, length, length, 0]\n ypts = [y1, y2, -y2, -y1]\n\n c = gf.Component()\n c.add_polygon((xpts, ypts), layer=layer)\n\n if inputs == 1:\n c.add_port(\n \"o1\",\n midpoint=(0, 0),\n width=wg_width,\n orientation=180,\n layer=layer,\n )\n else:\n y = np.linspace(-width1 / 2 + wg_width / 2, width1 / 2 - wg_width / 2, inputs)\n y = gf.snap.snap_to_grid(y)\n for i, y in enumerate(y):\n c.add_port(\n f\"W{i}\",\n midpoint=(0, y),\n width=wg_width,\n orientation=0,\n layer=layer,\n )\n\n y = np.linspace(-width2 / 2 + wg_width / 2, width2 / 2 - wg_width / 2, outputs)\n y = gf.snap.snap_to_grid(y)\n for i, y in enumerate(y):\n c.add_port(\n f\"E{i}\",\n midpoint=(length, y),\n width=wg_width,\n orientation=0,\n layer=layer,\n )\n\n ypts = [y1 + o, y2 + o, -y2 - o, -y1 - o]\n\n for layer in layers_cladding:\n c.add_polygon((xpts, ypts), layer=layer)\n\n c.info[\"length\"] = length\n c.info[\"width1\"] = width1\n c.info[\"width2\"] = width2\n return c\n\n\[email protected]\ndef free_propagation_region_input(inputs: int = 1, **kwargs) -> Component:\n return free_propagation_region(inputs=inputs, **kwargs)\n\n\[email protected]\ndef free_propagation_region_output(\n inputs: int = 10, width1: float = 10.0, width2: float = 20.0, **kwargs\n) -> Component:\n return free_propagation_region(\n inputs=inputs, width2=width2, width1=width1, **kwargs\n )\n\n\[email protected]\ndef awg(\n arms: int = 10,\n outputs: int = 3,\n free_propagation_region_input_function=free_propagation_region_input,\n free_propagation_region_output_function=free_propagation_region_output,\n fpr_spacing: float = 50.0,\n) -> Component:\n \"\"\"Returns a basic Arrayed Waveguide grating.\n\n Args:\n arms: number of arms\n outputs: number of outputs\n free_propagation_region_input_function: for input\n free_propagation_region_output_function: for output\n fpr_spacing: x separation between input/output FPR\n\n \"\"\"\n c = Component()\n fpr_in = free_propagation_region_input_function(\n inputs=1,\n outputs=arms,\n )\n fpr_out = free_propagation_region_output_function(\n inputs=outputs,\n outputs=arms,\n )\n\n fpr_in_ref = c.add_ref(fpr_in)\n fpr_out_ref = c.add_ref(fpr_out)\n\n fpr_in_ref.rotate(90)\n fpr_out_ref.rotate(90)\n\n fpr_out_ref.x += fpr_spacing\n routes = gf.routing.get_bundle(\n fpr_in_ref.get_ports_list(prefix=\"E\"), fpr_out_ref.get_ports_list(prefix=\"E\")\n )\n\n c.lengths = []\n for route in routes:\n c.add(route.references)\n c.lengths.append(route.length)\n\n c.add_port(\"o1\", port=fpr_in_ref.ports[\"o1\"])\n\n for i, port in enumerate(fpr_out_ref.get_ports_list(prefix=\"W\")):\n c.add_port(f\"E{i}\", port=port)\n\n c.delta_length = np.mean(np.diff(c.lengths))\n return c\n\n\nif __name__ == \"__main__\":\n c = free_propagation_region(inputs=2, outputs=4)\n # 
print(c.ports.keys())\n c = awg()\n c.show()\n",
"from typing import Tuple\n\nimport numpy as np\nimport pytest\n\nimport gdsfactory as gf\nfrom gdsfactory.component import Component\nfrom gdsfactory.geometry import check_exclusion\n\n\ndef get_device(\n space: float,\n width: float = 0.5,\n layer1: Tuple[int, int] = (1, 0),\n layer2: Tuple[int, int] = (2, 0),\n) -> Component:\n c = gf.Component()\n r1 = c << gf.components.rectangle(size=(width, width), layer=layer1)\n r2 = c << gf.components.rectangle(size=(width, width), layer=layer2)\n r1.xmax = 0\n r2.xmin = space\n return c\n\n\[email protected](\n \"space,min_space,area_expected\", [(0.16, 0.1, 0), (0.1, 0.11, 50000)]\n)\ndef test_exclusion(space: float, min_space: float, area_expected: int) -> None:\n c = get_device(space=space)\n area = check_exclusion(c, min_space=min_space)\n assert np.isclose(area, area_expected)\n\n\nif __name__ == \"__main__\":\n test_exclusion(0.16, 0.1, 0)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.linspace"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.log10",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.float64"
],
[
"numpy.diff",
"numpy.linspace"
],
[
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
msc5/attitudes
|
[
"89a3ab7aaf98e4985f1e0e6ac7e353a0798d677f"
] |
[
"python/quaternion.py"
] |
[
"\nimport numpy as np\n\n\nclass vector:\n\n def cross(v):\n v1, v2, v3 = v.squeeze()\n return np.array([\n [0, -v3, v2],\n [v3, 0, -v1],\n [-v2, v1, 0]\n ])\n\n\nclass quaternion:\n\n def from_theta(e, theta):\n n = 1 if not hasattr(theta, '__len__') else len(theta)\n q = np.append(e.reshape((3, 1)) * np.sin(theta / 2), np.cos(theta / 2))\n return q.reshape((4, n))\n\n def psi(q):\n qv = q[0:3]\n q4 = q[3]\n qcross = vector.cross(qv)\n a = q[3] * np.eye(3) - qcross\n b = - (qv.T).reshape((1, 3))\n return np.concatenate((a, b), axis=0)\n\n def xi(q):\n qv = q[0:3]\n q4 = q[3]\n qcross = vector.cross(qv)\n a = q[3] * np.eye(3) + qcross\n b = - (qv.T).reshape((1, 3))\n return np.concatenate((a, b), axis=0)\n\n def cross(q):\n return np.append(quaternion.psi(q), q, axis=1)\n\n def dot(q):\n return np.append(quaternion.xi(q), q, axis=1)\n\n def A(q):\n return quaternion.xi(q).T @ quaternion.psi(q)\n\n\nif __name__ == \"__main__\":\n\n from rich import print\n\n q = quaternion\n\n rot = q.from_theta(np.array([0, 0, 1]), np.pi / 2)\n print(rot, rot.shape)\n\n rot_xi = q.xi(rot)\n print(rot_xi, rot_xi.shape)\n\n rot_cross = q.cross(rot)\n print(rot_cross, rot_cross.shape)\n\n rot_A = q.A(rot)\n print(rot_A, rot_A.shape)\n"
] |
[
[
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
matthewli125/SRCFD
|
[
"77d117c7a98996c3b64c001c79eae04aa6a545f8"
] |
[
"neural_net/fsrcnn_predict.py"
] |
[
"from keras.models import load_model\r\nimport h5py\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nfrom keras.optimizers import Adam\r\nimport os\r\nfrom keras import backend as K\r\nimport tensorflow as tf\r\n\r\ndef PSNR(y_true, y_pred):\r\n\tmax_pixel = 1.0\r\n\treturn 10.0 * tf_log10((max_pixel ** 2) / (K.mean(K.square(y_pred - y_true))))\r\n\r\ndef tf_log10(x):\r\n numerator = tf.log(x)\r\n denominator = tf.log(tf.constant(10, dtype=numerator.dtype))\r\n return numerator / denominator\r\n\r\n\r\ndata = h5py.File(\"D:/water_large_sorted.h5\")\r\nx = data[\"lowres\"][:48]\r\n##x2 = []\r\n##x = np.squeeze(x)\r\n##for i in range(len(x)):\r\n## temp = cv2.resize(x[i], (16, 16), interpolation = cv2.INTER_CUBIC)\r\n## temp = temp.reshape(16, 16, 1)\r\n## x2.append(temp)\r\n##x2 = np.asarray(x2)\r\n\r\nmodelname = input()\r\n\r\nmodel = load_model(modelname, custom_objects = {'PSNR' : PSNR})\r\n\r\nresult = model.predict(x, verbose = 1)\r\nresult = result.reshape(48, 32, 32)\r\n\r\ny = data[\"highres\"][:48]\r\ny = y.reshape(48, 32, 32)\r\nfor i in range(48):\r\n plt.imshow(y[i])\r\n plt.savefig(\"D:/SRCFD/truth/truth_fsrcnn {}.png\".format(i))\r\n\r\nfor i in range(48):\r\n plt.imshow(result[i])\r\n plt.savefig(\"D:/SRCFD/neuralnetoutput/neural net output_fsrcnn {}.png\".format(i))\r\n\r\n\r\n\r\nimport glob\r\nimport moviepy.editor as mpy\r\nimport time\r\n\r\ntime = ((time.asctime().replace(\" \", \" \")).replace(\" \", \"_\")).replace(\":\", \"-\")\r\n\r\nfile_list = glob.glob('D:/SRCFD/neuralnetoutput/*.png')\r\nlist.sort(file_list, key=lambda x: int(x.split(' ')[3].split('.png')[0]))\r\nprint(file_list)\r\nclip = mpy.ImageSequenceClip(file_list, fps=24)\r\nclip.write_gif('neuralnet_fsrcnn {}.gif'.format(time), fps=24)\r\n\r\nfile_list = glob.glob('D:/SRCFD/truth/*.png')\r\nlist.sort(file_list, key=lambda x: int(x.split(' ')[1].split('.png')[0]))\r\nprint(file_list)\r\nclip = mpy.ImageSequenceClip(file_list, fps=24)\r\nclip.write_gif('truth_fsrcnn.gif'.format(time), fps=24)\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"tensorflow.constant",
"tensorflow.log"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
PUTvision/sequentations
|
[
"1ecfa80918f87aa6d9d43a18e7a26bec27f9686f"
] |
[
"tests/test_augmentations_transform.py"
] |
[
"import pytest\nimport numpy as np\n\nimport context\nfrom sequentations.augmentations.transforms import RandomGamma, ColorJitter, Normalize\n\n\ndef test_random_gamma():\n arr = np.full((2, 100, 100, 3), fill_value=127, dtype=np.uint8)\n aug = RandomGamma()\n data = aug(image=arr, force_apply=True)['image']\n\n assert np.shape(data) == np.shape(arr)\n assert np.count_nonzero(data != 127) != 0\n\n first_slice = arr[0]\n for current_slice in arr[1:]:\n assert np.all(first_slice == current_slice)\n\n\ndef test_color_jitter():\n arr = np.full((2, 100, 100, 3), fill_value=127, dtype=np.uint8)\n aug = ColorJitter()\n data = aug(image=arr, force_apply=True)['image']\n\n assert np.shape(data) == np.shape(arr)\n assert np.count_nonzero(data != 127) != 0\n\n first_slice = arr[0]\n for current_slice in arr[1:]:\n assert np.all(first_slice == current_slice)\n\n\ndef test_normalize_v2():\n arr = np.zeros((2, 100, 100, 3), dtype=np.uint8)\n aug = Normalize(mean=(0, 0, 0), std=(1, 1, 1))\n data = aug(image=arr, force_apply=True)['image']\n\n assert np.isclose(data.max(), 0.0)\n assert np.isclose(data.mean(), 0.0)\n assert np.isclose(data.std(), 0.0)\n assert np.isclose(data.min(), 0.0)\n\n arr = np.ones((1, 100, 100, 3), dtype=np.uint8)*255\n aug = Normalize(mean=(0, 0, 0), std=(1, 1, 1))\n data = aug(image=arr, force_apply=True)['image']\n\n assert np.isclose(data.max(), 1.0)\n assert np.isclose(data.mean(), 1.0)\n assert np.isclose(data.std(), 0.0)\n assert np.isclose(data.min(), 1.0)"
] |
[
[
"numpy.full",
"numpy.all",
"numpy.ones",
"numpy.shape",
"numpy.count_nonzero",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Stanford-BIS/pystorm
|
[
"4acaaee78a04b69ad17554126018016800e5a140",
"4acaaee78a04b69ad17554126018016800e5a140"
] |
[
"pystorm/hal/net_builder.py",
"pystorm/examples/measure_power_interleaved.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom pystorm.hal.run_control import RunControl\nfrom pystorm.hal import data_utils\n\nclass NetBuilder(object):\n\n def __init__(self, HAL, net=None):\n \"\"\"Initialize NetBuilder:\n\n Inputs:\n =======\n HAL (HAL object) : \n net (hal.neuromorph.graph object, default None) : \n User may provide a custom network they constructed.\n If no network is supplied, typically one will be added with a \n call like NetBuilder.create_single_pool_net()\n \"\"\"\n self.hal = HAL\n self.net = net\n\n def add_net(self, net):\n self.net = net\n\n def create_single_pool_net_from_spec(self, ps, decoders=None):\n return self.create_single_pool_net(\n ps.Y, ps.X, \n tap_matrix=ps.TPM, \n decoders=decoders, \n biases=ps.biases,\n gain_divs=ps.gain_divisors,\n loc_yx=ps.loc_yx,\n diffusor_cuts_yx=ps.diffusor_cuts_yx)\n\n def create_single_pool_net(self, Y, X, tap_matrix=None, decoders=None, \n biases=0, gain_divs=1, loc_yx=(0, 0), diffusor_cuts_yx=None):\n \"\"\"Creates a Network with a single Pool\n \n Inputs:\n =======\n Y (int) : number of rows in the pool\n X (int) : number of columns in the pool\n tap_matrix ((N, dim) array or None (default)) :\n array of tap point/dimension assignments to each neuron\n if provided, Network will have an Input connected to its Pool\n if None, Network will not have an Input\n decoders ((dim, N) array or None (default)) :\n array of each neuron's decoding weight in each dimension\n if provided, Network will have an Ouput connected to its Pool\n if None, Network will not have an Output\n biases ((N,) int array or int) :\n bias bits for each neuron\n gain_divs ((N,) int array or int) :\n gain divisor bits for each neuron\n\n Returns:\n ========\n Network object\n \"\"\"\n N = Y * X\n\n if tap_matrix is None:\n Din = 0\n tap_spec = np.zeros((N, 1)) # have to put something in, (N, [[]]) might work\n else:\n if isinstance(tap_matrix, list):\n Din = len(tap_matrix)\n tap_spec = (N, tap_matrix)\n else:\n Din = tap_matrix.shape[1]\n tap_spec = tap_matrix\n assert tap_spec.shape[0] == N, (\n \"tap matrix has {} entries but Y*X={}\".format(tap_spec.shape[0], Y*X))\n\n if decoders is None:\n Dout = 0\n else:\n Dout = decoders.shape[0]\n\n from pystorm.hal.neuromorph import graph # to describe HAL/neuromorph network\n net = graph.Network(\"net\")\n\n # decoders are initially zero\n # we remap them later (without touching the rest of the network) using HAL.remap_weights()\n net.pool = net.create_pool(\"p1\", tap_spec, \n biases=biases, gain_divisors=gain_divs, \n xy=(X, Y), user_xy_loc=(loc_yx[1], loc_yx[0]),\n diffusor_cuts_yx=diffusor_cuts_yx)\n\n if Dout > 0:\n b1 = net.create_bucket(\"b1\", Dout)\n net.output = net.create_output(\"o1\", Dout)\n net.decoder_conn = net.create_connection(\"c_p1_to_b1\", net.pool, b1, decoders)\n net.create_connection(\"c_b1_to_o1\", b1, net.output, None)\n if Din > 0:\n net.input = net.create_input(\"i1\", Din)\n net.create_connection(\"c_i1_to_p1\", net.input, net.pool, None)\n\n self.net = net\n return net\n\n @staticmethod\n def to_synspace(nrny, nrnx):\n \"\"\"converts y, x nrn coordinate to synapse coordinate\"\"\"\n return nrny // 2, nrnx // 2\n\n @staticmethod\n def create_default_yx_taps(SY, SX, D, bad_syn=None):\n \"\"\"create 'good' (i.e. maximally adjacently orthogonal) arrays of synapses\n\n Inputs:\n ======\n SY, SX (int, int) : dimensions of grid to create synapses in\n D (int) : dimensionality of representation\n bad_syn (pandas dataframe indexed y,x or (y, x) np array) :\n synapses to avoid (e.g. 
because of high bias or long T_PE)\n\n Returns:\n =======\n (SY, SX, D)-array of tap points\n can be converted to (Y*X, D) what Pool takes as tap_spec \n with syn_taps_to_nrn_taps()\n \"\"\"\n \n if isinstance(bad_syn, np.ndarray) and bad_syn.shape != (SY, SX):\n raise ValueError(\"bad_syn should be 2D array-like and shape (SY, SX)\")\n\n if bad_syn is None:\n bad_syn = np.array([[False] * SY] * SX, dtype=bool)\n\n def get_bad_syn(y, x):\n if isinstance(bad_syn, pd.DataFrame):\n return bad_syn.loc[y, x]\n else:\n return bad_syn[y, x]\n \n def find_closest_not_bad(y, x):\n # XXX unused\n # search in expanding manhattan radii\n # doing this dumb-ly, O(N**2) instead of O(N)\n # really want to encode an outward spiral\n R = 1\n while True:\n if R == max(SX, SY):\n assert(False)\n\n ylo = max(y - R, 0)\n yhi = min(y + R, SY - 1)\n xlo = max(x - R, 0)\n xhi = min(x + R, SX - 1)\n\n # now pick the first good one\n for y in range(ylo, yhi):\n for x in range(xlo, xhi):\n\n if not get_bad_syn(y, x):\n return y, x\n\n R += 1\n\n def eliminate_projections(base_vect, neighbors):\n \"\"\"eliminate <neighbors> projections on base_vect\"\"\"\n if len(neighbors) == 1:\n proj = np.dot(neighbors[0], np.dot(neighbors[0], base_vect))\n base_vect -= proj\n assert(np.abs(np.dot(neighbors[0], base_vect)) < 1e-10)\n elif len(neighbors) > 1:\n to_elim = np.vstack(neighbors)\n U, S, VT = np.linalg.svd(to_elim)\n VpT = VT[:len(neighbors), :]\n proj = np.dot(VpT.T, np.dot(VpT, base_vect))\n base_vect -= proj\n assert(np.sum(np.abs(np.dot(to_elim, base_vect))) < 1e-10)\n return base_vect \n\n def get_cartesian_vector_set(D):\n vects = np.zeros((2*D, D))\n for d in range(D):\n vects[2*d, d] = 1\n vects[2*d+1, d] = -1\n return vects\n\n def get_random_unit_vector(D):\n gaussian = np.random.randn(D)\n return gaussian / np.linalg.norm(gaussian)\n\n\n # for D == 1, use on/off halves\n if D == 1:\n tap_matrix = np.zeros((SY, SX))\n for y in range(SY):\n for x in range(SX):\n if not get_bad_syn(y, x):\n if x < SX // 2:\n tap_matrix[y, x] = 1\n else:\n tap_matrix[y, x] = -1\n\n else:\n # can expose these later, I suppose\n use_mean = True\n cartesian = True\n cartesian_vects = get_cartesian_vector_set(D)\n\n # pick a random standard basis direction for each tap\n # try to keep adjacent vectors orthogonal\n\n # raster-scan, considering already-set vectors\n # neighborhood under consideration grows with dimensions\n tap_matrix = np.zeros((SY, SX, D), dtype=int)\n for y in range(SY):\n for x in range(SX):\n if not get_bad_syn(y,x):\n neighbors = []\n if D >= 2:\n if x > 0:\n if ~get_bad_syn(y, x - 1):\n neighbors.append('l')\n elif D == 2 and y > 0: # helps 2D with few taps\n neighbors.append('u')\n elif D == 2 and y > 0: # helps 2D with few taps\n neighbors.append('u')\n if D >= 3:\n if y > 0:\n neighbors.append('u')\n if D >= 4:\n if x > 0 and y > 0:\n neighbors.append('ul')\n if D >= 5:\n if x < grid_pts_X - 1 and y > 0:\n neighbors.append('ur')\n\n elim_vects = []\n for n in neighbors:\n if n == 'l':\n elim_vects.append(tap_matrix[y, x - 1])\n if n == 'u':\n elim_vects.append(tap_matrix[y - 1, x])\n if n == 'ul':\n elim_vects.append(tap_matrix[y - 1, x - 1])\n if n == 'ur':\n elim_vects.append(tap_matrix[y - 1, x + 1])\n\n base_vect_norm = 0\n fails = 0\n\n # debugging info\n base_vect_tries = []\n base_vect_elims = []\n base_vect_tries_cart = []\n\n while True:\n # now assign the base_vect to eliminate projections from neighbors\n # keep trying if we pick the base_vect badly\n\n base_vect = get_random_unit_vector(D)\n 
base_vect_tries.append(base_vect)\n\n # if convert completely random vector into its nearest\n # standard_basis vector\n if cartesian:\n similarities = np.dot(cartesian_vects, base_vect)\n base_vect = cartesian_vects[np.argmax(similarities)].copy()\n base_vect_tries_cart.append(base_vect)\n\n # eliminate projections\n # the base_vect we chose may be in the span of the neighbors\n # if so, try again, up to some limit\n try:\n base_vect = eliminate_projections(base_vect, elim_vects)\n base_vect_elims.append(base_vect)\n base_vect_norm = np.linalg.norm(base_vect)\n\n # if taking the neighbor's projections out of the\n # random vector leaves you with anything, break out\n if base_vect_norm > 1e-10:\n candidate_vect = base_vect / base_vect_norm\n\n # for any vector that \"works\", so does its opposite\n # use the one that moves the mean encoder closer to zero\n # XXX can also take into account if not orthogonal to \n # some neighbors, esp for D == 2\n if use_mean:\n curr_sum = np.sum(tap_matrix, axis=(0,1))\n pos_norm = np.linalg.norm(curr_sum + candidate_vect)\n neg_norm = np.linalg.norm(curr_sum - candidate_vect)\n if neg_norm < pos_norm:\n candidate_vect *= -1\n\n break # leave while with candidate_vect\n\n # shouldn't happen, but try again if it does \n except AssertionError: \n base_vect_norm = 0\n\n # print debug info if something goes really wrong\n fails += 1\n if fails > 100:\n print(\"failed at y,x: \", y, \",\", x)\n print(\"tap matrix neighborhood\")\n print(tap_matrix[y-1:y+1, x-1:x+1, :])\n print(\"last ten tries:\")\n print(\"random vector candidates\")\n print(np.array(base_vect_tries[-10:]))\n print(\"closest cartesian vector\")\n print(np.array(base_vect_tries_cart[-10:]))\n print(\"after eliminating neighbor's projections\")\n print(np.array(base_vect_elims[-10:]))\n\n raise RuntimeError(\"failed to get orthogonal vector 100 times\" + \n \"something is probably wrong with neighborhood logic\")\n\n tap_matrix[y, x, :] = candidate_vect\n\n tap_matrix = tap_matrix.reshape((SY, SX, D))\n for i in range(D):\n items = np.nonzero(tap_matrix[:, i])[0]\n if len(items) % 2 == 1:\n tap_matrix[items[-1], i] = 0\n\n return tap_matrix\n\n @staticmethod\n def get_diff_cuts_to_break_pool_in_half(height, width):\n x = width // 2\n cut_yxs = []\n for y in range(0, height, 4):\n cut_yxs.append((y, x + 1, 'left'))\n return cut_yxs\n\n def break_pool_in_half(self, pool):\n \"\"\"Opens the diffusor down the middle of a pool. \n\n Good for 1D pools with default tap points (improves yield).\n \n Parameters:\n ==========\n pool (Pool object) the pool (in the currently mapped network) to cut\n \"\"\" \n\n if self.net is None:\n raise RuntimeError(\"no Network attached to NetBuilder\")\n if self.hal.last_mapped_network != self.net:\n raise RuntimeError(\"Trying to run un-mapped network. Run map first.\")\n if pool not in self.net.get_pools():\n raise ValueError(\"supplied pool was not in the current network\")\n \n loc_y, loc_x = pool.mapped_yx\n cut_yxs = NetBuilder.get_diff_cuts_to_break_pool_in_half(pool.height, pool.width)\n for y, x, direction in cut_yxs:\n self.hal.set_diffusor(y + loc_y, x + loc_x, direction, 'broken')\n\n def open_all_diff_cuts(self):\n \"\"\"Opens all the diffusor cuts (no current passes)\n\n works on an already-mapped network. 
Remapping will erase this state.\n \"\"\"\n\n # this isn't strictly necessary (the fn doesn't operate on self.net)\n # but it does enforce that the network is already mapped\n if self.net is None:\n raise RuntimeError(\"no Network attached to NetBuilder\")\n if self.hal.last_mapped_network != self.net:\n raise RuntimeError(\"Trying to run un-mapped network. Run map first.\")\n\n CORE_ID = 0 \n # connect diffusor around pools\n for tile_id in range(256):\n self.hal.driver.OpenDiffusorAllCuts(CORE_ID, tile_id)\n\n @staticmethod\n def syn_taps_to_nrn_taps(tap_matrix, spacing=1):\n SY, SX, D = tap_matrix.shape\n Y = SY * 2 * spacing\n X = SX * 2 * spacing\n nrn_tap_matrix = np.zeros((Y, X, D))\n for d in range(D):\n nrn_tap_matrix[::2*spacing, ::2*spacing, d] = tap_matrix[:, :, d]\n return nrn_tap_matrix.reshape((Y * X, D))\n\n @staticmethod\n def make_taps_even(taps):\n \"\"\"taking a tap list or tap matrix, make the number of taps per dim even\n modifies taps, removing taps to meet the eveness condition\n \"\"\"\n if isinstance(taps, list):\n for tap_dim in taps:\n if len(tap_dim) % 2 == 1:\n tap_dim = tap_dim[:-1]\n else:\n dims = taps.shape[1]\n for d in range(dims):\n tap_dim = taps[:, d]\n if int(np.sum(np.abs(tap_dim))) % 2 == 1:\n nonzero_idxs = np.arange(len(tap_dim))[tap_dim != 0]\n rand_nonzero_idx = nonzero_idxs[np.random.randint(np.sum(tap_dim != 0))]\n taps[rand_nonzero_idx, d] = 0\n\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nfrom pystorm.hal import HAL\nHAL = HAL()\n\nfrom pystorm.hal.neuromorph import graph # to describe HAL/neuromorph network\n\nfrom pystorm.PyDriver import bddriver as bd # expose Driver functions directly for debug (cool!)\n\nfrom sys import exit\n\nnp.random.seed(0)\n\n\n###########################################\n# default network size parameters\n\nwidth = 64\nheight = 64\nDin = 1\nDint = 1\nDout = 1\nwidth_height = (width, height)\nd_range=(1,1)\nt_range=(1,1)\nexp_duration = 10\ntaps_per_dim = 8\n\nCORE = 0\n\n###########################################\n# misc driver parameters\ndownstream_time_res = 100 # ns\nupstream_time_res = 100000 # ns\n\nHAL.set_time_resolution(downstream_time_res, upstream_time_res)\n\ndef get_spike_times(num_spks, duration):\n start_time = ns_to_sec(HAL.driver.GetFPGATime()) + 0.5 # seconds\n end_time = start_time + duration # seconds\n # Calculate number of spikes to send based on input rate\n spike_times = np.linspace(start_time, end_time, num_spks)\n\n return spike_times\n\ndef create_syn_spike_train(tap_points):\n spikes = []\n for syn_addr, tap in enumerate(tap_points):\n## print(\"Tap{}: {}\".format(tap_addr, tap))\n if tap[0] == 1:\n # SYNAPSE_SIGN=1 means positive spike\n syn_word = bd.PackWord([(bd.InputSpike.SYNAPSE_ADDRESS, syn_addr), (bd.InputSpike.SYNAPSE_SIGN, 1)])\n# print(\"{}\\t{:09b}\".format(syn_word, syn_word))\n spikes.append(syn_word)\n elif tap[0] == -1:\n # SYNAPSE_SIGN=0 means negative spike\n syn_word = bd.PackWord([(bd.InputSpike.SYNAPSE_ADDRESS, syn_addr), (bd.InputSpike.SYNAPSE_SIGN, 0)])\n# print(\"{}\\t{:09b}\".format(syn_word, syn_word))\n spikes.append(syn_word)\n return np.array(spikes)\n\ndef set_tap_matrix(width, height, Din=1, taps_per_dim=8):\n tap_matrix = np.zeros((width*height, Din))\n if Din == 1:\n # one synapse per 4 neurons\n for x in range(0, width, 2):\n for y in range(0, height, 2):\n n = y * width + x\n if x < width // 2:\n tap_matrix[n, 0] = 1\n else:\n tap_matrix[n, 0] = -1\n elif Din > 1:\n for d in range(Din):\n for s in [-1, 1]:\n if s == -1:\n num_taps = taps_per_dim // 2\n else:\n num_taps = taps_per_dim - taps_per_dim // 2\n\n for t in range(num_taps):\n while True:\n x = np.random.randint(width//2)\n y = np.random.randint(height//2)\n n = (2*y) * width + (2*x)\n if np.all(tap_matrix[n, :] == 0): # keep trying until an unused synapse is found\n tap_matrix[n, d] = s\n break\n else:\n print(\"Din must be 1 or greater\")\n assert(False)\n\n return tap_matrix\n\ndef create_decode_network(width=width, height=height, Din=Din, Dout=Dout, d_range=d_range):\n \"\"\"\n data flow with traffic on:\n\n input IO ->\n tag horn ->\n\n (pre-fifo valve) ->\n FIFO ->\n (post-fifo valve) ->\n\n TAT ->\n\n AER_tx ->\n neurons ->\n AER_rx ->\n (neuron output valve) ->\n\n PAT ->\n accumulator ->\n\n (pre-fifo valve) ->\n FIFO ->\n (post-fifo valve) ->\n\n TAT ->\n\n tag funnel ->\n output IO\n \"\"\"\n\n N = width * height\n\n net = graph.Network(\"net\")\n\n min_d, max_d = d_range\n decoders = np.ones((Dout, N)) * (max_d - min_d) + min_d\n\n tap_matrix = set_tap_matrix(width, height)\n# tap_matrix = np.zeros((N, Din))\n# if Din == 1:\n# # one synapse per 4 neurons\n# for x in range(0, width, 2):\n# for y in range(0, height, 2):\n# n = y * width + x\n# if x < width // 2:\n# tap_matrix[n, 0] = 1\n# else:\n# tap_matrix[n, 0] = -1\n# else:\n# print(\"need to implement reasonable taps for Din > 1\")\n# assert(False)\n\n i1 = net.create_input(\"i1\", Din)\n p1 
= net.create_pool(\"p1\", tap_matrix)\n b1 = net.create_bucket(\"b1\", Dout)\n o1 = net.create_output(\"o1\", Dout)\n\n net.create_connection(\"c_i1_to_p1\", i1, p1, None)\n decoder_conn = net.create_connection(\"c_p1_to_b1\", p1, b1, decoders)\n net.create_connection(\"c_b1_to_o1\", b1, o1, None)\n\n return net\n\ndef create_transform_network(width=width, height=height, Din=Din, Dint=Dint, Dout=Dout, d_range=d_range, t_range=t_range):\n\n N = width * height\n\n net = graph.Network(\"net\")\n\n min_d, max_d = d_range\n decoders = np.ones((Dint, N)) * (max_d - min_d) - min_d\n\n min_t, max_t = t_range\n trains = np.ones((Dout, Dint)) * (max_t - min_t) - min_t\n\n tap_matrix = set_tap_matrix(width, height)\n# tap_matrix = np.zeros((N, Din))\n# if Din == 1:\n# # one synapse per 4 neurons\n# for x in range(0, width, 2):\n# for y in range(0, height, 2):\n# n = y * width + x\n# if x < width // 2:\n# tap_matrix[n, 0] = 1\n# else:\n# tap_matrix[n, 0] = -1\n# else:\n# print(\"need to implement reasonable taps for Din > 1\")\n# assert(False)\n\n i1 = net.create_input(\"i1\", Din)\n p1 = net.create_pool(\"p1\", tap_matrix, xy=(width, height))\n b1 = net.create_bucket(\"b1\", Dint)\n b2 = net.create_bucket(\"b1\", Dout)\n o1 = net.create_output(\"o1\", Dout)\n\n net.create_connection(\"c_i1_to_p1\", i1, p1, None)\n net.create_connection(\"c_p1_to_b1\", p1, b1, decoders)\n net.create_connection(\"c_b1_to_b2\", b1, b2, trans)\n net.create_connection(\"c_b2_to_o1\", b2, o1, None)\n\n return net\n\ndef create_decode_encode_network(width=width, height=height, Dint=Dint, d_range=d_range, taps_per_dim=taps_per_dim, measure_tags=False, is_recur=False):\n\n N = width * height\n\n net = graph.Network(\"net\")\n\n min_d, max_d = d_range\n decoders = np.ones((Dint, N)) * (max_d - min_d) + min_d\n\n tap_matrix = set_tap_matrix(width, height, Dint, taps_per_dim)\n# tap_matrix = np.zeros((N, Dint))\n# if Dint == 1:\n# # one synapse per 4 neurons\n# for x in range(0, width, 2):\n# for y in range(0, height, 2):\n# n = y * width + x\n# if x < width // 2:\n# tap_matrix[n, 0] = 1\n# else:\n# tap_matrix[n, 0] = -1\n# else:\n# for d in range(Dint):\n# for s in [-1, 1]:\n# if s == -1:\n# num_taps = taps_per_dim // 2\n# else:\n# num_taps = taps_per_dim - taps_per_dim // 2\n#\n# for t in range(num_taps):\n# while True:\n# x = np.random.randint(width//2)\n# y = np.random.randint(height//2)\n# n = (2*y) * width + (2*x)\n# if np.all(tap_matrix[n, :] == 0): # keep trying until an unused synapse is found\n# tap_matrix[n, d] = s\n# break\n\n p1 = net.create_pool(\"p1\", tap_matrix)\n b1 = net.create_bucket(\"b1\", Dint)\n if not is_recur:\n p2 = net.create_pool(\"p2\", tap_matrix)\n\n if measure_tags:\n o1 = net.create_output(\"o1\", Dint)\n\n net.create_connection(\"c_p1_to_b1\", p1, b1, decoders)\n\n if not is_recur:\n net.create_connection(\"c_b1_to_p2\", b1, p2, None)\n else:\n net.create_connection(\"c_b1_to_p1\", b1, p1, None)\n\n if measure_tags:\n net.create_connection(\"c_b1_to_o1\", b1, o1, None)\n\n return net\n\n###########################################\n# define different experiments, each measuring the throughput of a different component\n\ndef ns_to_sec(ns):\n return ns * 1e-9\n\ndef names_to_dict(names, locs):\n return dict( (name, locs[name]) for name in names )\n\nclass Experiment(object):\n\n duration = exp_duration # seconds\n\n def run(self):\n assert(False and \"derived class must implement run()\")\n\n # utility functions, commonly used by derived experiments\n def start_all_neurons(self):\n for i in 
range(4096):\n HAL.driver.EnableSoma(CORE, i)\n HAL.driver.Flush()\n\n def kill_all_neurons(self):\n for i in range(4096):\n HAL.driver.DisableSoma(CORE, i)\n HAL.driver.Flush()\n\n def set_default_slow_dac_values(self):\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_ADC_BIAS_1 , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_ADC_BIAS_2 , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_EXC , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_DC , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_INH , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_LK , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_G , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_R , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET, 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_REF , 1)\n\n def set_default_fast_dac_values(self):\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_ADC_BIAS_1 , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_ADC_BIAS_2 , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_EXC , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_DC , 544)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_INH , 512)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_LK , 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD , 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU , 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_G , 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_R , 1)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET, 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_REF , 1024)\n\n def set_neurons_slow(self):\n self.set_default_slow_dac_values()\n for i in range(4096):\n HAL.driver.SetSomaGain(CORE, i, bd.bdpars.SomaGainId.ONE_FOURTH)\n HAL.driver.SetSomaOffsetSign(CORE, i, bd.bdpars.SomaOffsetSignId.NEGATIVE)\n HAL.driver.SetSomaOffsetMultiplier(CORE, i, bd.bdpars.SomaOffsetMultiplierId.ZERO)\n\n def set_neurons_fast(self):\n self.set_default_fast_dac_values()\n for i in range(4096):\n HAL.driver.SetSomaGain(CORE, i, bd.bdpars.SomaGainId.ONE)\n HAL.driver.SetSomaOffsetSign(CORE, i, bd.bdpars.SomaOffsetSignId.POSITIVE)\n HAL.driver.SetSomaOffsetMultiplier(CORE, i, bd.bdpars.SomaOffsetMultiplierId.THREE)\n\n def make_enabled_neurons_spike(self, bias):\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET, bias)\n for i in range(4096):\n HAL.driver.SetSomaGain(CORE, i, bd.bdpars.SomaGainId.ONE)\n HAL.driver.SetSomaOffsetSign(CORE, i, bd.bdpars.SomaOffsetSignId.POSITIVE)\n HAL.driver.SetSomaOffsetMultiplier(CORE, i, bd.bdpars.SomaOffsetMultiplierId.THREE)\n\n\n def check_outputs(self, outputs, obj, max_Dout):\n if not np.all(outputs[:,1] == obj):\n print('ERROR: got unexpected output object')\n exit(-1)\n if not np.all(outputs[:,2] <= max_Dout):\n print('ERROR: got unexpected output dimension')\n exit(-1)\n\n def get_output_count(self, outputs, obj, max_Dout):\n if outputs.shape[0] > 0:\n self.check_outputs(outputs, obj, max_Dout)\n\n total_count = np.sum(outputs[:,3])\n #print(\"tag count: {}\".format(total_count))\n else:\n total_count = 0\n\n return total_count\n\n def compute_output_rate(self, outputs, obj, max_Dout):\n if outputs.shape[0] > 0:\n self.check_outputs(outputs, 
obj, max_Dout)\n\n min_time = np.min(outputs[:,0])\n max_time = np.max(outputs[:,0])\n\n total_count = np.sum(outputs[:,3])\n\n rate = total_count / ns_to_sec(max_time - min_time)\n else:\n rate = 0\n\n return rate\n\n def make_fast_synapse(self):\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD, 1024)\n HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU, 1024)\n\n###########################################\n# Get baseline static power\n\nclass Static(Experiment):\n\n def __init__(self, duration=Experiment.duration):\n self.pars = names_to_dict([\"duration\"], locals())\n self.results = {}\n self.description = \"don't map any network, just measure baseline power\"\n\n def run(self):\n HAL.driver.InitBD()\n self.kill_all_neurons()\n # nothing to do, neurons should be killed without mapping\n\n # Compare bias influence on static population of neurons\n# self.set_neurons_slow()\n# input(\"Press Enter to go to fast mode\")\n# self.set_neurons_fast()\n\n print(\"Unmapped network for baseline power, measure power now\")\n time.sleep(self.pars[\"duration\"])\n\n###########################################\n# Get AER tx power\n\nclass AERTX(Experiment):\n # counting setup:\n # default\n # power setup:\n # neurons -> (stopped at post-neuron valve)\n\n def __init__(self, soma_bias=2, d_val=.1, duration=Experiment.duration):\n self.pars = names_to_dict([\"soma_bias\", \"duration\", \"d_val\"], locals())\n self.results = {}\n self.description = \"measure AER xmitter power. Pars: \" + str(self.pars)\n\n def count_after_experiment(self, net, start_time):\n HAL.stop_traffic()\n end_time = time.time()\n time.sleep(.1)\n # all decoders are 1, output count is spike count\n outputs = HAL.get_outputs()\n tag_count = self.get_output_count(outputs, net.get_outputs()[0], 0)\n \n tag_rate = tag_count/(end_time-start_time)\n print(\"measured\", tag_rate, \"accumulator outputs per second\")\n spike_rate = tag_rate / self.pars[\"d_val\"] \n print(\"inferred\", spike_rate, \"spikes per second\")\n\n return spike_rate, tag_rate\n\n def run(self):\n net = create_decode_network(width=64, height=64, d_range=(self.pars[\"d_val\"], self.pars[\"d_val\"]))\n HAL.map(net)\n\n # give the neurons some juice\n self.set_neurons_fast()\n self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n\n tag_rate_list = []\n spk_rate_list = []\n for _ in range(5):\n # turn on traffic, count spikes\n print(\"enabling traffic, counting spikes\")\n start_time = time.time()\n HAL.start_traffic(flush=False)\n HAL.enable_output_recording()\n\n time.sleep(self.pars[\"duration\"])\n\n spk_rate, tag_rate = self.count_after_experiment(net, start_time)\n tag_rate_list.append(tag_rate)\n spk_rate_list.append(spk_rate)\n\n self.results[\"tag_rate\"] = int(np.mean(tag_rate_list))\n self.results[\"spike_rate\"] = int(np.mean(spk_rate_list))\n print(\"\\nAverage Tag Rate: {}\".format(int(np.mean(tag_rate_list))))\n print(\"Average Spike Rate: {}\".format(int(np.mean(spk_rate_list))))\n print(\"Toggling neurons and AER TX active, measure voltage now\")\n\n dur = self.pars[\"duration\"]\n n_cycles = 40\n skip_cycles = 3\n #duration_list = [dur, dur]*n_cycles\n duration_list = [skip_cycles*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n 
time.sleep(skip_cycles*self.pars[\"duration\"])\n for i in range(n_cycles):\n self.kill_all_neurons()\n print(\"[{}]Set AERTX to not receive inputs (Killed neurons): {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n self.start_all_neurons()\n print(\"[{}]Set AERTX to receive inputs (Started neurons): {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n input(\"Press Enter to after you've measured power, to check no outputs are seen\")\n print(\"sanity check: should expect no outputs b/c traffic was stopped\")\n\n time.sleep(self.pars[\"duration\"])\n outputs = HAL.get_outputs()\n tag_rate = self.compute_output_rate(outputs, net.get_outputs()[0], 0)\n print(\"sanity check: measured\", tag_rate, \"spikes per second (expect 0)\")\n spike_rate = tag_rate / self.pars[\"d_val\"]\n print(\"inferred\", spike_rate, \"spikes per second\")\n\n###########################################\n# Get decode operation (PAT + Acc) power\n\nclass Decode(Experiment):\n # counting setup:\n # default\n # power setup:\n # neurons -> AERTX -> PAT -> accumulator -> stopped at pre-FIFO valve\n\n def __init__(self, soma_bias=2, d_val=.1, Dout=10, duration=Experiment.duration):\n self.pars = names_to_dict([\"soma_bias\", \"duration\", \"d_val\", \"Dout\"], locals())\n self.results = {}\n self.description = \"measure power for decode operation: AER xmitter + PAT + accumulator. Pars: \" + str(self.pars)\n\n def count_after_experiment(self, net):\n HAL.stop_traffic()\n time.sleep(.1)\n # all decoders are 1, output count is spike count\n outputs = HAL.get_outputs()\n tag_rate = self.compute_output_rate(outputs, net.get_outputs()[0], self.pars[\"Dout\"])\n print(\"measured\", tag_rate, \"accumulator outputs per second\")\n spike_rate = tag_rate / self.pars[\"d_val\"] / self.pars[\"Dout\"]\n print(\"inferred\", spike_rate, \"spikes per second\")\n return spike_rate, tag_rate\n\n def run(self):\n net = create_decode_network(width=64, height=64, Dout=self.pars[\"Dout\"], d_range=(self.pars[\"d_val\"], self.pars[\"d_val\"]))\n HAL.map(net)\n\n # give the neurons some juice\n self.set_neurons_fast()\n self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n # make synapses as fast as possible to allow the most traffic\n self.make_fast_synapse()\n\n # turn on traffic\n print(\"enabling traffic, counting tags out\")\n HAL.start_traffic()\n HAL.enable_output_recording()\n\n time.sleep(self.pars[\"duration\"])\n\n spike_rate, tag_rate = self.count_after_experiment(net)\n self.results[\"spike_rate\"] = spike_rate\n self.results[\"tag_rate\"] = tag_rate\n\n print(\"disabling pre-FIFO valve, measure power now\")\n #HAL.start_traffic(flush=False)\n HAL.driver.SetPreFIFOTrafficState(CORE, False)\n HAL.enable_output_recording(flush=True)\n\n #time.sleep(self.pars[\"duration\"])\n\n dur = self.pars[\"duration\"]\n n_cycles = 20\n skip_cycles = 3\n #duration_list = [dur, dur]*n_cycles\n duration_list = [skip_cycles*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n time.sleep(skip_cycles*self.pars[\"duration\"])\n for i in range(n_cycles):\n HAL.stop_traffic()\n print(\"[{}]Set PAT+ACC to not receive inputs: {}\".format(i, 
time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n HAL.start_traffic()\n HAL.driver.SetPreFIFOTrafficState(CORE, False)\n print(\"[{}]Set PAT+ACC to receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n input(\"Press Enter to after you've measured power, to check no outputs are seen\")\n print(\"sanity check: should expect no outputs with pre-FIFO valve closed\")\n self.count_after_experiment(net)\n\n###########################################\n# Get Input IO/horn\n\nclass InputIO(Experiment):\n # no counting setup, we use the SG to produce an exact rate\n # power setups:\n # IO/horn:\n # IO -> horn -> (closed pre-FIFO valve)\n def __init__(self, input_rate=1000, duration=Experiment.duration):\n self.pars = names_to_dict([\"input_rate\", \"duration\"], locals())\n self.results = {}\n self.description = \"Measure input IO + tag horn power. Pars: \" + str(self.pars)\n\n def run(self):\n net = create_decode_network()\n HAL.map(net)\n\n # don't want any neuron power\n self.kill_all_neurons()\n\n time.sleep(.1)\n\n # sanity check, make sure there are no spikes\n time.sleep(.01)\n spikes = HAL.get_spikes()\n print(\"sanity check: got\", len(spikes), \"spikes (expect some amount)\")\n\n HAL.enable_spike_recording()\n time.sleep(.01)\n spikes = HAL.get_spikes()\n print(\"sanity check: got\", len(spikes), \"spikes (expect 0)\")\n\n # leave traffic off, that keeps the pre-FIFO valve closed\n inp = net.get_inputs()[0]\n HAL.set_input_rate(inp, 0, self.pars[\"input_rate\"])\n\n dur = self.pars[\"duration\"]\n n_cycles = 40\n skip_cycles = 3\n #duration_list = [dur, dur]*n_cycles\n duration_list = [skip_cycles*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n time.sleep(skip_cycles*self.pars[\"duration\"])\n for i in range(n_cycles):\n HAL.set_input_rate(inp, 0, 0)\n print(\"[{}]Set InputIO to not receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n HAL.set_input_rate(inp, 0, self.pars[\"input_rate\"])\n print(\"[{}]Set InputIO to receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n input(\"Press Enter to after you've measured power\")\n\n###########################################\n# rxmitter power\nclass AERRX(Experiment):\n # no counting setup, we use the FPGA to produce an exact rate\n # AER RX\n # IO -> AER rx\n def __init__(self, input_rate=1000, width=width, height=height, duration=Experiment.duration):\n self.pars = names_to_dict([\"input_rate\", \"width\", \"height\", \"duration\"], locals())\n self.results = {}\n self.description = \"Measure tap point/AER rx power. 
Pars: \" + str(self.pars)\n\n def run(self):\n net = create_decode_network(width=self.pars[\"width\"], height=self.pars[\"height\"])\n HAL.map(net)\n\n # don't want any neuron power\n self.kill_all_neurons()\n # make synapses as fast as possible to allow the most traffic\n self.make_fast_synapse()\n\n # Stop traffic to ensure that pre-FIFO valve is closed and won't affect traffic\n HAL.stop_traffic(flush=True)\n\n # allow time for settings to be performed\n time.sleep(.1)\n\n # Set time of spikes 1/rate interval apart from each other\n dur = self.pars[\"duration\"]\n rate_interval = 1.0/self.pars[\"input_rate\"]\n num_spks = int(dur//rate_interval)\n #print(\"Num spks: {}\".format(num_spks))\n\n # Create a list of all the synapse addresses to hit\n tap_points = set_tap_matrix(width, height)\n #print(\"Tap: {}\".format(np.reshape(tap_points,(width,height))))\n spikes = create_syn_spike_train(tap_points)\n# print(\"Basic Spike List: {}\".format(spikes))\n\n # Repeat the list of synapse addresses to fill up the duration of the test\n num_itr = num_spks//len(spikes)\n num_itr_rem = num_spks % len(spikes)\n spikes = np.concatenate((np.tile(spikes, num_itr), spikes[:num_itr_rem]))\n #print(\"spikes: {}\".format(spikes[-1024:]))\n print(\"Num_spikes: {}\".format(len(spikes)))\n\n n_cycles = 5\n skip_cycles = 3\n #duration_list = [dur, dur]*n_cycles\n duration_list = [skip_cycles*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n time.sleep(skip_cycles*self.pars[\"duration\"])\n for i in range(n_cycles):\n # don't let traffic into the FIFO, essentially measuring Static+InputIO\n #HAL.driver.SendSpikes(CORE, zero_spikes, spike_times, True)\n #print(\"[{}]Set AERRX to not receive spikes: {}\".format(i, time.time()-t_orig))\n print(\"[{}]Set AERRX to not receive spikes: {}\".format(i, HAL.driver.GetFPGATime()))\n # don't really send any spikes, just wait for 10 seconds, and let the timestamps do their job from the previous iteration \n time.sleep(self.pars[\"duration\"])\n # Create the spike times for the duration of the test\n spike_times = get_spike_times(num_spks, dur) * 1e9\n spike_times = spike_times.astype(int, copy=False)\n #print(\"Time to get spike times: {}\".format(time.time()-t0))\n #print(\"spike_Times:\\n{}\\n{}\".format(spike_times[:10], spike_times[-10:]))\n #print(\"Len spike_Times: {}\".format(len(spike_times)))\n\n t0 = time.time()\n print(\"[{}]Set AERRX to receive spikes: {}\".format(i, HAL.driver.GetFPGATime()))\n HAL.driver.SendSpikes(CORE, spikes, spike_times, True)\n print(\"Time to send spikes: {}\".format(time.time()-t0))\n #print(\"[{}]Set AERRX to receive spikes: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n###########################################\n# FIFO\n\nclass FIFO(Experiment):\n # no counting setup, we use the SG to produce an exact rate\n # FIFO:\n # IO -> horn -> FIFO -> (closed post-FIFO valve)\n def __init__(self, input_rate=1000, soma_bias=875, d_val=0.0026, Dout=3, duration=Experiment.duration):\n self.pars = names_to_dict([\"input_rate\", \"duration\", \"Dout\", \"d_val\", \"soma_bias\"], locals())\n self.results = {}\n self.description = \"Measure FIFO power. 
Pars: \" + str(self.pars)\n\n def run(self):\n net = create_decode_network()\n# net = create_decode_network(width=64, height=64, Dout=self.pars[\"Dout\"], d_range=(self.pars[\"d_val\"], self.pars[\"d_val\"]))\n HAL.map(net)\n\n# # give the neurons some juice\n# self.set_neurons_fast()\n# self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n\n # set the input rate\n inp = net.get_inputs()[0]\n HAL.set_input_rate(inp, 0, self.pars[\"input_rate\"])\n\n # allow time for settings to be performed\n time.sleep(.1)\n\n dur = self.pars[\"duration\"]\n n_cycles = 35\n duration_list = [dur, dur]*n_cycles\n #duration_list = [3*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n# # cut off all traffic to the neuron pool\n# HAL.disable_output_recording()\n HAL.driver.SetPostFIFOTrafficState(CORE, False)\n # don't want any neuron power\n self.kill_all_neurons()\n\n # At this point, the chip should be off except for input spikes and FIFO\n\n # sanity check, monitor pre-fifo spikes\n HAL.driver.SetPreFIFODumpState(CORE, True)\n time.sleep(.5)\n print(\"trying to get pre-fifo dump\")\n dumped = HAL.driver.GetPreFIFODump(CORE)\n print(\"sanity check: with pre-fifo dump on, got\", len(dumped), \"pre-FIFO events (expect\", self.pars[\"input_rate\"]*.5, \")\")\n HAL.driver.SetPreFIFODumpState(CORE, False)\n time.sleep(.5)\n dumped = HAL.driver.GetPreFIFODump(CORE) # Clear FIFO dump queue\n dumped = HAL.driver.GetPreFIFODump(CORE) # Ensure cleared FIFO is actually off\n print(\"sanity check: make sure pre-FIFO dump actually turned off. Got\", len(dumped), \"pre-FIFO events (expect 0)\")\n\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n time.sleep(3*self.pars[\"duration\"])\n\n for i in range(n_cycles):\n # don't let traffic into the FIFO, essentially measuring Static+InputIO\n HAL.driver.SetPreFIFOTrafficState(CORE, False)\n print(\"[{}]Set FIFO to not receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n HAL.driver.SetPreFIFOTrafficState(CORE, True)\n print(\"[{}]Set FIFO to receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n###########################################\n# tap point/txmitter power\nclass TapPointAndAERRX(Experiment):\n # no counting setup, we use the SG to produce an exact rate\n # tap point/AER RX\n # IO -> horn -> FIFO -> TAT -> AER rx\n def __init__(self, input_rate=1000, width=width, height=height, duration=Experiment.duration):\n self.pars = names_to_dict([\"input_rate\", \"width\", \"height\", \"duration\"], locals())\n self.results = {}\n self.description = \"Measure tap point/AER rx power. 
Pars: \" + str(self.pars)\n\n def run(self):\n net = create_decode_network(width=self.pars[\"width\"], height=self.pars[\"height\"])\n HAL.map(net)\n\n # don't want any neuron power\n self.kill_all_neurons()\n # make synapses as fast as possible to allow the most traffic\n self.make_fast_synapse()\n\n HAL.start_traffic(flush=False)\n\n # allow time for settings to be performed\n time.sleep(.1)\n\n # set the input rate\n inp = net.get_inputs()[0]\n HAL.set_input_rate(inp, 0, self.pars[\"input_rate\"])\n\n # Clear FIFO Dumps (in case they have old data)\n pre_dumped = HAL.driver.GetPreFIFODump(CORE)\n post_dumped = HAL.driver.GetPostFIFODump(CORE)\n\n # sanity check, monitor pre-fifo and post-fifo spikes\n HAL.driver.SetPreFIFODumpState(CORE, True)\n time.sleep(.5)\n print(\"trying to get pre-fifo dump\")\n pre_dumped = HAL.driver.GetPreFIFODump(CORE)\n print(\"sanity check: with pre-fifo dump on, got\", len(pre_dumped), \"pre-FIFO events (expect\", self.pars[\"input_rate\"]*.5, \")\")\n HAL.driver.SetPreFIFODumpState(CORE, False)\n time.sleep(.5)\n pre_dumped = HAL.driver.GetPreFIFODump(CORE)\n print(\"sanity check: make sure pre-FIFO dump actually turned off. Got\", len(pre_dumped), \"pre-FIFO events (expect 0)\")\n print(\"[OUTPUT] sanity check: FIFO should not overflow\")\n print(\"[OUTPUT] total overflows:\", HAL.get_overflow_counts())\n\n HAL.driver.SetPostFIFODumpState(CORE, True)\n time.sleep(.5)\n print(\"trying to get post-fifo dump\")\n _, post_dumped = HAL.driver.GetPostFIFODump(CORE)\n print(\"sanity check: with post-fifo dump on, got\", len(post_dumped), \"post-FIFO events (expect\", self.pars[\"input_rate\"]*.5, \")\")\n HAL.driver.SetPostFIFODumpState(CORE, False)\n time.sleep(.5)\n _, post_dumped = HAL.driver.GetPostFIFODump(CORE)\n print(\"sanity check: make sure post-FIFO dump actually turned off. 
Got\", len(post_dumped), \"post-FIFO events (expect 0)\")\n print(\"[OUTPUT] sanity check: FIFO should not overflow\")\n print(\"[OUTPUT] total overflows:\", HAL.get_overflow_counts())\n\n dur = self.pars[\"duration\"]\n n_cycles = 43\n skip_cycles = 3\n #duration_list = [dur, dur]*n_cycles\n duration_list = [skip_cycles*dur] + [dur, dur]*n_cycles\n\n print(\"Test time durations: {}\".format(duration_list))\n print(\"Test should take: {} s\".format(np.sum(duration_list)))\n print(\"Ensure at least {} readings are taken\".format(np.sum(duration_list)/0.185))\n input(\"Press Enter and measure power now (for time synchronization)\")\n t_orig = time.time()\n time.sleep(skip_cycles*self.pars[\"duration\"])\n for i in range(n_cycles):\n # don't let traffic into the FIFO, essentially measuring Static+InputIO\n HAL.driver.SetPostFIFOTrafficState(CORE, False)\n print(\"[{}]Set TAT to not receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n HAL.driver.SetPostFIFOTrafficState(CORE, True)\n print(\"[{}]Set TAT to receive inputs: {}\".format(i, time.time()-t_orig))\n time.sleep(self.pars[\"duration\"])\n\n print(\"Total test time: {}\".format(time.time()-t_orig))\n\n###########################################\n# decode-encode\nclass DecodeEncode(Experiment):\n # need to map twice for this\n # counting setup:\n # neurons -> AERTX -> PAT -> accumulator -> TAT -> funnel -> out\n #\n # power setup:\n # neurons -> AERTX -> PAT -> accumulator -> FIFO -> TAT -> AERRX -> neurons\n # take care that there is no FIFO overflow in this setup, which would indicate TAT/AERRX/synapse backup\n\n def __init__(self, width=32, height=32, soma_bias=2, d_val=.1, Dint=10, taps_per_dim=8, duration=Experiment.duration, is_recur=False):\n self.pars = names_to_dict([\"soma_bias\", \"duration\", \"d_val\", \"Dint\", \"taps_per_dim\", \"width\", \"height\", \"is_recur\"], locals())\n self.results = {}\n self.description = \"measure power for decode operation: AER xmitter + PAT + accumulator. 
Pars: \" + str(self.pars)\n\n def count_after_experiment(self, net):\n HAL.stop_traffic()\n time.sleep(.1)\n # all decoders are 1, output count is spike count\n outputs = HAL.get_outputs()\n output_obj = net.get_outputs()[0]\n tag_rate = self.compute_output_rate(outputs, output_obj, self.pars[\"Dint\"])\n print(\"[OUTPUT] measured\", tag_rate, \"accumulator outputs per second\")\n spike_rate = tag_rate / self.pars[\"d_val\"] / self.pars[\"Dint\"]\n print(\"[OUTPUT] inferred\", spike_rate, \"spikes per second\")\n return spike_rate, tag_rate\n\n def run(self):\n #####################################\n # measure rates\n measure_net = create_decode_encode_network(\n width=self.pars[\"width\"],\n height=self.pars[\"height\"],\n Dint=self.pars[\"Dint\"],\n d_range=(self.pars[\"d_val\"], self.pars[\"d_val\"]),\n taps_per_dim=self.pars[\"taps_per_dim\"],\n measure_tags=True,\n is_recur=self.pars[\"is_recur\"])\n HAL.map(measure_net)\n\n # give the neurons some juice\n self.set_neurons_fast()\n self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n self.make_fast_synapse()\n\n # turn on traffic\n print(\"enabling traffic, counting tags out\")\n HAL.start_traffic()\n HAL.enable_output_recording()\n\n time.sleep(self.pars[\"duration\"])\n\n spike_rate, tag_rate = self.count_after_experiment(measure_net)\n self.results[\"spike_rate\"] = spike_rate\n self.results[\"tag_rate\"] = tag_rate\n\n #for i in range(100):\n # print(\"[%d] enabling traffic, counting tags out\" % i)\n # HAL.start_traffic()\n # HAL.enable_output_recording()\n\n # time.sleep(self.pars[\"duration\"])\n\n # spike_rate, tag_rate = self.count_after_experiment(measure_net)\n # self.results[\"spike_rate\"] = spike_rate\n # self.results[\"tag_rate\"] = tag_rate\n # print(\"[OUTPUT] sanity check: FIFO should not overflow\")\n # print(\"[OUTPUT] total overflows:\", HAL.get_overflow_counts())\n\n #####################################\n # go to power measurement configuration, make sure not overflowing\n power_net = create_decode_encode_network(\n width=self.pars[\"width\"],\n height=self.pars[\"height\"],\n Dint=self.pars[\"Dint\"],\n d_range=(self.pars[\"d_val\"], self.pars[\"d_val\"]),\n taps_per_dim=self.pars[\"taps_per_dim\"],\n measure_tags=False,\n is_recur=self.pars[\"is_recur\"])\n HAL.map(power_net)\n\n # give the neurons some juice\n self.set_neurons_fast()\n self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n self.make_fast_synapse()\n\n # turn on traffic\n HAL.start_traffic()\n HAL.enable_output_recording(flush=True)\n\n time.sleep(self.pars[\"duration\"])\n\n print(\"[OUTPUT] sanity check: should expect no outputs with remapped network\")\n outputs = HAL.get_outputs()\n total_count = np.sum(outputs[:,3])\n print(\"[OUTPUT] total outputs:\", total_count)\n\n print(\"[OUTPUT] sanity check: FIFO should not overflow\")\n print(\"[OUTPUT] total overflows:\", HAL.get_overflow_counts())\n\n #####################################\n # remap network again in power measurement configuration, measure power\n HAL.map(power_net)\n\n # give the neurons some juice\n self.set_neurons_fast()\n self.make_enabled_neurons_spike(self.pars[\"soma_bias\"])\n self.make_fast_synapse()\n\n # turn on traffic\n HAL.start_traffic()\n HAL.enable_output_recording(flush=True)\n\n print(\"remapped network, measure power now\")\n\n\n###########################################\n# run tests\n\nsomaBias = 875\nSG_rate = 4500000\ndim = 3\n\ntests = [\n #Static(),\n #InputIO(input_rate=SG_rate),\n #Static(),\n #InputIO(input_rate=SG_rate),\n #Static(),\n 
#InputIO(input_rate=SG_rate),\n #Static(),\n #InputIO(input_rate=SG_rate),\n #FIFO(input_rate=SG_rate),\n # Note: num_tap_points = width * height / 4\n # Advice: Ensure that total spike rate of TAT is < ~100MHz (input_rate * num_tap_points < 100MHz)\n #TapPointAndAERRX(input_rate=8000, width=64, height=64),\n #Decode(soma_bias=somaBias, d_val=.0078125/dim, Dout=dim),\n #AERTX(soma_bias=somaBias, d_val=.0078125),\n AERRX(input_rate=SG_rate, width=64, height=64),\n # Don't worry about doing the DecodeEncode tests for paper power measurements\n #DecodeEncode(soma_bias=75, d_val=.00125, Dint=16, taps_per_dim=8),\n #DecodeEncode(width=64, height=64, soma_bias=100, d_val=.0015, Dint=16, taps_per_dim=16, is_recur=True),\n #Static(),\n ]\n\n#input(\"Press Enter to start experiments...\\n\")\n\nfor idx, test in enumerate(tests):\n print(\"================================================================================\")\n print(\"EXP: running test\", idx)\n print(\"EXP: \" + test.description)\n print(\"================================================================================\")\n\n test.results[\"start_time\"] = time.time()\n test.run()\n input(\"Press Enter to continue experiments...\\n\")\n test.results[\"end_time\"] = time.time()\n\n #V = input(\"please input mean voltage during trial > \")\n #try:\n # V = float(V)\n #except:\n # print(\"ERROR: that wasn't a number, try again\")\n # V = input(\"please input mean voltage during trial > \")\n\n #test.results[\"V\"] = V\n\n print(\"EXP: done\")\n\nimport pickle\n\nfname = \"trial_data.pck\"\n\n# load old tests to append to\nyn = input(\"append to old results (\" + str(fname) + \") ? (y/n) > \")\nif yn == \"n\":\n old_tests = []\nelse:\n pfile = open(fname, \"rb\")\n old_tests = pickle.load(pfile)\n pfile.close()\n\n# save test parameters and results\npfile = open(fname, \"wb\")\npickle.dump(old_tests + tests, pfile)\npfile.close()\n\n\ndef print_pickle(fname):\n pfile = open(fname, \"rb\")\n old_tests = pickle.load(pfile)\n pfile.close()\n\n print(\"Length of old_tests: %d\" % len(old_tests))\n for idx, test in enumerate(old_tests):\n print(\"Test Description: \" + test.description)\n print(\"Test Results: \")\n print(test.results)\n\nprint_pickle(fname)\n\n"
] |
[
[
"numpy.dot",
"numpy.linalg.svd",
"numpy.abs",
"numpy.nonzero",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
],
[
"numpy.random.seed",
"numpy.linspace",
"numpy.min",
"numpy.tile",
"numpy.ones",
"numpy.all",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MahdadJafarzadeh/ssccoorriinngg
|
[
"63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3",
"63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3"
] |
[
"Quality analysis/Sync_data.py",
"Zmax_autoscoring_controlled_train_test_split.py"
] |
[
"# -*- codiEEG_dsddsdsng: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 29 20:08:11 2020\n\n@author: mahjaf\n\"\"\"\n\n#%% Import libs\n#####===================== Importiung libraries =========================#####\nimport mne\nimport numpy as np\nfrom scipy.integrate import simps\nfrom numpy import loadtxt\nimport h5py\nimport time\nimport os \nfrom ssccoorriinngg import ssccoorriinngg\nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report\nimport pandas as pd\nimport tensorflow as tf\nfrom scipy import signal\nfrom scipy.signal import butter, lfilter, periodogram, spectrogram, welch, filtfilt, iirnotch\nfrom scipy.stats import pearsonr\nimport matplotlib.mlab as mlab\n\n#%% Read in data (Somno + Zmax)\n#####=========================== Reading data ============================#####\n# Main path\nfolder_zmax = \"F:/Zmax_Data/Zmax_Data/P_18/night2/\"\nfolder_somno = \"F:/Zmax_Data/Somnoscreen_Data/P_18/P18 night2_B.12.12.2018/\"\n \n# Reading EEG left and right (Zmax)\ndata_L = mne.io.read_raw_edf(folder_zmax + \"EEG L.edf\", preload = True)\ndata_R = mne.io.read_raw_edf(folder_zmax + \"EEG R.edf\", preload = True)\n\n# Read somno data \nSOMNO_path = folder_somno+\"P18_night2_B_Markers_(1).edf\"\nEEG_somno = mne.io.read_raw_edf(SOMNO_path, preload = True)\n\n# Reading info header (Somno)\nSomnoInfo = EEG_somno.info\nAvailableChannels = SomnoInfo['ch_names']\nZmaxInfo = data_R.info\n\n# Fs\nfs_zmax = int(ZmaxInfo['sfreq'])\nfs_somno = int(SomnoInfo['sfreq'])\n\n#%% Plot filtered signals \n#####======================== Data representation ========================#####\n# =============================================================================\n# data_L.plot(duration = 30, highpass = .1 , lowpass = 30 )\n# data_R.plot(duration = 30, highpass = .1 , lowpass = 30 )\n# EEG_somno.plot(duration = 30, highpass = .1 , lowpass = 30,n_channels = 4 )\n# =============================================================================\n\n#%% Resampling higher freq to lower\nif fs_zmax != fs_somno:\n \n if fs_zmax < fs_somno:\n EEG_somno = EEG_somno.resample(int(fs_zmax), npad=\"auto\")\n \n else:\n data_L = data_L.resample(int(fs_somno), npad=\"auto\")\n data_R = data_R.resample(int(fs_somno), npad=\"auto\")\n \n# Define resampled fs\nfs_res = np.min([fs_zmax, fs_somno])\n\n\n#%% Get data (resampled)\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order = 2):\n nyq = 0.5 * fs\n low = lowcut /nyq\n high = highcut/nyq\n b, a = butter(order, [low, high], btype='band')\n #print(b,a)\n y = filtfilt(b, a, data)\n return y\n\ndata_L_get = data_L.get_data()\ndata_R_get = data_R.get_data()\ndata_somno_get = EEG_somno.get_data()\n\n#%% Filtering resampled data\ndata_L_resampled_filtered = butter_bandpass_filter(data_L_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\ndata_R_resampled_filtered = butter_bandpass_filter(data_R_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\nEEG_somno_resampled_filtered = butter_bandpass_filter(data_somno_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)\n\n#%% Synchronization section\n\n# ===================== start of LRLR for sync ============================== #\n\n# Zmax\nLRLR_start_zmax = 71 #sec\nLRLR_end_zmax = 91 #sec\n\n# Somno\nLRLR_start_somno = 7925 #sec\nLRLR_end_somno = 7945 #sec\n\n# Define a period around sync point ro perform 
alignment\nzmax_plotting_secs = [LRLR_start_zmax,LRLR_end_zmax]\nsomno_plotting_secs = [LRLR_start_somno, LRLR_end_somno]\n\n# Finding corresponding samples of sync period\nzmax_plotting_samples = np.arange(zmax_plotting_secs[0] *fs_res, zmax_plotting_secs[1] * fs_res)\nsomno_plotting_samples = np.arange(somno_plotting_secs[0] *fs_res, somno_plotting_secs[1] * fs_res)\n\n# Convert (probable) floats int o int\nsomno_plotting_samples = somno_plotting_samples.astype(np.int32)\nzmax_plotting_samples = zmax_plotting_samples.astype(np.int32)\n\n# R EEG (Zmax) --> sync period\nzmax_data_R = np.ravel(data_R_resampled_filtered)\n\n# L EEG (Zmax) --> sync period\nzmax_data_L = np.ravel(data_L_resampled_filtered)\n\n# Define channel of interest\nRequiredChannels = ['F4:A1'] # main electrodes\n\n# init index of reeuired channel(s) \nIdx = []\nIdx_Mastoids = []\n\n# Find index of required channel(s)\nfor indx, c in enumerate(AvailableChannels):\n if c in RequiredChannels:\n Idx.append(indx)\n \n# pick Somno channel\nSomno_reqChannel = EEG_somno_resampled_filtered[Idx,:]\n\n# np.ravel somno signal(s)\nSomno_reqChannel = np.ravel(Somno_reqChannel)\n\n# plt R EEG (zmax) and required channel of Somno BEFORE sync\nplt.figure()\nsig_zmax = zmax_data_R[zmax_plotting_samples]\nsig_somno = Somno_reqChannel[somno_plotting_samples]\n\n# Compute correlation\ncorr = signal.correlate(sig_zmax, sig_somno)\n\n# find lag\nlag = np.argmax(np.abs(corr)) - len(zmax_data_L[zmax_plotting_samples]) + 1\n\n# Plot before lag correction\nplt.plot(np.arange(0, len(zmax_plotting_samples)), sig_zmax,label = 'Zmax R EEG', color = 'black')\nplt.plot(np.arange(0, len(somno_plotting_samples)), sig_somno, label = 'Somno F4', color = 'gray', linestyle = ':')\nplt.title('Syncing Somno and Zmax data (Sync period only)', size = 15)\n\n# Plot after lag correction\nplt.plot(np.arange(0+lag, len(somno_plotting_samples)+lag), sig_somno, label = 'Somno F4 - synced',color = 'red')\n#plt.plot(np.arange(0-lag, len(zmax_plotting_samples)-lag), sig_zmax, label = 'zmax - synced',color = 'cyan')\n\nplt.legend(prop={\"size\":20})\n\n#%% Plot cross-correlation\nfig, ax = plt.subplots(1,1, figsize=(26, 14))\n\n# Plot original Zmax sig (sync period)\nax.plot(np.arange(-len(zmax_data_R[zmax_plotting_samples])+1,len(zmax_data_R[zmax_plotting_samples])), corr, color = 'blue')\nplt.title('Cross-correlation to find lag between Zmax & Somno during eye movements', size=15)\n\n# Marking max correlation value to find lag\nymax = np.max(np.abs(corr)) \n\n# check if the argmax <0 --> arrow comes below figure\nif np.max(np.abs(corr)) != np.max(corr) :\n ymax = -ymax\nxpos = lag\nxmax = lag\n\n# Creating arrot to point to max\nax.annotate('max correlation', xy=(xmax, ymax), xytext=(xmax, ymax+ymax/10),\n arrowprops=dict(facecolor='red', shrink=0.05),\n )\nplt.show()\n# pearson correlation\nsig_somno_new = Somno_reqChannel[somno_plotting_samples-lag]\npearson_corr,pval = pearsonr(sig_zmax, sig_somno_new)\n\n#%% Plotting COMPLETE signals after synchronization\n\n# rough lag \nrough_lag = (LRLR_start_somno - LRLR_start_zmax) * fs_res\n\n# Total lag = rough lag +- lag during sync\ntotal_lag = int(rough_lag - lag)\n\n# truncate the lag period from somno BEGINNING\ntruncated_beginning_somno = Somno_reqChannel[total_lag:]\n\n# Truncate the end of LONGER signal\nlen_s = len(truncated_beginning_somno)\nlen_z = len(zmax_data_R)\n\n# if somno data is larger\nif len_s > len_z:\n somno_final = truncated_beginning_somno[:len_z]\n zmax_final = zmax_data_R\nelse: \n zmax_final = 
zmax_data_R[:len_s]\n somno_final = truncated_beginning_somno\n\n# Calculate final length\ncommon_length = np.min([len_s, len_z]) \n\n# Plot truncated sigs\nplt.figure()\nplt.plot(np.arange(0, common_length) / fs_res / 60, zmax_final, color = 'blue', label = 'Zmax R EEG')\nplt.plot(np.arange(0, common_length) / fs_res / 60, somno_final, \\\n color = 'red', label = 'Somno F4-A1')\nplt.title('Complete Zmax and Somno data after full sync', size = 20)\nplt.xlabel('Time (mins)', size = 15)\nplt.ylabel('Amplitude (v)', size = 15)\nplt.legend(prop={\"size\":20}, loc = \"upper right\")\n\n#%% Plot PSD\n\ndef spectrogram_creation(sig1,sig2, fs):\n from lspopt import spectrogram_lspopt\n import numpy as np\n import matplotlib.pyplot as plt\n\n #==== plot 1st sig ======= \n f, t, Sxx = spectrogram_lspopt(x=sig1, fs=fs, c_parameter=20.0, nperseg=int(30*fs), \\\n scaling='density')\n Sxx = 10 * np.log10(Sxx) #power to db\n \n # Limit Sxx to the largest freq of interest:\n f_sig1 = f[0:750]\n Sxx_sig1 = Sxx[0:750, :]\n fig, axs = plt.subplots(2,1, figsize=(26, 14))\n plt.axes(axs[0])\n \n plt.pcolormesh(t, f_sig1, Sxx_sig1)\n plt.ylabel('Frequency [Hz]', size=15)\n #plt.xlabel('Time [sec]', size=15)\n plt.title('Somnoscreeen data (F4) - Multi-taper Spectrogram', size=20)\n plt.colorbar()\n # ==== plot 2nd sig ==== #\n plt.axes(axs[1])\n f, t, Sxx = spectrogram_lspopt(x=sig2, fs=fs, c_parameter=20.0, nperseg=int(30*fs), \\\n scaling='density')\n Sxx = 10 * np.log10(Sxx) #power to db\n \n # Limit Sxx to the largest freq of interest:\n f_sig2 = f[0:750]\n Sxx_sig2 = Sxx[0:750, :]\n plt.pcolormesh(t, f_sig2, Sxx_sig2)\n plt.ylabel('Frequency [Hz]', size=15)\n plt.xlabel('Time [sec]', size=15)\n plt.title('Zmax data (EEG right) - Multi-taper Spectrogram ', size=20)\n\n plt.colorbar()\n #==== 1st Way =======\n \n #=== Maximize ====\n figure = plt.gcf() # get current figure\n figure.set_size_inches(32, 18)\n plt.show()\n #=== Maximize ====\n\nspectrogram_creation(somno_final, zmax_final, fs = fs_res)\n\n\n# =============================================================================\n# #%% PERIODOGRAM\n# \n# # Defining EEG bands:\n# eeg_bands = {'Delta' : (0.5, 4),\n# 'Theta' : (4 , 8),\n# 'Alpha' : (8 , 11),\n# 'Beta' : (16 , 24),\n# 'Sigma' : (12 , 15),\n# 'Sigma_slow': (10 , 12)}\n# # Settings of peridogram \n# Window = 'hann'\n# \n# # Compute pxx (SOMNO)\n# fm, pxx_somno = periodogram(x = somno_full_sig, fs = fs_res , window = Window)\n# \n# # Compute pxx (Zmax)\n# fm, pxx_zmax = periodogram(x = zmax_full_sig, fs = fs_res , window = Window)\n# freq_resolu_per= fm[1] - fm[0]\n# \n# # Finding the index of different freq bands with respect to \"fm\" PERIODOGRAM #\n# freq_ix = dict()\n# for band in eeg_bands:\n# freq_ix[band] = np.where((fm >= eeg_bands[band][0]) & \n# (fm <= eeg_bands[band][1]))[0] \n# \n# # Periodogram\n# plt.figure()\n# plt.plot(fm[freq_ix['Delta']], pxx_zmax[freq_ix['Delta']], label = 'Zmax') \n# plt.plot(fm[freq_ix['Delta']], pxx_somno[freq_ix['Delta']],label = 'Somno') \n# =============================================================================\n\n#%% Plot PSD\nplt.figure()\n\n# Global setting for axes values size\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\n\n# Plot power spectrums\npsd_z, f_psd_z = plt.psd(x=zmax_final,Fs = fs_res, label = 'Zmax', NFFT = 2 ** 11, scale_by_freq= True, linewidth = 2, color = 'blue') \npsd_s, f_psd_s = plt.psd(x=somno_final,Fs = fs_res, label = 'Zmax',NFFT = 2 ** 11, scale_by_freq= True, linewidth = 2, color = 'red') \n# 
================== plot dashed lines of freq bins ========================= #\n\n#Delta\nplt.axvline(.5, linestyle = '--', color = 'black')\nplt.axvline(4, linestyle = '--', color = 'black')\n\n#Theta\nplt.axvline(8, linestyle = '--', color = 'black')\n\n# Alpha\nplt.axvline(12, linestyle = '--', color = 'black')\n\n# Title and labels\nplt.title('Power spectral density throughout the night', size = 20)\nplt.xlabel('Frequency (Hz)', size = 20)\nplt.ylabel('Power spectral density (dB/ Hz)', size = 20)\n\n# Legend \nplt.legend(['Zmax EEG R', 'Somno F4'], prop = {'size':20})\n\n# Deactivate grid\nplt.grid(False)\n\n# Adding labels\nplt.text(1.5, -89, 'Delta',size =18)\nplt.text(5, -89, 'Theta',size =18)\nplt.text(9, -89, 'Alpha',size =18)\nplt.text(13, -89, 'Beta',size =18)\n\n# Limiting x-axis to 0-30 Hz\nplt.xlim([0, 30])\n\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 6 22:41:54 2020\n\n@author: mahjaf\n\nAutomatic sleep scoring implemented for Zmax headband.\n\n\"\"\"\n#%% Reading EDF section\n#####===================== Importiung libraries =========================#####\nimport mne\nimport numpy as np\nfrom numpy import loadtxt\nimport h5py\nimport time\nimport os \nfrom ssccoorriinngg import ssccoorriinngg\nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report\nimport pandas as pd\nimport tensorflow as tf\nfrom scipy import signal\n#####==================== Defining required paths r=======================#####\n\nMain_path = \"P:/3013080.01/\"\nsubject_Id_folder = Main_path + \"Autoscoring/ssccoorriinngg/\"\nData_folder = Main_path + \"Zmax_Data/\"\nHypnogram_folder = Main_path + \"somno_scorings/Rathiga/\"\n\n#####===================== Reading EDF data files=========================#####\n\nsubject_ids = loadtxt(subject_Id_folder+\"Zmax/Subject_ids_excluding 22_2.txt\", dtype = 'str',delimiter='\\n')\n\n#####============= create an object of ssccoorriinngg class ==============#####\n\nObject = ssccoorriinngg(filename='', channel='', fs = 256, T = 30)\n\n#%% Load featureset and labels\n\npath = \"P:/3013080.01/Autoscoring/features/\"\nfilename = \"Zmax_Rathiga_scorings_ch-ch2+AccFeats_190620\"\nsubjects_dic, hyp_dic = Object.load_dictionary(path, filename)\n \n#%% ================================Training part==============================\n\n# Training perentage\ntrain_size = .7\nn_train = round(train_size * len(subject_ids))\n\n#######=== Randomly shuffle subjects to choose train and test splits ===#######\n\nsubject_ids = np.random.RandomState(seed=0).permutation(subject_ids)\n\n#######=============== Initialize train and test arrays ================#######\nsample_subject = \"subjectP_12_night1_scoring.csv.spisop.new - Copy\"\nsample_hyp = \"hypP_12_night1_scoring.csv.spisop.new - Copy\" \nX_train = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))\nX_test = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))\ny_train = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))\ny_test = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))\n\n########======= Picking the train subjetcs and concatenate them =======########\ntic = time.time()\ntrain_subjects_list = [\"P_12_night1_scoring.csv.spisop.new - Copy\",\n \"P_13_night2_scoring.csv.spisop.new - Copy\",\n \"P_15_night2_scoring.csv.spisop.new - Copy\",\n \"P_16_night1_scoring.csv.spisop.new - Copy\",\n \"P_18_night1_scoring.csv.spisop.new - Copy\",\n \"P_20_night1_scoring.csv.spisop.new - Copy\",\n \"P_21_night1_scoring.csv.spisop.new - Copy\",\n \"P_23_night1_scoring.csv.spisop.new - Copy\"]\n\nfor c_subj in train_subjects_list:\n \n # train hypnogram\n str_train_hyp = 'hyp' + str(c_subj)\n \n # train featureset\n str_train_feat = 'subject' + str(c_subj)\n \n # create template arrays for featurs and label\n tmp_x = subjects_dic[str_train_feat]\n tmp_y = hyp_dic[str_train_hyp]\n \n # Concatenate features and labels\n X_train = np.row_stack((X_train, tmp_x))\n y_train = np.row_stack((y_train, tmp_y))\n \n del tmp_x, tmp_y\n \nprint('Training set was successfully created in : {} secs'.format(time.time()-tic))\n\n #%% ================================Test part==============================%%#\n\n########======== 
Picking the test subjetcs and concatenate them =======########\ntic = time.time()\ntest_subjects_list = []\ntst_subj_list = [\"P_12_night2_scoring.csv.spisop.new - Copy\",\n \"P_12_night3_scoring.csv.spisop.new - Copy\",\n \"P_13_night3_scoring.csv.spisop.new - Copy\",\n \"P_14_night3_scoring.csv.spisop.new - Copy\",\n \"P_15_night3_scoring.csv.spisop.new - Copy\",\n \"P_16_night3_scoring.csv.spisop.new - Copy\",\n \"P_18_night2_scoring.csv.spisop.new - Copy\",\n \"P_18_night3_scoring.csv.spisop.new - Copy\",\n \"P_20_night2_scoring.csv.spisop.new - Copy\",\n \"P_20_night3_scoring.csv.spisop.new - Copy\",\n \"P_21_night2_scoring.csv.spisop.new - Copy\",\n \"P_21_night3_scoring.csv.spisop.new - Copy\"]\n\nfor c_subj in tst_subj_list:\n \n # test hypnogram\n str_test_hyp = 'hyp' + str(c_subj)\n \n # test featureset\n str_test_feat = 'subject' + str(c_subj)\n \n # create template arrays for featurs and label\n tmp_x = subjects_dic[str_test_feat]\n tmp_y = hyp_dic[str_test_hyp]\n \n # Concatenate features and labels\n X_test = np.row_stack((X_test, tmp_x))\n y_test = np.row_stack((y_test, tmp_y))\n \n # keep the subject id\n test_subjects_list.append(str_test_feat)\n \n # remove for next iteration\n del tmp_x, tmp_y, str_test_feat, str_test_hyp\n \nprint('Test set was successfully created in : {} secs'.format(time.time()-tic))\n\nprint(f'Raw train and test data were created.')\n\n########================== Replace any probable NaN ===================########\n\nX_train = Object.replace_NaN_with_mean(X_train)\nX_test = Object.replace_NaN_with_mean(X_test)\n\n########================== Replace any probable inf ===================########\n\nX_train = Object.replace_inf_with_mean(X_train)\nX_test = Object.replace_inf_with_mean(X_test)\n\n########==================== Z-score of features ======================########\n\nX_train, X_test = Object.Standardadize_features(X_train, X_test)\n\n########========== select features only on first iteration ============########\n\ntd = 5 # Time dependence: number of epochs of memory\n\nX_train_td = Object.add_time_dependence_backward(X_train, n_time_dependence=td,\n padding_type = 'sequential')\n\nX_test_td = Object.add_time_dependence_backward(X_test, n_time_dependence=td,\n padding_type = 'sequential')\n\n########====================== Feature Selection ======================########\n\ny_train_td = Object.binary_to_single_column_label(y_train)\n\n########========== select features only on first iteration ============########\n\n# =============================================================================\n# ranks, Feat_selected, selected_feats_ind = Object.FeatSelect_Boruta(X_train_td,\n# y_train_td[:,0], max_iter = 50, max_depth = 7)\n# \n# #######===================== Save selected feats =======================#######\n# \n# path = \"P:/3013080.01/Autoscoring/features/\"\n# filename = \"Selected_Features_BoturaNoTimeDependency_5_Backward_Zmax_ch1-ch2+Acc_200620\"\n# with open(path+filename+'.pickle',\"wb\") as f:\n# pickle.dump(selected_feats_ind, f)\n# =============================================================================\n \n########################### Load selected feats ###############################\n\npath = \"P:/3013080.01/Autoscoring/features/\"\nfilename = \"Selected_Features_BoturaAfterTD=5_Backward_Zmax_ch1-ch2+Acc_200620\"\n#filename = \"sleep_scoring_NoArousal_8channels_selected_feats_NEW\"\nwith open(path + filename + '.pickle', \"rb\") as f: \n selected_feats_ind = pickle.load(f)\n \n########=================== Apply selected 
features ===================########\n\nX_train = X_train_td[:, selected_feats_ind]\nX_test = X_test_td[:, selected_feats_ind]\n\n########============== Define classifier of interest ==================########\ny_pred = Object.XGB_Modelling(X_train, y_train,X_test, y_test, n_estimators = 500)\n#y_pred = Object.KernelSVM_Modelling(X_train, y_train,X_test, y_test, kernel='rbf')\ny_pred = Object.ANN_classifier(X_train, y_train, X_test, units_h1=600, units_h2 = 300, units_output = 5,\n activation_out = 'softmax',\n init = 'uniform', activation = 'relu', optimizer = 'adam',\n loss = 'categorical_crossentropy', metrics=[tf.keras.metrics.Recall()],\n h3_status = 'deactive', units_h3 = 50, epochs = 100, batch_size = 100)\n\n########===== Metrics to assess the model performance on test data ====########\n\nAcc, Recall, prec, f1_sc, kappa, mcm= Object.multi_label_confusion_matrix(y_test, y_pred)\n\n########================= Creating subjective outputs =================########\n\nObject.create_subjecive_results(y_true=y_test, y_pred=y_pred, \n test_subjects_list = test_subjects_list,\n subjects_data_dic = subjects_dic,\n fname_save = \"results\")\n\n########============= find number of epochs per stage =================########\n\nObject.find_number_of_samples_per_class(y_test, including_artefact = False)\n\n########================== Comparative hypnogram ======================########\n\nhyp_true = Object.binary_to_single_column_label(y_test) \nObject.plot_comparative_hyp(hyp_true = hyp_true, hyp_pred = y_pred, mark_REM = 'active')\n\n########==================== Plot subjectve hypnos ====================########\n\nObject.plot_subjective_hypno(y_true=y_test, y_pred=y_pred, \n test_subjects_list=test_subjects_list,\n subjects_data_dic=subjects_dic,\n save_fig = False, \n directory=\"P:/3013080.01/Autoscoring/ssccoorriinngg/\")\n\n########=================== Plot overall conf-mat =======================######\n\nObject.plot_confusion_matrix(y_test,y_pred, target_names = ['Wake','N1','N2','SWS','REM'],\n title='Confusion matrix of ssccoorriinngg algorithm',\n cmap = None,\n normalize=True)\n\n########================== Plot subjective conf-mat ==================########\n\nObject.plot_confusion_mat_subjective(y_true=y_test, y_pred=y_pred, \n test_subjects_list=test_subjects_list,\n subjects_data_dic=subjects_dic)\n\n########========================== Save figure =======================#########\nObject.save_figure(saving_format = '.png',\n directory=\"P:/3013080.02/Mahdad/Github/ssccoorriinngg/\",\n saving_name = 'test_subject_all' + str(c_subj), dpi = 900,\n full_screen = False)\n\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.psd",
"scipy.signal.correlate",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.axes",
"numpy.max",
"numpy.arange",
"matplotlib.pyplot.gcf",
"scipy.signal.butter",
"numpy.ravel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"scipy.stats.pearsonr",
"numpy.log10",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"scipy.signal.filtfilt",
"numpy.abs",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
],
[
"numpy.shape",
"numpy.row_stack",
"tensorflow.keras.metrics.Recall",
"numpy.random.RandomState",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
SX-Aurora/nlcpy
|
[
"0a53eec8778073bc48b12687b7ce37ab2bf2b7e0",
"0a53eec8778073bc48b12687b7ce37ab2bf2b7e0"
] |
[
"tests/pytest/manipulation_tests/test_copy.py",
"bench/stencil_bench/gen_graph.py"
] |
[
"#\n# * The source code in this file is based on the soure code of CuPy.\n#\n# # NLCPy License #\n#\n# Copyright (c) 2020-2021 NEC Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither NEC Corporation nor the names of its contributors may be\n# used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# # CuPy License #\n#\n# Copyright (c) 2015 Preferred Infrastructure, Inc.\n# Copyright (c) 2015 Preferred Networks, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nimport unittest\n\nimport numpy\nimport nlcpy\nfrom nlcpy import testing\n\n\nclass DummyError(Exception):\n pass\n\n\[email protected](*(\n testing.product({\n 'val': [0, 3, -5.2, complex(1.2, -3.4)],\n 'casting': ['no', 'equiv', 'safe', 'same_kind', 'unsafe'],\n })\n))\nclass TestCopyScalar(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_scalar(self, xp, dst_dtype):\n if numpy.can_cast(self.val, dst_dtype, casting=self.casting):\n dst = xp.asanyarray(-999, dtype=dst_dtype) # make some 0-dim array\n src = self.val\n xp.copyto(dst, src, casting=self.casting)\n return dst\n else:\n return -1\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_scalar_masked(self, xp, dst_dtype):\n if numpy.can_cast(self.val, dst_dtype, casting=self.casting):\n dst = xp.asanyarray(-999, dtype=dst_dtype) # make some 0-dim array\n src = self.val\n where = xp.asanyarray(1, dtype='bool')\n xp.copyto(dst, src, where=where, casting=self.casting)\n return dst\n else:\n return -1\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.numpy_nlcpy_raises()\n def test_copyto_scalar_fail_casting(self, xp, dst_dtype):\n if not numpy.can_cast(self.val, dst_dtype, casting=self.casting):\n dst = xp.asanyarray(-999, dtype=dst_dtype) # make some 0-dim array\n src = self.val\n xp.copyto(dst, src, casting=self.casting)\n else:\n raise DummyError()\n\n\[email protected](*(\n testing.product({\n 'shape': [\n (5,),\n (3, 3),\n (4, 5, 5),\n (0, 4, 4),\n (4, 3, 6, 6),\n ],\n 'casting': ['no', 'equiv', 'safe', 'same_kind', 'unsafe']\n })\n))\nclass TestCopyNdarray(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_ndarray(self, xp, dst_dtype, src_dtype, dst_order, src_order):\n if numpy.can_cast(src_dtype, dst_dtype, casting=self.casting):\n dst = xp.empty(self.shape, dtype=dst_dtype, order=dst_order)\n src = xp.asarray(\n testing.shaped_random(self.shape, xp, src_dtype), order=src_order)\n xp.copyto(dst, src, casting=self.casting)\n return dst\n else:\n return -1\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n @testing.for_orders('CF', name='where_order')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_ndarray_masked(self, xp, dst_dtype, src_dtype,\n dst_order, src_order, where_order):\n if numpy.can_cast(src_dtype, dst_dtype, casting=self.casting):\n dst = xp.zeros(self.shape, dtype=dst_dtype, order=dst_order)\n src = xp.asarray(\n testing.shaped_random(self.shape, xp, src_dtype), order=src_order)\n where = xp.asarray(\n testing.shaped_random(self.shape, xp, 'bool'), order=where_order)\n xp.copyto(dst, src, where=where, casting=self.casting)\n return dst\n else:\n return -1\n\n 
@testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.numpy_nlcpy_raises()\n def test_copyto_scalar_fail_casting(self, xp, dst_dtype, src_dtype):\n if not numpy.can_cast(src_dtype, dst_dtype, casting=self.casting):\n dst = xp.empty(self.shape, dtype=dst_dtype)\n src = testing.shaped_random(self.shape, xp, src_dtype)\n xp.copyto(dst, src, casting=self.casting)\n else:\n raise DummyError()\n\n\[email protected](*(\n testing.product({\n 'pat_shapes': [\n # (src_shape, dst_shape)\n ((1,), (3,)),\n ((3, 1), (3, 4)),\n ((), (5, 6)),\n ((2, 4), (3, 2, 4)),\n ((1,), (3, 6, 9)),\n ((2, 1, 4), (5, 2, 4, 4)),\n ((4, 1, 1, 6), (4, 3, 7, 6)),\n ],\n })\n))\nclass TestCopyNdarrayBroadcast(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_ndarray_broadcast(self, xp, dst_dtype, src_dtype,\n dst_order, src_order):\n if numpy.can_cast(src_dtype, dst_dtype):\n src_shape, dst_shape = self.pat_shapes\n dst = xp.empty(dst_shape, dtype=dst_dtype, order=dst_order)\n src = xp.asarray(\n testing.shaped_random(src_shape, xp, src_dtype), order=src_order)\n xp.copyto(dst, src)\n return dst\n else:\n return -1\n\n\[email protected](*(\n testing.product({\n 'pat_shapes': [\n # (src_shape, where_shape, dst_shape)\n # (src_shape, where_shape, dst_shape)\n ((), (1,), (3,)),\n ((1,), (1,), (3,)),\n ((3, 1), (3, 4), (3, 4)),\n ((2, 4), (1, 4), (2, 4)),\n ((), (3, 4), (2, 3, 4)),\n ((2, 4, 4), (4, 4), (2, 4, 4)),\n ((5, 2, 3, 4), (2, 1, 4), (5, 2, 3, 4)),\n ],\n })\n))\nclass TestCopyNdarrayBroadcastMasked(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n @testing.for_orders('CF', name='where_order')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_ndarray_broadcast_masked(self, xp, dst_dtype, src_dtype,\n dst_order, src_order, where_order):\n if numpy.can_cast(src_dtype, dst_dtype):\n src_shape, where_shape, dst_shape = self.pat_shapes\n dst = xp.zeros(dst_shape, dtype=dst_dtype, order=dst_order)\n src = xp.asarray(\n testing.shaped_random(src_shape, xp, src_dtype), order=src_order)\n where = xp.asarray(\n testing.shaped_random(where_shape, xp, 'bool'), order=where_order)\n xp.copyto(dst, src, where=where)\n return dst\n else:\n return -1\n\n\nclass TestCopyOtherDst(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_other_dst1(self, xp, src_dtype, dst_dtype, dst_order, src_order):\n if numpy.can_cast(src_dtype, dst_dtype):\n dst = xp.empty(\n (2, 4, 3), dtype=dst_dtype, order=dst_order).transpose(0, 2, 1)\n src = xp.asarray(\n testing.shaped_random((2, 3, 4), xp, src_dtype), order=src_order)\n xp.copyto(dst, src)\n return dst\n else:\n return -1\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.for_all_dtypes(name='src_dtype')\n @testing.for_orders('CF', name='dst_order')\n @testing.for_orders('CF', name='src_order')\n 
@testing.numpy_nlcpy_array_equal()\n def test_copyto_other_dst2(self, xp, src_dtype, dst_dtype, dst_order, src_order):\n if numpy.can_cast(src_dtype, dst_dtype):\n dst = xp.empty((2, 3, 5, 4), dtype=dst_dtype, order=dst_order)[:, :, 3, :]\n src = xp.asarray(\n testing.shaped_random((2, 3, 4), xp, src_dtype), order=src_order)\n xp.copyto(dst, src)\n return dst\n else:\n return -1\n\n\[email protected](*(\n testing.product({\n 'dst': [\n None,\n [[1, 2, 3], [4, 5, 6]],\n ((0.0, 0.1, 0.2), (1.0, 1.1, 1.2)),\n ]\n })\n))\nclass TestCopyIllegaldst1(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_dst1(self, xp):\n dst = self.dst\n src = xp.ones((2, 3))\n xp.copyto(dst, src)\n\n\nclass TestCopyIllegaldst2(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_dst2(self, xp):\n # make opposite ndarray\n if xp == numpy:\n dst = nlcpy.empty(2, 3)\n else: # xp == \"nlcpy\"\n dst = numpy.empty(2, 3)\n src = xp.ones((2, 3))\n xp.copyto(dst, src)\n\n\[email protected](*(\n testing.product({\n 'src': [\n # some array-like whose shape is (2, 3)\n ((0.0, 0.1, 0.2), (1.0, 1.1, 1.2)), # tuple (float)\n [[0, 1, 2], [11, 12, 13]], # list (int)\n [(0, 1j, 2j), (1, 1 + 1j, 1 + 2j)], # tuple in list (complex)\n numpy.arange(6).reshape(2, 3), # numpy.ndarray (float64)\n ],\n 'dst_shape': [\n (2, 3),\n (2, 2, 3) # broadcast\n ],\n 'casting': ['no', 'equiv', 'safe', 'same_kind', 'unsafe']\n })\n))\nclass TestCopyOtherSrc(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_other_src(self, xp, dst_dtype):\n src_dtype = numpy.asanyarray(self.src).dtype\n if numpy.can_cast(src_dtype, dst_dtype, casting=self.casting):\n dst = xp.empty(self.dst_shape, dtype=dst_dtype)\n xp.copyto(dst, self.src, casting=self.casting)\n return dst\n else:\n return -1\n\n @testing.with_requires('numpy>=1.10')\n @testing.for_all_dtypes(name='dst_dtype')\n @testing.numpy_nlcpy_raises()\n def test_copyto_other_src_fail_casting(self, xp, dst_dtype):\n src_dtype = numpy.asanyarray(self.src).dtype\n if not numpy.can_cast(src_dtype, dst_dtype, casting=self.casting):\n dst = xp.empty(self.dst_shape, dtype=dst_dtype)\n xp.copyto(dst, self.src, casting=self.casting)\n else:\n raise DummyError()\n\n\nclass TestCopyIllegalSrc(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_src1(self, xp):\n dst = xp.empty((2, 3))\n src = None\n xp.coptyto(dst, src, where=self.where)\n\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_src2(self, xp):\n dst = xp.empty((2, 3))\n src = xp.ones((2, 4)) # cann not broadcast\n xp.coptyto(dst, src, where=self.where)\n\n\[email protected](*(\n testing.product({\n 'where': [\n None,\n True,\n [False, True, False],\n ((1,), (0,),),\n numpy.ones((2, 3), dtype='bool'), # numpy.ndarray\n ]\n })\n))\nclass TestCopyOtherWhere(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_array_equal()\n def test_copyto_other_where(self, xp):\n dst = xp.zeros((2, 3))\n src = xp.ones((2, 3))\n where = self.where\n xp.copyto(dst, src, where=where)\n return dst\n\n\nclass TestCopyIllegalWhere(unittest.TestCase):\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_where1(self, xp):\n dst = xp.empty((2, 3))\n src = 
xp.ones((2, 3))\n where = xp.ones((2, 4), dtype=\"bool\") # cann not braodcast\n xp.copyto(dst, src, where=where)\n\n @testing.with_requires('numpy>=1.10')\n @testing.numpy_nlcpy_raises()\n def test_copyto_illegal_where2(self, xp):\n dst = xp.empty((2, 3))\n src = xp.ones((2, 3))\n where = xp.ones((2, 3), dtype=\"int64\") # dtype is not \"bool\"\n xp.copyto(dst, src, where=where)\n",
"import argparse\nimport subprocess\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nimport numpy as np\nimport seaborn as sns\n\n\ndef gen_graph_bar(n, nx, ny, nz, gf_nb, gf_vp_naive, gf_vp_sca, label):\n index = np.arange(len(n))\n if label is 'xa':\n labels = [\"{}xa\".format(_n) for _n in n]\n elif label is 'xya':\n labels = [\"{}x{}ya\".format(_n, _n) for _n in n]\n elif label is 'xyza':\n labels = [\"{}x{}y{}za\".format(_n, _n, _n) for _n in n]\n else:\n raise NotImplementedError\n\n fig, ax = plt.subplots()\n \n plt.rcParams[\"font.size\"] = 12\n ax.xaxis.grid(ls=\"--\")\n ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=True))\n ax.ticklabel_format(style='sci',axis='x',scilimits=(0,0))\n\n bar_height = 0.25\n alpha = 0.8\n \n plt.title(\n 'Single Precision (NX={}, NY={}, NZ={})'\n .format(int(nx[0]), int(ny[0]), int(nz[0])))\n plt.xlabel('GFLOPS', fontsize=14)\n \n plt.barh(index + bar_height * 2, gf_nb, bar_height,\n alpha=alpha, label='Numba(CPU)', align='center', color='dodgerblue')\n \n plt.barh(index + bar_height * 1, gf_vp_naive, bar_height,\n alpha=alpha, label='NLCPy(naive)', align='center', color='pink')\n\n plt.barh(index + bar_height * 0, gf_vp_sca, bar_height,\n alpha=alpha, label='NLCPy(SCA)', align='center', color='red')\n\n plt.yticks(index + bar_height/2, labels)\n plt.tick_params(labelsize=14)\n ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=14)\n plt.savefig('result/perf-{}.png'.format(label), bbox_inches='tight', dpi=600)\n\n\n\nif __name__ == \"__main__\":\n labels = ['xa', 'xya', 'xyza']\n for l in labels:\n n = []\n nx = []\n ny = []\n nz = []\n gf_nb = []\n gf_vp_naive = []\n gf_vp_sca = []\n for i in range(1, 5):\n n.append(i)\n\n size = np.fromfile(\n 'result/size-{}{}.dat'.format(i, l))\n nx.append(size[0])\n ny.append(size[1])\n nz.append(size[2])\n \n gf_nb_tmp = np.fromfile(\n 'result/numba-{}{}.dat'.format(i, l))\n gf_nb.append(gf_nb_tmp[0])\n \n gf_vp_naive_tmp = np.fromfile(\n 'result/nlcpy_naive-{}{}.dat'.format(i, l))\n gf_vp_naive.append(gf_vp_naive_tmp[0])\n \n gf_vp_sca_tmp = np.fromfile(\n 'result/nlcpy_sca-{}{}.dat'.format(i, l))\n gf_vp_sca.append(gf_vp_sca_tmp[0])\n print(\"label:\", l)\n print(\"n:\", n)\n print(\"nx:\", nx)\n print(\"ny:\", ny)\n print(\"nz:\", nz)\n print(\"gf_nb:\", gf_nb)\n print(\"gf_vp_naive:\", gf_vp_naive)\n print(\"gf_vp_sca:\", gf_vp_sca)\n print()\n gen_graph_bar(\n n[::-1], nx[::-1], ny[::-1], nz[::-1],\n gf_nb[::-1], gf_vp_naive[::-1], gf_vp_sca[::-1], l)\n"
] |
[
[
"numpy.can_cast",
"numpy.arange",
"numpy.ones",
"numpy.asanyarray",
"numpy.empty"
],
[
"matplotlib.pyplot.barh",
"matplotlib.pyplot.subplots",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tick_params"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jeffdaily/horovod
|
[
"49fd2ce76dec43cc2c3cafb983956beb298c8ef4"
] |
[
"horovod/spark/common/util.py"
] |
[
"# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\n\nimport horovod.spark.common._namedtuple_fix\n\nimport contextlib\n\nimport pyarrow as pa\nimport numpy as np\nimport pyspark.sql.functions as f\nfrom pyspark.ml.linalg import DenseVector, SparseVector, Vector, VectorUDT\nfrom pyspark.sql.types import ArrayType, BinaryType, BooleanType, FloatType, DoubleType, \\\n IntegerType, LongType, NullType, StringType\nfrom pyspark.sql.types import from_arrow_type\n\nfrom horovod.spark.common import cache, constants\n\n_training_cache = cache.TrainingDataCache()\n\n\ndef data_type_to_str(dtype):\n if dtype == VectorUDT:\n return 'Vector'\n elif dtype == IntegerType:\n return 'Int'\n elif dtype == StringType:\n return 'String'\n elif dtype == FloatType:\n return 'Float'\n elif dtype == BinaryType:\n return 'Binary'\n elif dtype == DoubleType:\n return 'Double'\n elif dtype == LongType:\n return 'Long'\n elif dtype == BooleanType:\n return 'Boolean'\n else:\n raise ValueError('Unrecognized data type: {}'.format(dtype))\n\n\ndef numpy_type_to_str(dtype):\n if dtype == np.int32:\n return 'Int'\n elif dtype == np.float32:\n return 'Float'\n elif dtype == np.uint8:\n return 'Binary'\n elif dtype == np.float64:\n return 'Double'\n elif dtype == np.int64:\n return 'Long'\n elif dtype == np.bool:\n return 'Boolean'\n else:\n raise ValueError('Cannot convert numpy data type to Spark string: {}'.format(dtype))\n\n\ndef spark_scalar_to_python_type(dtype):\n if dtype == IntegerType:\n return int\n elif dtype == StringType:\n return str\n elif dtype == FloatType:\n return float\n elif dtype == DoubleType:\n return float\n elif dtype == LongType:\n return int\n elif dtype == BooleanType:\n return bool\n elif dtype == BinaryType:\n return bytes\n else:\n raise ValueError('cannot convert Spark data Type {} to native python type'.format(dtype))\n\n\ndef pyarrow_to_spark_data_type(dtype):\n # PySpark will interpret list types as Arrays, but for ML applications we want to default to\n # treating these as DenseVectors.\n if pa.types.is_list(dtype):\n return DenseVector\n return type(from_arrow_type(dtype))\n\n\ndef data_type_to_numpy(dtype):\n if dtype == VectorUDT or dtype == SparseVector or dtype == DenseVector:\n return np.float64\n elif dtype == ArrayType:\n return np.float64\n elif dtype == IntegerType:\n return np.int32\n elif dtype == StringType:\n return np.uint8\n elif dtype == FloatType:\n return np.float32\n elif dtype == BinaryType:\n return np.uint8\n elif dtype == DoubleType:\n return np.float64\n elif dtype == LongType:\n return np.int64\n elif dtype == BooleanType:\n return np.bool\n else:\n raise ValueError('Unrecognized data type: {}'.format(dtype))\n\n\ndef check_shape_compatibility(metadata, feature_columns, label_columns,\n input_shapes=None, output_shapes=None):\n # Check for model and input type incompatibility. 
Columns must have the same size\n # (total number of elements) of the corresponding inputs.\n feature_count = len(feature_columns)\n if input_shapes is not None:\n if feature_count != len(input_shapes):\n raise ValueError('Feature column count {features} must equal '\n 'model inputs count {inputs}'\n .format(features=feature_count, inputs=len(input_shapes)))\n\n for idx, col, input_shape in zip(range(feature_count), feature_columns, input_shapes):\n col_size = metadata[col]['shape']\n if col_size is None:\n # When training directly on Parquet, we do not compute shape metadata\n continue\n\n input_size = abs(np.prod(input_shape))\n if col_size != input_size:\n raise ValueError(\n 'Feature column \\'{col}\\' with size {feature} must equal that of the '\n 'model input at index {idx} with size {input}'\n .format(col=col, feature=col_size, idx=idx, input=input_size))\n\n if output_shapes is not None:\n label_count = len(label_columns)\n if label_count != len(output_shapes):\n raise ValueError('Label column count {labels} must equal '\n 'model outputs count {outputs}'\n .format(labels=label_count, outputs=len(output_shapes)))\n\n for idx, col, output_shape in zip(range(label_count), label_columns, output_shapes):\n col_size = metadata[col]['shape']\n if col_size is None:\n # When training directly on Parquet, we do not compute shape metadata\n continue\n\n output_size = abs(np.prod(output_shape))\n if col_size != output_size:\n raise ValueError('Label column \\'{col}\\' with size {label} must equal that of the '\n 'model output at index {idx} with size {output}'\n .format(col=col, label=col_size, idx=idx, output=output_size))\n\n\ndef _get_col_info(df):\n \"\"\"\n Infer the type and shape of all the columns.\n\n NOTE: This function processes the entire DataFrame, and can therefore be very expensive to run.\n\n TODO(travis): Only run this if user sets compress_sparse param, otherwise convert all to Array.\n \"\"\"\n\n def get_meta(row):\n row_dict = row.asDict()\n row_schema = []\n for col_name, data_col in row_dict.items():\n dtype = type(data_col)\n if isinstance(data_col, DenseVector):\n # shape and size of dense vector are the same\n shape = size = data_col.array.shape[0]\n elif isinstance(data_col, SparseVector):\n # shape is the total size of vector\n shape = data_col.size\n # size is the number of nonzero elements in the sparse vector\n size = data_col.indices.shape[0]\n elif isinstance(data_col, list):\n shape = size = len(data_col)\n elif isinstance(data_col, type(None)):\n # Python 2.7 compat: NoneType is not pickleable\n # see: https://bugs.python.org/issue6477\n dtype = NullType\n shape = size = 1\n else:\n shape = size = 1\n row_schema.append((col_name, ({dtype}, {shape}, {size})))\n return row_schema\n\n def merge(x, y):\n x_dtypes, x_shapes, x_sizes = x\n y_dtypes, y_shapes, y_sizes = y\n dtypes = x_dtypes | y_dtypes\n shapes = x_shapes | y_shapes\n sizes = x_sizes | y_sizes\n return dtypes, {min(shapes), max(shapes)}, {min(sizes), max(sizes)}\n\n raw_col_info_list = df.rdd.flatMap(get_meta).reduceByKey(merge).collect()\n\n all_col_types = {}\n col_shapes = {}\n col_max_sizes = {}\n\n for col_info in raw_col_info_list:\n col_name, col_meta = col_info\n dtypes, shapes, sizes = col_meta\n\n all_col_types[col_name] = dtypes\n col_shapes[col_name] = shapes\n col_max_sizes[col_name] = sizes\n\n for col in df.schema.names:\n # All rows in every column must have the same shape\n shape_set = col_shapes[col]\n if len(shape_set) != 1:\n raise ValueError(\n 'Column {col} does not have uniform 
shape. '\n 'shape set: {shapes_set}'.format(col=col, shapes_set=shape_set))\n col_shapes[col] = shape_set.pop()\n\n # All rows in every column must have the same size unless they have SparseVectors\n sizes = col_max_sizes[col]\n if len(sizes) > 1 and not (SparseVector in all_col_types[col]):\n raise ValueError(\n 'Rows of column {col} have varying sizes. This is only allowed if datatype is '\n 'SparseVector or a mix of Sparse and DenseVector.'.format(col=col))\n col_max_sizes[col] = max(sizes)\n\n return all_col_types, col_shapes, col_max_sizes\n\n\ndef _get_metadata(df):\n \"\"\"\n Infer the type and shape of all the columns and determines if what intermediate format they\n need to be converted to in case they are a vector.\n\n Example return value:\n {\n 'col1': {\n 'dtype': <type 'float'>,\n 'intermediate_format': 'nochange',\n 'max_size': 1,\n 'shape': 1\n },\n 'col2': {\n 'dtype': <type 'float'>,\n 'intermediate_format': 'nochange',\n 'max_size': 1,\n 'shape': 1\n },\n 'col3': {\n 'dtype': <class 'pyspark.ml.linalg.SparseVector'>,\n 'intermediate_format': 'custom_sparse_format',\n 'max_size': 37,\n 'shape': 56\n }\n }\n \"\"\"\n all_col_types, col_shapes, col_max_sizes = _get_col_info(df)\n\n metadata = dict()\n for field in df.schema.fields:\n col = field.name\n col_types = all_col_types[col].copy()\n\n if DenseVector in col_types:\n # If a col has DenseVector type (whether it is mixed sparse and dense vector or just\n # DenseVector), convert all of the values to dense vector\n is_sparse_vector_only = False\n spark_data_type = DenseVector\n convert_to_target = constants.ARRAY\n elif SparseVector in col_types:\n # If a col has only sparse vectors, convert all the data into custom dense vectors\n is_sparse_vector_only = True\n spark_data_type = SparseVector\n convert_to_target = constants.CUSTOM_SPARSE\n else:\n is_sparse_vector_only = False\n spark_data_type = type(field.dataType)\n convert_to_target = constants.NOCHANGE\n\n # Explanation of the fields in metadata\n # dtype:\n #\n # spark_data_type:\n # The spark data type from dataframe schema: type(field.dataType). If column has\n # mixed SparseVector and DenseVector we categorize it as DenseVector.\n #\n # is_sparse_vector_only:\n # If all the rows in the column were sparse vectors.\n #\n # shape:\n # Determines the shape of the data in the spark dataframe. It is useful for sparse\n # vectors.\n #\n # intermediate_format:\n # Specifies if the column need to be converted to a different format so that\n # petastorm can read it. It can be one of ARRAY, CUSTOM_SPARSE, or NOCHANGE. It is\n # required because petastorm cannot read DenseVector and SparseVectors. 
We need to\n # identify these types and convert them to petastorm compatible type of array.\n\n metadata[col] = {'spark_data_type': spark_data_type,\n 'is_sparse_vector_only': is_sparse_vector_only,\n 'shape': col_shapes[col],\n 'intermediate_format': convert_to_target,\n 'max_size': col_max_sizes[col]}\n\n return metadata\n\n\ndef to_petastorm_fn(schema_cols, metadata):\n ARRAY = constants.ARRAY\n CUSTOM_SPARSE = constants.CUSTOM_SPARSE\n\n # Convert Spark Vectors into arrays so Petastorm can read them\n def to_petastorm(row):\n import numpy as np\n from pyspark import Row\n\n converted = {}\n for col in schema_cols:\n col_data = row[col]\n if isinstance(col_data, Vector):\n intermediate_format = metadata[col]['intermediate_format'] if metadata else ARRAY\n if intermediate_format == ARRAY:\n converted[col] = col_data.toArray().tolist()\n elif intermediate_format == CUSTOM_SPARSE:\n # Currently petastorm does not support reading pyspark sparse vector. We put\n # the indices and values into one array. when consuming the data, we re-create\n # the vector from this format.\n size = len(col_data.indices)\n padding_zeros = 2 * (metadata[col]['max_size'] - len(col_data.indices))\n\n converted[col] = np.concatenate(\n (np.array([size]), col_data.indices, col_data.values,\n np.zeros(padding_zeros))).tolist()\n\n if converted:\n row = row.asDict().copy()\n row.update(converted)\n return Row(**row)\n\n return to_petastorm\n\n\ndef _has_vector_column(df):\n for field in df.schema.fields:\n if isinstance(field.dataType, VectorUDT):\n return True\n return False\n\n\ndef _get_dataset_info(dataset, dataset_id, path):\n total_rows = 0\n total_byte_size = 0\n for piece in dataset.pieces:\n metadata = piece.get_metadata()\n total_rows += metadata.num_rows\n for row_group_index in range(metadata.num_row_groups):\n row_group = metadata.row_group(row_group_index)\n total_byte_size += row_group.total_byte_size\n\n if total_rows == 0:\n raise ValueError('No rows found in {} dataset: {}'.format(dataset_id, path))\n\n if total_byte_size == 0:\n raise ValueError('No data found in {} dataset: {}'.format(dataset_id, path))\n\n if total_rows > total_byte_size:\n raise ValueError('Found {} bytes in {} rows; {} dataset may be corrupted.'\n .format(total_byte_size, total_rows, dataset_id))\n\n return total_rows, total_byte_size\n\n\ndef get_simple_meta_from_parquet(store, label_columns, feature_columns, sample_weight_col, dataset_idx=None):\n train_data_path = store.get_train_data_path(dataset_idx)\n validation_data_path = store.get_val_data_path(dataset_idx)\n\n if not store.exists(train_data_path):\n raise ValueError(\"{} path does not exist in the store\".format(train_data_path))\n\n train_data = store.get_parquet_dataset(train_data_path)\n schema = train_data.schema.to_arrow_schema()\n train_rows, total_byte_size = _get_dataset_info(train_data, 'training', train_data_path)\n\n val_rows = 0\n if store.exists(validation_data_path):\n val_data = store.get_parquet_dataset(validation_data_path)\n val_rows, _ = _get_dataset_info(val_data, 'validation', validation_data_path)\n\n schema_cols = feature_columns + label_columns\n if sample_weight_col:\n schema_cols.append(sample_weight_col)\n\n metadata = {}\n for col in schema_cols:\n col_schema = schema.field_by_name(col)\n col_info = {\n 'spark_data_type': pyarrow_to_spark_data_type(col_schema.type),\n 'is_sparse_vector_only': False,\n 'shape': None, # Only used by SparseVector columns\n 'intermediate_format': constants.NOCHANGE,\n 'max_size': None # Only used by 
SparseVector columns\n }\n metadata[col] = col_info\n\n avg_row_size = total_byte_size / train_rows\n return train_rows, val_rows, metadata, avg_row_size\n\n\ndef _train_val_split(df, validation):\n train_df = df\n val_df = None\n validation_ratio = 0.0\n\n if isinstance(validation, float) and validation > 0:\n train_df, val_df = train_df.randomSplit([1.0 - validation, validation])\n validation_ratio = validation\n elif isinstance(validation, str):\n dtype = [field.dataType for field in df.schema.fields if field.name == validation][0]\n bool_dtype = isinstance(dtype, BooleanType)\n val_df = train_df.filter(\n f.col(validation) if bool_dtype else f.col(validation) > 0).drop(validation)\n train_df = train_df.filter(\n ~f.col(validation) if bool_dtype else f.col(validation) == 0).drop(validation)\n\n # Approximate ratio of validation data to training data for proportionate scale\n # of partitions\n timeout_ms = 1000\n confidence = 0.90\n train_rows = train_df.rdd.countApprox(timeout=timeout_ms, confidence=confidence)\n val_rows = val_df.rdd.countApprox(timeout=timeout_ms, confidence=confidence)\n validation_ratio = val_rows / (val_rows + train_rows)\n elif validation:\n raise ValueError('Unrecognized validation type: {}'.format(type(validation)))\n\n return train_df, val_df, validation_ratio\n\n\ndef _get_or_create_dataset(key, store, df, feature_columns, label_columns,\n validation, sample_weight_col, compress_sparse,\n num_partitions, num_processes, verbose):\n with _training_cache.lock:\n if _training_cache.is_cached(key, store):\n dataset_idx = _training_cache.get_dataset(key)\n train_rows, val_rows, metadata, avg_row_size = _training_cache.get_dataset_properties(dataset_idx)\n train_data_path = store.get_train_data_path(dataset_idx)\n val_data_path = store.get_val_data_path(dataset_idx)\n if verbose:\n print('using cached dataframes for key: {}'.format(key))\n print('train_data_path={}'.format(train_data_path))\n print('train_rows={}'.format(train_rows))\n print('val_data_path={}'.format(val_data_path))\n print('val_rows={}'.format(val_rows))\n else:\n dataset_idx = _training_cache.next_dataset_index(key)\n train_data_path = store.get_train_data_path(dataset_idx)\n val_data_path = store.get_val_data_path(dataset_idx)\n if verbose:\n print('writing dataframes')\n print('train_data_path={}'.format(train_data_path))\n print('val_data_path={}'.format(val_data_path))\n\n schema_cols = feature_columns + label_columns\n if sample_weight_col:\n schema_cols.append(sample_weight_col)\n if isinstance(validation, str):\n schema_cols.append(validation)\n df = df[schema_cols]\n\n metadata = None\n if _has_vector_column(df):\n if compress_sparse:\n metadata = _get_metadata(df)\n to_petastorm = to_petastorm_fn(schema_cols, metadata)\n df = df.rdd.map(to_petastorm).toDF()\n\n train_df, val_df, validation_ratio = _train_val_split(df, validation)\n\n train_partitions = max(int(num_partitions * (1.0 - validation_ratio)),\n num_processes)\n if verbose:\n print('train_partitions={}'.format(train_partitions))\n\n train_df \\\n .coalesce(train_partitions) \\\n .write \\\n .mode('overwrite') \\\n .parquet(train_data_path)\n\n if val_df:\n val_partitions = max(int(num_partitions * validation_ratio),\n num_processes)\n if verbose:\n print('val_partitions={}'.format(val_partitions))\n\n val_df \\\n .coalesce(val_partitions) \\\n .write \\\n .mode('overwrite') \\\n .parquet(val_data_path)\n\n train_rows, val_rows, pq_metadata, avg_row_size = get_simple_meta_from_parquet(\n store, label_columns, feature_columns, 
sample_weight_col, dataset_idx)\n\n if verbose:\n print('train_rows={}'.format(train_rows))\n if val_df:\n if val_rows == 0:\n raise ValueError(\n 'Validation DataFrame does not any samples with validation param {}'\n .format(validation))\n if verbose:\n print('val_rows={}'.format(val_rows))\n\n metadata = metadata or pq_metadata\n _training_cache.set_dataset_properties(\n dataset_idx, (train_rows, val_rows, metadata, avg_row_size))\n return dataset_idx\n\n\ndef check_validation(validation, df=None):\n if validation:\n if isinstance(validation, float):\n if validation < 0 or validation >= 1:\n raise ValueError('Validation split {} must be in the range: [0, 1)'\n .format(validation))\n elif isinstance(validation, str):\n if df is not None and validation not in df.columns:\n raise ValueError('Validation column {} does not exist in the DataFrame'\n .format(validation))\n else:\n raise ValueError('Param validation must be of type \"float\" or \"str\", found: {}'\n .format(type(validation)))\n\n\[email protected]\ndef prepare_data(num_processes, store, df, label_columns, feature_columns,\n validation=None, sample_weight_col=None, compress_sparse=False,\n partitions_per_process=10, verbose=0):\n check_validation(validation, df=df)\n if num_processes <= 0 or partitions_per_process <= 0:\n raise ValueError('num_proc={} and partitions_per_process={} must both be > 0'\n .format(num_processes, partitions_per_process))\n\n if not label_columns:\n raise ValueError('Parameter label_columns cannot be None or empty')\n\n num_partitions = num_processes * partitions_per_process\n if verbose:\n print('num_partitions={}'.format(num_partitions))\n\n for col in label_columns:\n if col not in df.columns:\n raise ValueError('Label column {} does not exist in the DataFrame'.format(col))\n\n if feature_columns is None:\n feature_columns = [col for col in df.columns if col not in set(label_columns)]\n else:\n for col in feature_columns:\n if col not in df.columns:\n raise ValueError('Feature column {} does not exist in the DataFrame'.format(col))\n\n key = _training_cache.create_key(df, store, validation)\n with _training_cache.use_key(key):\n dataset_idx = _get_or_create_dataset(key, store, df, feature_columns, label_columns,\n validation, sample_weight_col, compress_sparse,\n num_partitions, num_processes, verbose)\n yield dataset_idx\n\n\ndef get_dataset_properties(dataset_idx):\n return _training_cache.get_dataset_properties(dataset_idx)\n\n\ndef clear_training_cache():\n _training_cache.clear()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.prod"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ezhil-Language-Foundation/mannangatti
|
[
"2146274c730f00749b76123a7c839c6fb3111e81",
"2146274c730f00749b76123a7c839c6fb3111e81"
] |
[
"tfdemo.py",
"sample_image_dataset.py"
] |
[
"# Freely adapted from Aymeric Damien's code\n# This code is attributed to original author with modifications to load Tamil MNIST like data.\n\"\"\" Neural Network.\nA 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)\nimplementation with TensorFlow. This example is using the MNIST database\nof handwritten digits (http://yann.lecun.com/exdb/mnist/).\nLinks:\n [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n\n\"\"\"\n\nimport os\nimport numpy as np\n\nimport tensorflow as tf\n#from tensorflow import keras\n\n\ndef load_mnist_data(mode='train'):\n path = os.path.split(__file__)[0]\n labels_path = os.path.join(path,'data',mode+'-label-onehot.npy')\n images_path = os.path.join(path,'data',mode+'-image.npy')\n labels = np.load(labels_path)\n images = np.load(images_path)\n return labels,images\n\n# build model\nbatch_size = 128\nnum_classes = 13\nepochs = 12\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\nlabels,images = load_mnist_data('train')\ntest_labels,test_images = load_mnist_data('test')\n#images=images.squeeze()\noffset = 60000\nx_train = images[0:offset,:]\ny_train = labels[0:offset,:]\noffset=0\nx_test = test_images[offset:,:]\ny_test = test_labels[offset:,:]\n\nprint('deep dive!')\n\n# if K.image_data_format() == 'channels_first':\n# x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n# x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n# input_shape = (1, img_rows, img_cols)\n# else:\n# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n# input_shape = (img_rows, img_cols, 1)\ninput_shape=(img_rows*img_cols,)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# Parameters\nlearning_rate = 0.075\nnum_steps = 150\nbatch_size = 128\ndisplay_step = 10\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nnum_input = 784 # MNIST data input (img shape: 28*28)\nnum_classes = 13 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, num_input])\nY = tf.placeholder(\"float\", [None, num_classes])\n\n# Store layers weight & bias\nweights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n}\n# Create model\ndef neural_net(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n\n# Construct model\nlogits = neural_net(X)\nprediction = tf.nn.softmax(logits)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = 
tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initialize the variables (i.e. assign their default value)\ninit = tf.global_variables_initializer()\nfilename = os.path.abspath(__file__)\n#print(os.path.split(filename))\nbasedir = os.path.split(filename)[0]\nmodel_name = 'tamil_model_ckpt'\nmodel_path = os.path.join(basedir,'tamil_model_ckpt',model_name)\nexport_dir = os.path.join(basedir, 'exported' )\n# Start training\nwith tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n for step in range(1, num_steps+1):\n batch_x, batch_y = x_train[(step-1)*batch_size+1:step*batch_size,:],\\\n y_train[(step-1)*batch_size+1:step*batch_size,:]\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for MNIST test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: x_test,\n Y: y_test}))\n\n # save the model to disk\n saver= tf.train.Saver()\n saver.save(sess,model_path)\n",
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport sys\nimport random\nimport PIL\nfrom PIL import Image\nfrom time import sleep\n\ndef process(npyfile):\n \"\"\" Build a 16x16 tiny squares in an image.\"\"\"\n data = np.load(npyfile)\n data = data.astype(np.uint8)\n assert data.shape[1] == 784\n img=Image.new('L',(16*28,16*28))\n for i in range(16):\n for j in range(16):\n img28=Image.new('L',(28,28))\n while True:\n row=random.choice(range(data.shape[0]))\n img_row=data[row,:].reshape(28,28)\n hasTopFilled=any(img_row[0,:])\n hasBotFilled=any(img_row[27,:])\n hasLeftFilled=any(img_row[:,0])\n hasRightFilled=any(img_row[:,27])\n if sum([hasBotFilled, hasTopFilled, hasLeftFilled, hasRightFilled]) < 1:\n break\n for l in range(28):\n for m in range(28):\n img28.putpixel((m,l),(img_row[l,m],))\n img.paste(img28,(i*28,j*28))\n img.show()\n sleep(5)\nif __name__ == \"__main__\":\n process(sys.argv[1])\n"
] |
[
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.nn.softmax",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.load",
"tensorflow.argmax",
"tensorflow.random_normal"
],
[
"numpy.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RussJH/udacity
|
[
"d8d08544927cc03ae1782700e172e0fc34b4cd85"
] |
[
"Navigation/Model.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\" QNetwork model\"\"\"\n\n def __init__(self, state_size, action_size, seed=0):\n \"\"\"Constructor for QNetwork model to initialize states, actions and random seed\n Args:\n state_size: number of states\n action_size: number of actions\n seed: rng seed value\n \"\"\"\n\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, 64) # First Layer\n self.fc2 = nn.Linear(64, 64) # Second Layer\n self.fc3 = nn.Linear(64, action_size) # Third Layer\n\n def forward(self, state):\n \"\"\"Network of state to action values\n Args:\n state: state to map to an action\n Returns:\n mapped state to action values\n \"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)"
] |
[
[
"torch.nn.Linear",
"torch.manual_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YHRen/NGFP
|
[
"b1d7912846557f53d4492f8adebec7778c989a74",
"b1d7912846557f53d4492f8adebec7778c989a74",
"b1d7912846557f53d4492f8adebec7778c989a74"
] |
[
"examples/evaluate_nfp_regression.py",
"generate_nfp.py",
"reproduce_main_results.py"
] |
[
"import torch\nimport argparse\nimport hashlib\nimport pandas as pd\nimport numpy as np\nimport itertools as its\nfrom tabulate import tabulate\nfrom torch.utils.data import DataLoader\nfrom functools import partial\nfrom tqdm import tqdm\nfrom pathlib import Path, PurePath\nfrom warnings import warn\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem\nfrom sklearn.metrics import (\n r2_score,\n mean_absolute_error as mae,\n mean_squared_error as mse\n)\nfrom scipy.stats import pearsonr\ntry:\n import NeuralGraph\nexcept:\n import sys\n sys.path.insert(1,str(PurePath(Path.cwd()).parent))\n sys.path.insert(1,str(PurePath(Path.cwd())))\nfrom NeuralGraph.dataset import MolData\nfrom NeuralGraph.util import dev\n\nBSZ = 32 # batch_size\nSHUFFLE_SIG = None # random shuffle signature\ndef split_train_valid_test(n, p=0.8, v=0.1, seed=None):\n global SHUFFLE_SIG\n if seed:\n np.random.seed(seed)\n idx = np.arange(n)\n np.random.shuffle(idx)\n s = int(n*p)\n t = int(n*v)\n m = hashlib.sha256()\n m.update(idx.tobytes())\n SHUFFLE_SIG = m.hexdigest()\n # train, valid, test\n return idx[:s], idx[s:(s+t)], idx[(s+t):]\n\n\ndef load_multiclass_csv(data_file, dem=\",\", target_name=None, sample=None):\n df = pd.read_csv(data_file, delimiter=dem)\n if \"name\" in df.columns: df = df.drop(columns=[\"name\"])\n if 'smiles' in df.columns:\n df = df.set_index('smiles')\n elif 'SMILES' in df.columns:\n df = df.set_index('SMILES')\n elif 'canonical_smiles' in df.columns:\n df = df.set_index('canonical_smiles')\n else:\n raise RuntimeError(\"No smile column detected\")\n return None\n if target_name:\n clms = [clm for clm in df.columns if clm.startswith(target_name)]\n else:\n clms = [clm for clm in df.columns]\n if len(clms) == 0:\n raise RunTimeError(f\"{target_name} not in the dataset\")\n return\n clms.sort()\n df = df[clms]\n df = df.apply(pd.to_numeric, errors='coerce')\n df = df.fillna(0) # otherwise conflicts with xuefeng's assignment\n df = df.apply(np.abs) # otherwise different from previous results.\n if sample is not None:\n df = df.sample(sample) if isinstance(sample,int) else df.sample(frac=sample)\n return df.index, df.values, df.columns\n\n\ndef try_load_net(model_file=None):\n model_file = Path(model_file)\n if model_file.exists() and model_file.is_file():\n net = torch.load(args.model, map_location=dev)\n else:\n raise FileNotFoundError\n return net.to(dev)\n\n\ndef normalize_array(A):\n mean, std = np.mean(A), np.std(A)\n def norm_func(X): return (X-mean) / std\n def restore_func(X): return X * std + mean\n return norm_func, restore_func\n\n\ndef is_valid_smile_for_NFP(sml, max_degree=6):\n \"\"\"\n NFP requires a valid smile string. 
\n \"\"\"\n try:\n mol = Chem.MolFromSmiles(sml)\n atoms = mol.GetAtoms()\n except:\n warn(f\"Not a valid SMILE: {sml}\")\n return False\n\n for atom in atoms:\n if atom.GetDegree() >= max_degree:\n warn(f\"larger than max degree {max_degree} {sml}\")\n return False\n return True\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This program assumes the canonical smile inputs:\n <three_letter_dataset_short_name>, <molecule_ID_name>, <smiles>\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\",\"--input_file\", help=\"choose the input csv file\",\n type=str, required=True)\n parser.add_argument(\"--split_seed\", type=int,\n help=\"random seed for splitting dataset\")\n parser.add_argument(\"--model\", help=\"choose the pretrained model file for nfp\\\n method.\", type=str, required=True)\n parser.add_argument(\"--target_name\", type=str,\n help=\"specify the column name\")\n parser.add_argument(\"--tqdm\", help=\"use tqdm progress bar\",\n action=\"store_true\")\n args = parser.parse_args()\n print(\"#\", args)\n\n INPUT = Path(args.input_file)\n if not INPUT.exists(): raise FileNotFoundError\n SMILES, TARGET, KEYS = load_multiclass_csv(INPUT,\n target_name=args.target_name)\n print(f\"column names {INPUT.stem} with {len(KEYS)} columns:\\\n {KEYS.tolist()}\")\n NCLASS = len(KEYS)\n print(f\"NCLASS: {NCLASS}\")\n net = try_load_net(args.model)\n train_idx, valid_idx, test_idx = \\\n split_train_valid_test(len(TARGET), seed=args.split_seed)\n print(f\"split_sig: {SHUFFLE_SIG}\")\n norm_func, restore_func = normalize_array(\n np.concatenate([TARGET[train_idx], TARGET[valid_idx]], axis=0))\n target = norm_func(TARGET)\n test_data = MolData(SMILES[test_idx], target[test_idx], use_tqdm=args.tqdm)\n test_loader = DataLoader(test_data, batch_size=BSZ, shuffle=False)\n score = net.predict(test_loader)\n gt = TARGET[test_idx]\n prd = restore_func(score)\n\n res_r2 = []\n res_cor = []\n res_mae = []\n res_mse = []\n if len(prd.shape) == 1: # for single class\n prd = np.expand_dims(prd, 1)\n for idx, k in enumerate(KEYS):\n print(f\"idx, k, {idx}, {k}, {prd.shape}, {gt.shape}\")\n gt_i, prd_i = gt[:, idx], prd[:, idx]\n res_r2.append(r2_score(gt_i, prd_i))\n res_cor.append(pearsonr(gt_i, prd_i)[0])\n res_mae.append(mae(gt_i, prd_i))\n res_mse.append(mse(gt_i, prd_i))\n \n output_df = pd.DataFrame.from_dict({\n \"target\": KEYS,\n \"r2_score\": res_r2,\n \"corr_coef\": res_cor,\n \"mae\": res_mae,\n \"mse\": res_mse})\n output_df.set_index(\"target\")\n table = tabulate(output_df, headers='keys', tablefmt='github',\n floatfmt=\".4f\")\n print(table)\n output_df.to_csv('./eval.csv', index=False, float_format=\"%.4f\")\n\n prd_df = pd.DataFrame.from_dict({k:prd[:,idx] for idx,k in enumerate(KEYS)})\n gt_df = pd.DataFrame.from_dict({k:gt[:,idx] for idx,k in enumerate(KEYS)})\n prd_df.to_csv('./predict.csv', index=False)\n gt_df.to_csv('./ground_truth.csv', index=False)\n",
"import torch\nimport argparse\nimport numpy as np\nimport itertools as its\nfrom functools import partial\nimport multiprocessing as mp\nfrom tqdm import tqdm\nfrom pathlib import Path, PurePath\nfrom warnings import warn\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem\nfrom NeuralGraph.model import QSAR\nfrom NeuralGraph.nfp import nfp_net\nfrom NeuralGraph.util import dev, enlarge_weights\n\ndef try_load_net(model_file=None):\n if model_file is not None:\n model_file = Path(model_file)\n if model_file.exists() and model_file.is_file():\n net = torch.load(args.model, map_location=dev)\n else:\n raise FileNotFoundError\n else: \n net = nfp_net(pretrained=True, protein=\"Mpro\", progress=True)\n if False: # random large weights\n net = QSAR(hid_dim=128, n_class=1, max_degree=6)\n enlarge_weights(net, -1e4, 1e4)\n return net.to(dev)\n\n\ndef canonical_line_parser(line, **kwargs):\n \"\"\"\n <three_letter_dataset_short_name>, <molecule_ID_name>, <smiles>\n \"\"\"\n data_name, mol_name, smiles = line.split(',')\n smiles = smiles.strip('\\n')\n return data_name, mol_name, smiles\n\n\ndef oscillator(period):\n x, y = 1, period\n def f():\n nonlocal x\n z = x==0\n x = (x+1)%y\n return z\n return f\n\ndef is_valid_smile_for_NFP(sml, max_degree=6):\n \"\"\"\n NFP requires a valid smile string. \n \"\"\"\n try:\n mol = Chem.MolFromSmiles(sml)\n atoms = mol.GetAtoms()\n except:\n warn(f\"Not a valid SMILE: {sml}\")\n return None\n\n for atom in atoms:\n if atom.GetDegree() >= max_degree:\n warn(f\"larger than max degree {max_degree} {sml}\")\n return None\n return mol\n\n\ndef get_file_name(line_id, CHUNK_SZ, OUTPUT):\n res = '-'.join((OUTPUT, str(line_id//CHUNK_SZ*CHUNK_SZ),\n str(line_id//CHUNK_SZ*CHUNK_SZ+CHUNK_SZ)))\n return res+\".csv\"\n\nif __name__ == \"__main__\":\n \"\"\"\n This program assumes the canonical smile inputs:\n <three_letter_dataset_short_name>, <molecule_ID_name>, <smiles>\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\",\"--input_file\", help=\"choose the input csv file\",\n type=str, required=True)\n parser.add_argument(\"-o\",\"--output_dir\", help=\"specify the output directory\",\n type=str, required=True)\n parser.add_argument(\"--model\", help=\"choose the pretrained model file for nfp\\\n method. If not specified, large random weights would\\\n be used\", type=str, required=False)\n\n parser.add_argument(\"-c\", \"--chunk_size\", help=\"output chunk size. \\\n default=1000000\", type=int, default=1000000)\n parser.add_argument(\"-b\", \"--batch_size\", help=\"batch size for processing \\\n through NFP\", type=int, default=32)\n parser.add_argument(\"-n\", \"--num_workers\", type=int, default=1,\n help=\"number of workers. 
default 1 core.\\\n 0 use all cores, \")\n parser.add_argument(\"--dataset_name\", help=\"specify the stem of output\\\n files\", type=str)\n parser.add_argument(\"--tqdm\", help=\"use tqdm progress bar\",\n action=\"store_true\")\n args = parser.parse_args()\n\n OUTPUT_DIR = Path(args.output_dir)\n INPUT = Path(args.input_file)\n OUTPUT = args.dataset_name\n CHUNK_SZ = args.chunk_size\n if not INPUT.exists(): raise FileNotFoundError\n if OUTPUT is None: OUTPUT = INPUT.stem\n if OUTPUT_DIR.exists() and OUTPUT_DIR.is_dir(): pass\n else:\n warn(f\"dir {str(OUTPUT_DIR)} does not exists.\")\n warn(f\"creating {str(OUTPUT_DIR)}...\")\n OUTPUT_DIR.mkdir(parents=True, exist_ok=True)\n MISSING_DIR = OUTPUT_DIR/\"missing\"\n MISSING_DIR.mkdir(exist_ok=True)\n\n worker_pool = None\n if args.num_workers == 0:\n worker_pool = mp.Pool(mp.cpu_count()//2)\n elif args.num_workers > 1:\n worker_pool = mp.Pool(args.num_workers)\n\n\n ds_names, mol_names, smls, fps = [], [], [], []\n cache, missings = [], []\n net = try_load_net(args.model)\n osc = oscillator(args.batch_size)\n \n with open(INPUT, 'r') as in_f:\n fp = tqdm(in_f) if args.tqdm else in_f \n last_line_id = 0\n for line_id, line in enumerate(fp):\n last_line_id = line_id\n if osc() and len(cache) > 0:\n # have enough strings in the batch, go through nfp.\n fps.append(net.calc_nfp(cache, worker_pool=worker_pool))\n smls.extend(cache)\n cache = []\n ds_name, mol_name, sml = canonical_line_parser(line)\n if is_valid_smile_for_NFP(sml, 6):\n ds_names.append(ds_name)\n mol_names.append(mol_name)\n cache.append(sml)\n else:\n missings.append(line)\n\n if (line_id+1)%CHUNK_SZ == 0:\n #output to file. for the rest in the cache\n if len(cache) > 0:\n fps.append(net.calc_nfp(cache))\n smls.extend(cache)\n cache = []\n\n #output file\n filename = get_file_name(line_id, CHUNK_SZ, OUTPUT)\n print(\"filename\", filename)\n with open(OUTPUT_DIR/filename, 'w') as fw:\n fps = np.concatenate(fps)\n for d_, m_, s_, f_ in zip(ds_names, mol_names, smls, fps):\n fp_ = ':'.join(\"{:.7f}\".format(x) for x in f_)\n fw.write(f\"{d_},{m_},{s_},{fp_}\\n\")\n with open(MISSING_DIR/filename, 'w') as fw:\n for ms_ in missings:\n fw.write(ms_)\n ds_names, mol_names, smls, fps, missings = [], [], [], [], []\n\n\n\n #for the rest of lines\n if (last_line_id+1)%CHUNK_SZ != 0:\n if last_line_id > CHUNK_SZ:\n filename = get_file_name(last_line_id, CHUNK_SZ, OUTPUT)\n else: # small dataset \n filename = OUTPUT+\".csv\"\n print(\"last filename:\", filename)\n if len(cache) > 0:\n fps.append(net.calc_nfp(cache))\n smls.extend(cache)\n cache = []\n with open(OUTPUT_DIR/filename, 'w') as fw:\n fps = np.concatenate(fps)\n for d_, m_, s_, f_ in zip(ds_names, mol_names, smls, fps):\n fp_ = ':'.join(\"{:.7f}\".format(x) for x in f_)\n fw.write(f\"{d_},{m_},{s_},{fp_}\\n\")\n with open(MISSING_DIR/filename, 'w') as fw:\n for ms_ in missings:\n fw.write(ms_)\n if worker_pool:\n worker_pool.close()\n worker_pool.join()\n",
"from pathlib import Path\nfrom torch.utils.data import DataLoader, Subset\nfrom NeuralGraph.dataset import MolData, SmileData\nfrom NeuralGraph.model import QSAR, MLP\nimport torch.nn as nn\nimport pandas as pd\nimport numpy as np\nimport argparse\n\nFP_METHODS = [\"morgan\", \"nfp\"]\nEXP_NAMES = [\"solubility\", \"drug_efficacy\", \"photovoltaic\"]\nFP_LEN = 1<<9 # fingerprint length for circular FP\n\n\ndef split_train_valid_test(n, p=0.8, v=0.1, seed=None):\n if seed:\n np.random.seed(seed)\n idx = np.arange(n)\n np.random.shuffle(idx)\n s = int(n*p)\n t = int(n*v)\n # train, valid, test\n return idx[:s], idx[s:(s+t)], idx[(s+t):]\n\n\ndef normalize_array(A):\n mean, std = np.mean(A), np.std(A)\n def norm_func(X): return (X-mean) / std\n def restore_func(X): return X * std + mean\n return norm_func, restore_func\n\n\ndef load_csv(data_file, target_name):\n df = pd.read_csv(data_file)\n return df['smiles'], df[target_name].values\n\n\ndef mse(x, y):\n return ((x-y)**2).mean()\n\n\ndef main(args):\n BSZ, RUNS, LR, N_EPOCH = args.batch_size, args.runs, args.lr, args.epochs\n OUTPUT, SMILES, TARGET = [None]*3\n if args.experiment == EXP_NAMES[0]:\n OUTPUT = './output/best_delaney.pkl'\n DATAFILE = Path('./dataset/solubility/delaney-processed.csv')\n TGT_COL_NAME = 'measured log solubility in mols per litre'\n SMILES, TARGET = load_csv(DATAFILE, TGT_COL_NAME)\n elif args.experiment == EXP_NAMES[1]:\n OUTPUT = './output/best_efficacy.pkl'\n DATAFILE = Path('./dataset/drug_efficacy/malaria-processed.csv')\n TGT_COL_NAME = 'activity'\n SMILES, TARGET = load_csv(DATAFILE, TGT_COL_NAME)\n elif args.experiment == EXP_NAMES[2]:\n OUTPUT = './output/best_photovoltaic.pkl'\n DATAFILE = Path('./dataset/photovoltaic_efficiency/cep-processed.csv')\n TGT_COL_NAME = 'PCE'\n SMILES, TARGET = load_csv(DATAFILE, TGT_COL_NAME)\n else:\n raise NotImplementedError\n\n def build_data_net(args, target):\n if args.fp_method == FP_METHODS[0]:\n #\"\"\" CFP \"\"\"\n data = SmileData(SMILES, target, fp_len=FP_LEN, radius=4)\n net = lambda : MLP(hid_dim=FP_LEN, n_class=1)\n return data, net\n elif args.fp_method == FP_METHODS[1]: \n #\"\"\" NFP \"\"\"\n net = lambda : QSAR(hid_dim=128, n_class=1)\n data = MolData(SMILES, target)\n return data, net\n else:\n raise NotImplementedError\n\n res = []\n for _ in range(RUNS):\n train_idx, valid_idx, test_idx = split_train_valid_test(len(TARGET),\n seed=None)\n norm_func, restore_func = normalize_array(\n np.concatenate([TARGET[train_idx], TARGET[valid_idx]], axis=0))\n target = norm_func(TARGET)\n data, net = build_data_net(args, target)\n train_loader = DataLoader(Subset(data, train_idx), batch_size=BSZ,\n shuffle=True, drop_last=True)\n valid_loader = DataLoader(Subset(data, valid_idx), batch_size=BSZ,\n shuffle=False)\n test_loader = DataLoader(Subset(data, test_idx), batch_size=BSZ,\n shuffle=False)\n net = net()\n net = net.fit(train_loader, valid_loader, epochs=N_EPOCH, path=OUTPUT,\n criterion=nn.MSELoss(), lr=LR)\n score = net.predict(test_loader)\n gt = restore_func(target[test_idx])\n prd = restore_func(score)\n res.append(mse(gt, prd))\n print(mse(gt,prd))\n\n avg_mse, std_mse = np.asarray(res).mean(), np.asarray(res).std()\n return avg_mse, std_mse\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"experiment\", default=\"solubility\", type=str,\n help=\"Specify the experiment name\",\n choices=EXP_NAMES)\n parser.add_argument(\"fp_method\", default=\"nfp\", type=str,\n help=\"Specify the fingerprint method\",\n 
choices=FP_METHODS)\n parser.add_argument(\"-b\", \"--batch-size\", help=\"batch size\",\n default=64, type=int)\n parser.add_argument(\"-e\", \"--epochs\", help=\"number of epochs\",\n default=500, type=int)\n parser.add_argument(\"-r\", \"--runs\", help=\"number of runs\",\n default=5, type=int)\n parser.add_argument(\"-l\", \"--lr\", help=\"learning rate\",\n default=1e-3, type=float)\n parsed_args = parser.parse_args()\n print(main(parsed_args))\n"
] |
[
[
"pandas.read_csv",
"numpy.expand_dims",
"sklearn.metrics.r2_score",
"numpy.random.seed",
"torch.load",
"numpy.arange",
"sklearn.metrics.mean_absolute_error",
"scipy.stats.pearsonr",
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"sklearn.metrics.mean_squared_error",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"pandas.DataFrame.from_dict"
],
[
"numpy.concatenate",
"torch.load"
],
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.asarray",
"numpy.arange",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"torch.utils.data.Subset",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Kaminyou/110-1-NTU-DBME5028
|
[
"5aaef62cb5a3be4cbba28c2d252964a614183132",
"5aaef62cb5a3be4cbba28c2d252964a614183132"
] |
[
"week5-machine_learning/scripts/run_tune_example.py",
"week5-machine_learning/src/utils.py"
] |
[
"\"\"\"\npython ./scripts/run_tune_example.py\n\n# To see results\nfrom ray.tune import Analysis\nanalysis = Analysis(PATH_TO_EXP_DIR)\ndf = analysis.trial_dataframes\n\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nfrom random import shuffle\nfrom collections import deque\nfrom dataclasses import dataclass, asdict\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom ray import tune\n\nsys.path.append(\".\")\nfrom src.utils import load_and_process_digits\nfrom src.models import LogisticRegressionTorch\n\n\ndef simple_loader(inputs, targets, batch_size=128, shuffle_per_iteration=20):\n index = 0\n while True:\n indexes_get = np.arange(index * batch_size, (index + 1) * batch_size) % len(inputs)\n x_ = np.take(inputs, indexes_get, axis=0)\n y_ = np.take(targets, indexes_get, axis=0)\n\n index += 1\n if index % shuffle_per_iteration == 0:\n full_index = np.arange(len(x_))\n shuffle(full_index)\n inputs = np.take(inputs, full_index, axis=0)\n targets = np.take(targets, full_index, axis=0)\n yield x_, y_\n\ndef train_digits(config: dict):\n x_train, y_train, x_valid, y_valid, x_test, y_test = load_and_process_digits()\n train_loader = simple_loader(x_train, y_train, batch_size=config[\"batch_size\"])\n\n model = LogisticRegressionTorch(input_dim=x_train.shape[-1], output_dim=10)\n\n optimizer = optim.SGD(model.parameters(), lr=config[\"learning_rate\"])\n loss_fn = nn.CrossEntropyLoss()\n\n train_losses, valid_losses = [], []\n bst_loss = 1e+4\n patient_counter = 0\n for i_epoch in range(config[\"num_epochs\"]):\n loss_record = deque(maxlen=100)\n for _ in range(len(x_train) // config[\"batch_size\"]):\n x, y = next(train_loader)\n\n logits = model(torch.from_numpy(x))\n loss_train = loss_fn(logits, torch.from_numpy(y))\n\n ### Do regularization\n if config[\"l1_alpha\"] > 0:\n l1_term = torch.tensor(0.)\n for model_params in model.parameters():\n reg = torch.abs(model_params).sum()\n l1_term += reg\n loss_train = loss_train + config[\"l1_alpha\"] * l1_term\n\n if config[\"l2_alpha\"] > 0:\n l2_term = torch.tensor(0.)\n for model_params in model.parameters():\n reg = torch.norm(model_params)\n l2_term += reg\n loss_train = loss_train + config[\"l2_alpha\"] * l2_term\n\n optimizer.zero_grad()\n loss_train.backward()\n optimizer.step()\n loss_record.append(loss_train.detach().cpu().numpy())\n\n with torch.no_grad():\n yp_logits = model(torch.from_numpy(x_valid))\n loss_valid = loss_fn(yp_logits, torch.from_numpy(y_valid))\n loss_valid = loss_valid.detach().cpu().numpy()\n\n print(\"Epoch: {}/{}, Training Loss: {:.3f}, Validation Loss: {:.3f}\".format(\n str(i_epoch + 1).zfill(4),\n config[\"num_epochs\"],\n np.mean(loss_record),\n loss_valid\n ), flush=True, end=\"\\r\")\n train_losses.append(np.mean(loss_record))\n valid_losses.append(loss_valid)\n\n tune.report(validation_loss=loss_valid) # validation_loss can be keywords you want\n\n ### Do earlystopping\n if patient_counter >= config[\"n_earlystopping_rounds\"]:\n return model, train_losses, valid_losses\n\n if loss_valid < bst_loss:\n bst_loss = loss_valid\n patient_counter = 0\n else:\n patient_counter += 1\n\n return model, train_losses, valid_losses\n\n\n@dataclass\nclass TrainConfig:\n batch_size: int\n learning_rate: float\n num_epochs: int = 500\n l1_alpha: float = 0.\n l2_alpha: float = 0.\n n_earlystopping_rounds: int = 1e+8\n\n def to_dict(self):\n return asdict(self)\n\n\nif __name__ == \"__main__\":\n # Force use CPU\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n train_config = TrainConfig(\n 
batch_size=tune.choice([64, 128]),\n learning_rate=tune.grid_search([0.5, 1, 1.5]),\n num_epochs=1000,\n l1_alpha=tune.grid_search([0, 0.001, 0.01]),\n l2_alpha=tune.grid_search([0, 0.001, 0.01]),\n # n_earlystopping_rounds\n )\n\n analysis = tune.run(\n train_digits,\n config=train_config.to_dict(),\n num_samples=3,\n progress_reporter=tune.CLIReporter(max_error_rows=20)\n ) # Total num_trials = num_samples**tunable_params\n",
"\"\"\"utils.py\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import (\n roc_curve,\n auc,\n f1_score,\n confusion_matrix,\n recall_score,\n precision_score\n)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_digits\n\nimport h5py\n\n\ndef load_h5df(filepath):\n with h5py.File(filepath, \"r\") as f:\n key = list(f.keys())[0]\n data = list(f.get(key))\n return data\n\n\ndef load_image_targets(input_path, target_path):\n inputs = load_h5df(input_path) # list of array\n targets = load_h5df(target_path) # list of array\n\n return np.array(inputs), np.concatenate(targets).ravel().astype(np.float32)\n\n\ndef show_example_images(image_array, figsize=(8, 8), n_grid_x=10):\n n_images = len(image_array)\n n_grid_y = (n_images // n_grid_x)\n\n fig = plt.figure(figsize=figsize)\n for counter, img in enumerate(image_array):\n if (counter >= n_grid_x * n_grid_y):\n continue\n fig.add_subplot(n_grid_y, n_grid_x, counter + 1)\n plt.imshow(img)\n plt.axis('off')\n plt.tight_layout()\n plt.show()\n\n\ndef get_result_metrics(y_true, y_score, score_threshold=0.5):\n # For binary cases\n y_pred_binary = y_score >= score_threshold\n\n fpr, tpr, _ = roc_curve(y_true=y_true, y_score=y_score)\n auc_score = auc(fpr, tpr)\n\n f1sc = f1_score(y_true=y_true, y_pred=y_pred_binary)\n cm_ = confusion_matrix(y_true=y_true, y_pred=y_pred_binary)\n recall_ = recall_score(y_true=y_true, y_pred=y_pred_binary)\n precision_ = precision_score(y_true=y_true, y_pred=y_pred_binary)\n\n output = {\n \"fpr\": fpr,\n \"tpr\": tpr,\n \"auc\": auc_score,\n \"f1_score\": f1sc,\n \"confusion_matrix\": cm_,\n \"recall\": recall_,\n \"precision\": precision_\n }\n return output\n\n\ndef load_and_process_digits():\n digits_data = load_digits()\n x, y = digits_data[\"images\"], digits_data[\"target\"]\n x = x.reshape((len(x), -1)) # convert to vector\n x = np.array(x, dtype=np.float32) / 255. # do min/max normalization\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)\n x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=42)\n\n return x_train, y_train, x_valid, y_valid, x_test, y_test\n"
] |
[
[
"torch.abs",
"torch.nn.CrossEntropyLoss",
"torch.norm",
"numpy.take",
"numpy.arange",
"torch.from_numpy",
"torch.tensor",
"numpy.mean",
"torch.no_grad"
],
[
"numpy.array",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.roc_curve",
"sklearn.model_selection.train_test_split",
"numpy.concatenate",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.axis",
"sklearn.metrics.auc",
"sklearn.metrics.f1_score",
"matplotlib.pyplot.show",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nidhidamodaran/katib
|
[
"57a20ecfe679c661264cd9e28583c671512ab054"
] |
[
"pkg/manager/v1alpha1/file-metricscollector/tf-event/tfevent_loader.py"
] |
[
"import tensorflow as tf\nimport os\nfrom datetime import datetime\nimport rfc3339\nimport grpc\nimport api_pb2\nimport api_pb2_grpc\nimport sys\nfrom logging import getLogger, StreamHandler, INFO\nclass TFEventFileParser:\n def find_all_files(self, directory):\n for root, dirs, files in tf.gfile.Walk(directory):\n yield root\n for f in files:\n yield os.path.join(root, f)\n# TFEventFileParser parse tfevent and get metrics you specified.\n# When the event file under a directory(e.g. test dir), please specify \"{{dirname}}/{{metrics name}}\"\n# For example, in the TensorFlow official tutorial for mnist with summary (https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py), it will save \"accracy\" metrics under \"train\" dir and \"test\" dir.\n# Then in Katib, please specify name of metrics \"train/accuracy\" and \"test/accuracy\".\n\n def parse_summary(self, tfefile, metrics):\n metrics_log = {}\n for m in metrics:\n metrics_log[m] = api_pb2.MetricsLog(name=m,values=[])\n for summary in tf.train.summary_iterator(tfefile):\n paths=tfefile.split(\"/\")\n for v in summary.summary.value:\n for m in metrics:\n tag = str(v.tag)\n if len(paths) >= 2 and len(m.split(\"/\")) >= 2:\n tag = str(paths[-2]+\"/\"+v.tag)\n if tag.startswith(m):\n mv = metrics_log[m].values.add()\n mv.time=rfc3339.rfc3339(datetime.fromtimestamp(summary.wall_time))\n mv.value=str(v.simple_value)\n return metrics_log\n\nclass MetricsCollector:\n def __init__(self, manager_addr, manager_port, study_id, worker_id):\n self.logger = getLogger(__name__)\n handler = StreamHandler()\n handler.setLevel(INFO)\n self.logger.setLevel(INFO)\n self.logger.addHandler(handler)\n self.logger.propagate = False\n self.manager_addr = manager_addr\n self.study_id = study_id\n self.worker_id = worker_id\n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, manager_port)\n\n with api_pb2.beta_create_Manager_stub(channel) as client:\n gsrep = client.GetStudy(api_pb2.GetStudyRequest(study_id=study_id), 10)\n self.metrics = gsrep.study_config.metrics\n self.parser = TFEventFileParser()\n\n def parse_file(self, directory):\n mls = []\n for f in self.parser.find_all_files(directory):\n if tf.gfile.IsDirectory(f):\n continue\n try:\n self.logger.info(f+\" will be parsed.\")\n ml = self.parser.parse_summary(f, self.metrics)\n for m in ml:\n mls.append(ml[m])\n except:\n self.logger.warning(\"Unexpected error:\"+ str(sys.exc_info()[0]))\n continue\n return mls\n"
] |
[
[
"tensorflow.gfile.Walk",
"tensorflow.gfile.IsDirectory",
"tensorflow.train.summary_iterator"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Krishna00111/Stock-Price-Prediction
|
[
"4db6af067013ba1980c4664536cb5108f945f4c8"
] |
[
"scripts/Algorithms/svm.py"
] |
[
"#! /usr/bin/python\r\n'''\r\n Running Support Vector Regression Model.\r\n'''\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport sys\r\nimport pandas as pd\r\nfrom sklearn.svm import SVR\r\nfrom sklearn import cross_validation\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nfrom sklearn.cross_validation import train_test_split\r\n\r\ndef convert_to_integer(dt_time):\r\n return 10000*dt_time.year + 1000*dt_time.month + dt_time.day\r\n\r\n\r\ndef preprocess(file_dataframe, cols=['date', 'open']):\r\n \r\n if 'date' in cols:\r\n file_dataframe['date'].applymap(convert_to_integer)\r\n\r\n X = file_dataframe['open']\r\n y = file_dataframe['date']\r\n\r\n return X, y\r\n\r\n\r\ndef svm(file_dataframe, test_size=0.2, cols=['date', 'open']):\r\n '''\r\n Run Logistic Regression\r\n '''\r\n\r\n print('Loading data...')\r\n\r\n if 'date' in file_dataframe:\r\n file_dataframe['new_col'] = pd.to_datetime(file_dataframe['date']).astype(datetime)\r\n #file_dataframe['date'] = pd.to_datetime(file_dataframe['date'])\r\n file_dataframe['new_col'].apply(lambda dt_time:10000*dt_time.year + 1000*dt_time.month + dt_time.day).astype(int)\r\n\r\n print(file_dataframe['new_col'])\r\n\r\n X = file_dataframe['open']\r\n y = file_dataframe['new_col']\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n\r\n #svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)\r\n svr_lin = SVR(kernel='linear', C=1e3)\r\n #svr_poly = SVR(kernel='poly', C=1e3, degree=2)\r\n\r\n #parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\r\n\r\n #loo = cross_validation.LeaveOneOut(len(y_train) - 1)\r\n #clf = grid_search.GridSearchCV(svr_rbf, parameters)\r\n scores = []\r\n\r\n #svr_rbf.fit(X_train, y_train)\r\n svr_lin.fit(X_train, y_train)\r\n #svr_poly.fit(X_train, y_train)\r\n\r\n #scores.append(cross_validation.cross_val_score(svr_rbf, \\\r\n # X_test, y_test, scoring='mean_squared_error', cv=loo).mean())\r\n scores.append(cross_validation.cross_val_score(svr_lin, \\\r\n X_test, y_test, scoring='mean_squared_error', cv=loo).mean())\r\n #scores.append(cross_validation.cross_val_score(svr_poly, \\\r\n # X_test, y_test, scoring='mean_squared_error', cv=loo).mean())\r\n \r\n return scores\r\n\r\ndef main(dir_path):\r\n '''\r\n Run Pipeline of processes on file one by one.\r\n '''\r\n files = os.listdir(dir_path)\r\n\r\n for file_name in files:\r\n print(file_name)\r\n\r\n file_dataframe = pd.read_csv(os.path.join(dir_path, file_name), parse_dates=[1])\r\n\r\n print(svm(file_dataframe, 0.2, 'high'))\r\n\r\n break\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1])\r\n"
] |
[
[
"sklearn.svm.SVR",
"sklearn.cross_validation.cross_val_score",
"sklearn.cross_validation.train_test_split",
"pandas.to_datetime"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cjshearer/project-athena
|
[
"3394da6cd6dbe1c1c2b84f0a2f58c5168c4c6775",
"3394da6cd6dbe1c1c2b84f0a2f58c5168c4c6775"
] |
[
"src/scripts/zhymir_scripts/task2_load_from_data.py",
"src/scripts/zhymir_scripts/train_model.py"
] |
[
"\"\"\" Loads raw prediction data from npy files\n to train model \"\"\"\nimport os\nimport keras\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom scripts.zhymir_scripts.task2 import make_ensemble, add_checkpoint, train_model\nfrom utils.file import load_from_json\n\nmodel_config = load_from_json('../../configs/task2/cody_configs/model-mnist.json')\ndata_config = load_from_json('../../configs/task2/cody_configs/data-mnist.json')\nWD_config = load_from_json('../../configs/task2/cody_configs/athena-mnist.json')\nfilepath = os.path.join('../../../Task2/models', 'zhymir_model_batch_size_10_corrected.h5')\ndata_path_root = '../../../Task2/data'\nlabels = np.load(os.path.join(data_config.get('dir'), data_config.get('label_file')))\ndata = np.load(os.path.join(data_path_root, 'arr_0.npy'))\ndata = np.transpose(data, (0, 2, 1, 3))\n# for i in range(19):\n# print(data[i].shape)\n# wds = make_ensemble(wd_config=WD_config, model_config=model_config)\n# batch_size = 10\n# model = keras.models.Sequential([\n# keras.layers.InputLayer(input_shape=(wds._nb_classifiers, 10), name='WD_layer'),\n# keras.layers.Flatten(),\n# keras.layers.Dense(units=100, activation='relu', name='D1'),\n# keras.layers.Dense(10, name='output_layer', activation='softmax')\n# ])\n# model.compile('adam', 'categorical_crossentropy')\ntrain_x, test_x, train_y, test_y = train_test_split(data[0], labels, test_size=0.2)\n# print(train_x.shape)\ntotal_train_x, total_test_x = train_x, test_x\ntotal_train_y, total_test_y = train_y, test_y\n# print(total_train_x.shape)\n# model.fit(train_x, train_y, epochs=10, batch_size=10, validation_split=0.1)\n# call_back = []\n# add_checkpoint(filepath_p=filepath, callback_list=call_back)\nfor idx in range(len(data_config.get('ae_files'))):\n train_x, test_x, train_y, test_y = train_test_split(data[idx], labels, test_size=0.2)\n total_train_x = np.concatenate((total_train_x, train_x))\n # print(total_train_x.shape)\n # exit()\n total_test_x = np.concatenate((total_test_x, test_x))\n total_train_y = np.concatenate((total_train_y, train_y))\n total_test_y = np.concatenate((total_test_y, test_y))\n # model.fit(train_x, train_y, callbacks=call_back, epochs=10, batch_size=batch_size)\n # add_checkpoint(filepath_p=filepath, callback_list=call_back)\nnp.savez_compressed('../../../Task2/data/train_test', train_data=total_train_x, test_data=total_test_x,\n train_labels=total_train_y, test_labels=total_test_y)\n# model.save(filepath)\n",
"import os\nimport keras\nimport numpy as np\n\nfrom utils.file import dump_to_json\n\ndef train_model(data, labels, model_p, save=False, filename=None, save_history=False, h_filename=None):\n model_history = model_p.fit(data, labels, batch_size=10)\n if save and filename:\n model_p.save(filename)\n if save_history and h_filename:\n dump_to_json(model_history.history, h_filename)\n\nif __name__ == '__main__':\n train_data = np.load('../../../Task2/data/train_test/train_data.npy')\n train_labels = np.load('../../../Task2/data/train_test/train_labels.npy')\n print(train_data.shape)\n print(train_labels.shape)\n # exit()\n model_root = '../../../Task2/models'\n history_root = '../../../Task2/data'\n filepath = os.path.join(model_root, 'zhymir_model_2_layer.h5')\n filepath2 = os.path.join(model_root, 'zhymir_model_4_layer.h5')\n filepath3 = os.path.join(model_root, 'zhymir_model_batch_8_4_layer.h5')\n history_filename = os.path.join(history_root, 'zhymir_model_2_layer_history')\n history_filename2 = os.path.join(history_root, 'zhymir_model_4_layer_history')\n history_filename3 = os.path.join(history_root, 'zhymir_model_batch_8_4_layer_history')\n\n batch_size = 10\n num_classifiers = 16\n model = keras.models.Sequential([\n keras.layers.Dense(units=100, input_shape=(num_classifiers, 10), activation='relu', name='D1'),\n keras.layers.Flatten(),\n keras.layers.Dense(10, name='output_layer', activation='softmax')\n ])\n model.compile('adam', 'categorical_crossentropy', metrics=[keras.metrics.CategoricalAccuracy(dtype='float64')])\n # train_model(train_data, train_labels, model, True, 'temp', True, 'h_filename ')\n # exit()\n model2 = keras.models.Sequential([\n keras.layers.Dense(units=32,input_shape=(16, 10), name='D1', activation='relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(units=100, activation='relu', name='D2'),\n keras.layers.Dense(units=50, activation='relu', name='D3'),\n keras.layers.Dense(10, name='output_layer', activation='softmax')\n ])\n model2.compile('adam', 'categorical_crossentropy', metrics=[keras.metrics.CategoricalAccuracy(dtype='float64')])\n model3 = keras.models.Sequential([\n keras.layers.Dense(units=32, input_shape=(16, 10), name='D1', activation='relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(units=100, activation='relu', name='D2'),\n keras.layers.Dense(units=50, activation='relu', name='D3'),\n keras.layers.Dense(10, name='output_layer', activation='softmax')\n ])\n model3.compile('adam', 'categorical_crossentropy', metrics=[keras.metrics.CategoricalAccuracy(dtype='float64')])\n history = model.fit(train_data, train_labels, epochs=20, batch_size=10, validation_split=0.1, verbose=0)\n history2 = model2.fit(train_data, train_labels, epochs=20, batch_size=10, validation_split=0.1, verbose=0)\n history3 = model3.fit(train_data, train_labels, epochs=20, batch_size=8, validation_split=0.1, verbose=0)\n model.save(filepath)\n model2.save(filepath2)\n model3.save(filepath3)\n # print(history.history)\n # print(history.history['loss'][0].dtype)\n # print(history.history['accuracy'][0].dtype)\n # dump_to_json(history.history, 'temp')\n # exit()\n dump_to_json(history.history, history_filename)\n dump_to_json(history2.history, history_filename2)\n dump_to_json(history3.history, history_filename3)\n"
] |
[
[
"numpy.concatenate",
"numpy.savez_compressed",
"sklearn.model_selection.train_test_split",
"numpy.transpose"
],
[
"numpy.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ItaloDias-stack/paintApplication
|
[
"9a6e2ce522352e304a05617bc1c504ca2fb1dfe8"
] |
[
"main.py"
] |
[
"from cv2 import cv2 as cv\nimport numpy as np\nfrom Pincel import Pincel\nfrom Cor import Cor\n\ndesenhando = False\ncor = Cor(0, 0, 0)\npincel = Pincel(0, 0, cor, 0)\n\ndef nada(x):\n pass\n\n\ndef desenho(event, x, y, flags, param):\n global pincel,cor,desenhando\n pincel.x = x\n pincel.y = y\n if event == cv.EVENT_LBUTTONDOWN:\n desenhando = True\n elif event == cv.EVENT_MOUSEMOVE:\n if desenhando:\n cv.circle(img, (pincel.x, pincel.y), pincel.espessura, (pincel.cor.b, pincel.cor.g, pincel.cor.r), -1)\n elif event == cv.EVENT_LBUTTONUP:\n desenhando = False\n cv.circle(img, (pincel.x, pincel.y), pincel.espessura, (pincel.cor.b, pincel.cor.g, pincel.cor.r), -1)\n\n\nif __name__ == \"__main__\":\n\n img = np.zeros((400, 612, 3), np.uint8)\n cv.namedWindow(\"Paint\")\n\n # Criando as trackBars par as cores\n cv.createTrackbar(\"R\", \"Paint\", 0, 255, nada)\n cv.createTrackbar(\"G\", \"Paint\", 0, 255, nada)\n cv.createTrackbar(\"B\", \"Paint\", 0, 255, nada)\n cv.createTrackbar(\"Espessura\", \"Paint\", 10, 50, nada)\n cv.setMouseCallback('Paint', desenho)\n\n while True:\n cv.imshow(\"Paint\", img)\n k = cv.waitKey(1) & 0xFF\n if k == 27:\n break\n # Pega a posição atual do trackbar\n r = cv.getTrackbarPos('R', 'Paint')\n g = cv.getTrackbarPos('G', 'Paint')\n b = cv.getTrackbarPos('B', 'Paint')\n pincel.cor.r = r\n pincel.cor.g = g\n pincel.cor.b = b\n\n raio = cv.getTrackbarPos(\"Espessura\", 'Paint')\n pincel.espessura = raio\n #img[:] = [b, g, r]\n cv.destroyAllWindows()\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DenDen047/d3rlpy
|
[
"6184518d52f961ba6ca9f045761f810706110aa7",
"6184518d52f961ba6ca9f045761f810706110aa7"
] |
[
"d3rlpy/algos/torch/awr_impl.py",
"examples/atari/train_dqn.py"
] |
[
"import torch\n\nfrom torch.optim import SGD\nfrom d3rlpy.models.torch.v_functions import create_value_function\nfrom d3rlpy.models.torch.policies import squash_action, create_normal_policy\nfrom d3rlpy.models.torch.policies import create_categorical_policy\nfrom .utility import torch_api, train_api, eval_api\nfrom .utility import compute_augemtation_mean\nfrom .base import TorchImplBase\n\n\nclass AWRImpl(TorchImplBase):\n def __init__(self, observation_shape, action_size, actor_learning_rate,\n critic_learning_rate, momentum, use_batch_norm, use_gpu,\n scaler, augmentation, n_augmentations, encoder_params):\n self.observation_shape = observation_shape\n self.action_size = action_size\n self.actor_learning_rate = actor_learning_rate\n self.critic_learning_rate = critic_learning_rate\n self.use_batch_norm = use_batch_norm\n self.momentum = momentum\n self.scaler = scaler\n self.augmentation = augmentation\n self.n_augmentations = n_augmentations\n self.encoder_params = encoder_params\n self.use_gpu = use_gpu\n\n def build(self):\n # setup torch models\n self._build_critic()\n self._build_actor()\n\n if self.use_gpu:\n self.to_gpu(self.use_gpu)\n else:\n self.to_cpu()\n\n # setup optimizer after the parameters move to GPU\n self._build_critic_optim()\n self._build_actor_optim()\n\n def _build_critic(self):\n self.v_func = create_value_function(self.observation_shape,\n use_batch_norm=self.use_batch_norm,\n encoder_params=self.encoder_params)\n\n def _build_critic_optim(self):\n self.critic_optim = SGD(self.v_func.parameters(),\n lr=self.critic_learning_rate,\n momentum=self.momentum)\n\n def _build_actor(self):\n self.policy = create_normal_policy(self.observation_shape,\n self.action_size,\n self.use_batch_norm,\n encoder_params=self.encoder_params)\n\n def _build_actor_optim(self):\n self.actor_optim = SGD(self.policy.parameters(),\n lr=self.actor_learning_rate,\n momentum=self.momentum)\n\n @train_api\n @torch_api(scaler_targets=['observation'])\n def update_critic(self, observation, value):\n loss = compute_augemtation_mean(self.augmentation,\n self.n_augmentations,\n self._compute_critic_loss, {\n 'observation': observation,\n 'value': value\n }, ['observation'])\n\n self.critic_optim.zero_grad()\n loss.backward()\n self.critic_optim.step()\n\n return loss.cpu().detach().numpy()\n\n def _compute_critic_loss(self, observation, value):\n return self.v_func.compute_error(observation, value)\n\n @train_api\n @torch_api(scaler_targets=['observation'])\n def update_actor(self, observation, action, weight):\n loss = compute_augemtation_mean(self.augmentation,\n self.n_augmentations,\n self._compute_actor_loss, {\n 'observation': observation,\n 'action': action,\n 'weight': weight\n }, ['observation'])\n\n self.actor_optim.zero_grad()\n loss.backward()\n self.actor_optim.step()\n\n return loss.cpu().detach().numpy()\n\n def _compute_actor_loss(self, observation, action, weight):\n dist = self.policy.dist(observation)\n\n # unnormalize action via inverse tanh function\n unnormalized_action = torch.atanh(action.clamp(-0.999999, 0.999999))\n\n # compute log probability\n _, log_probs = squash_action(dist, unnormalized_action)\n\n return -(weight * log_probs).mean()\n\n def _predict_best_action(self, x):\n return self.policy.best_action(x)\n\n @eval_api\n @torch_api(scaler_targets=['x'])\n def predict_value(self, x, *args, **kwargs):\n with torch.no_grad():\n return self.v_func(x).view(-1).cpu().detach().numpy()\n\n @eval_api\n @torch_api(scaler_targets=['x'])\n def sample_action(self, x):\n with 
torch.no_grad():\n return self.policy.sample(x).cpu().detach().numpy()\n\n\nclass DiscreteAWRImpl(AWRImpl):\n def _build_actor(self):\n self.policy = create_categorical_policy(\n self.observation_shape,\n self.action_size,\n self.use_batch_norm,\n encoder_params=self.encoder_params)\n\n def _compute_actor_loss(self, observation, action, weight):\n dist = self.policy.dist(observation)\n log_probs = dist.log_prob(action).view(observation.shape[0], -1)\n return -(weight * log_probs.sum(dim=1, keepdims=True)).mean()\n",
"import argparse\nimport d3rlpy\n\nfrom d3rlpy.algos import DQN\nfrom d3rlpy.datasets import get_atari\nfrom d3rlpy.metrics.scorer import evaluate_on_environment\nfrom d3rlpy.metrics.scorer import td_error_scorer\nfrom d3rlpy.metrics.scorer import discounted_sum_of_advantage_scorer\nfrom d3rlpy.metrics.scorer import average_value_estimation_scorer\nfrom sklearn.model_selection import train_test_split\n\n\ndef main(args):\n dataset, env = get_atari(args.dataset)\n\n d3rlpy.seed(args.seed)\n\n train_episodes, test_episodes = train_test_split(dataset, test_size=0.2)\n\n dqn = DQN(\n n_frames=4, # frame stacking\n q_func_type=args.q_func_type,\n scaler='pixel',\n use_gpu=args.gpu)\n\n dqn.fit(train_episodes,\n eval_episodes=test_episodes,\n n_epochs=100,\n scorers={\n 'environment': evaluate_on_environment(env, epsilon=0.05),\n 'td_error': td_error_scorer,\n 'discounted_advantage': discounted_sum_of_advantage_scorer,\n 'value_scale': average_value_estimation_scorer\n })\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, default='breakout-mixed-v0')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--q-func-type',\n type=str,\n default='mean',\n choices=['mean', 'qr', 'iqn', 'fqf'])\n parser.add_argument('--gpu', type=int)\n args = parser.parse_args()\n main(args)\n"
] |
[
[
"torch.no_grad"
],
[
"sklearn.model_selection.train_test_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hpi-sam/GNN-TiborMaxTiago
|
[
"986b3cf1e15328f6a03aa1e7f979b3435fc98910"
] |
[
"hypertune.py"
] |
[
"from gnn.argparser import parse_arguments\nfrom utils import load_adjacency_matrix, get_device\nfrom gnn.dataset import TrafficDataset\nfrom torch.utils.data import DataLoader\nfrom gnn import models\nimport torch.optim as optim\nfrom run import run_epoch\nimport optuna\nimport logging\nimport inspect\nimport re\nfrom datetime import datetime\n\nlogger = logging.getLogger(__name__)\n\n\nclass ObjectiveCreator:\n def __init__(self, args):\n self.args = args\n self.device = get_device(args.gpu)\n self.dataset_train = TrafficDataset(args, split='train')\n self.dataset_val = TrafficDataset(args, split='val')\n self.adj = load_adjacency_matrix(args, self.device)\n self.ht_var = re.compile(\"^h_\")\n\n @staticmethod\n def get_list_type(lst):\n types = set([type(element) for element in lst])\n if len(types) > 1:\n raise TypeError(\"List has inconsistent types\")\n return types.pop()\n\n @staticmethod\n def get_list_size(lst):\n if len(lst) > 2:\n return \"categorical\"\n elif len(lst) == 2:\n return \"range\"\n else:\n raise ValueError(\"list should be either a range (2 elements) or categorical (3+ elements)\")\n\n def get_tunable_parameters(self, trial, args):\n type_to_suggestion_map = {int: trial.suggest_int, float: trial.suggest_float}\n tune_param = {}\n for key, val in inspect.getmembers(args):\n if self.ht_var.match(key) and val:\n sugest_type = self.get_list_size(val)\n if sugest_type == \"categorical\":\n tune_param[self.ht_var.sub(\"\", key)] = trial.suggest_categorical(key, val)\n if sugest_type == \"range\":\n tune_param[self.ht_var.sub(\"\", key)] = type_to_suggestion_map[self.get_list_type(val)](key, *val)\n tune_param[\"spatial_channels\"] = int(tune_param[\"bottleneck_channels\"] * tune_param[\"spatial_channels\"])\n return tune_param\n\n def objective(self, trial):\n for (param, value) in self.get_tunable_parameters(trial, self.args).items():\n setattr(self.args, param, value)\n\n model = getattr(models, args.model)(self.adj, self.args).to(self.device)\n optimizer = optim.Adam(model.parameters(), lr=self.args.lr)\n # Training\n if args.log_file:\n logging.basicConfig(filename=args.log_file, level=logging.INFO)\n else:\n logging.basicConfig(level=logging.INFO, format='# %(message)s')\n val_loss_list = []\n logger.info(f\"model: {trial.params}\")\n dataloader_train = DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=1)\n dataloader_val = DataLoader(self.dataset_val, batch_size=self.args.batch_size, shuffle=False, num_workers=1)\n for epoch in range(self.args.n_epochs):\n train_loss = run_epoch(model, optimizer, dataloader_train)\n val_loss = run_epoch(model, optimizer, dataloader_val, training=False)\n logger.info(f\"epoch: {epoch}, train:{train_loss}, val:{val_loss}\")\n trial.report(val_loss, epoch)\n if trial.should_prune():\n raise optuna.TrialPruned()\n val_loss_list.append(val_loss)\n return min(val_loss_list)\n\n\nif __name__ == '__main__':\n parser = parse_arguments()\n args = parser.parse_args()\n\n objective = ObjectiveCreator(args).objective\n\n study = optuna.create_study(direction=\"minimize\",\n sampler=optuna.samplers.TPESampler(n_startup_trials=3),\n pruner=optuna.pruners.SuccessiveHalvingPruner(min_resource='auto',\n reduction_factor=4,\n min_early_stopping_rate=0))\n study.optimize(objective, n_trials=args.n_trials)\n df_study = study.trials_dataframe()\n tstamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M\")\n mode = 'free' if args.learnable_l else 'base'\n 
df_study.to_csv(f'./studies/{args.model}-{args.convolution_operator}-{tstamp}-{mode}.csv')\n"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Philyzh8/scTAG
|
[
"4c4a5b66787e8ef144049c881a4c6087c14eb131"
] |
[
"train.py"
] |
[
"import os\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom numpy.random import seed\nfrom preprocess import *\nfrom utils import *\nimport argparse\n\nfrom sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score\nfrom sklearn import metrics\nimport scipy.io as scio\nseed(1)\ntf.random.set_seed(1)\n\nfrom scipy import sparse as sp\n\n\n# Remove warnings\nimport warnings\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nfrom sctag import SCTAG\nfrom loss import *\nfrom graph_function import *\n\n# Compute cluster centroids, which is the mean of all points in one cluster.\ndef computeCentroids(data, labels):\n n_clusters = len(np.unique(labels))\n return np.array([data[labels == i].mean(0) for i in range(n_clusters)])\n\ndef cluster_acc(y_true, y_pred):\n \"\"\"\n Calculate clustering accuracy. Require scikit-learn installed\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n # Return\n accuracy, in [0,1]\n \"\"\"\n y_true = y_true.astype(np.int64)\n assert y_pred.size == y_true.size\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n from sklearn.utils.linear_assignment_ import linear_assignment\n ind = linear_assignment(w.max() - w)\n return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"train\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--dataname\", default = \"Quake_Smart-seq2_Limb_Muscle\", type = str)\n parser.add_argument(\"--highly_genes\", default = 500, type=int)\n parser.add_argument(\"--pretrain_epochs\", default = 1000, type=int)\n parser.add_argument(\"--maxiter\", default = 300, type=int)\n parser.add_argument(\"--gpu_option\", default = \"0\")\n args = parser.parse_args()\n \n # [\"Adam\",\"Bach\",\"Klein\",\"Muraro\",\"Plasschaert\",\"Pollen\",\"Quake_10x_Bladder\",\"Quake_10x_Limb_Muscle\",\n # \"Quake_10x_Spleen\",\"Quake_10x_Trachea\",\"Quake_Smart-seq2_Diaphragm\",\"Quake_Smart-seq2_Heart\",\n # \"Quake_Smart-seq2_Limb_Muscle\",\"Quake_Smart-seq2_Lung\",\"Quake_Smart-seq2_Trachea\",\"Romanov\",\n # \"Wang_Lung\",\"Young\"]\n\n # Load data\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_option\n x, y = prepro('./data/' + args.dataname + '/data.h5')\n \n x = np.ceil(x).astype(np.int)\n cluster_number = int(max(y) - min(y) + 1)\n adata = sc.AnnData(x)\n adata.obs['Group'] = y\n adata = normalize(adata, copy=True, highly_genes=args.highly_genes, size_factors=True, normalize_input=True, logtrans_input=True)\n count = adata.X\n \n # Build model\n adj, adj_n = get_adj(count)\n model = SCTAG(count, adj=adj, adj_n=adj_n)\n\n # Pre-training\n model.pre_train(epochs=args.pretrain_epochs)\n\n Y = model.embedding(count, adj_n)\n from sklearn.cluster import SpectralClustering\n labels = SpectralClustering(n_clusters=cluster_number,affinity=\"precomputed\", assign_labels=\"discretize\",random_state=0).fit_predict(adj)\n centers = computeCentroids(Y, labels)\n \n # Clustering training\n Cluster_predicted=model.alt_train(y, epochs=args.maxiter, centers=centers)\n if y is not None:\n acc = np.round(cluster_acc(y, Cluster_predicted.y_pred), 5)\n y = list(map(int, y))\n Cluster_predicted.y_pred = np.array(Cluster_predicted.y_pred)\n nmi = 
np.round(metrics.normalized_mutual_info_score(y, Cluster_predicted.y_pred), 5)\n ari = np.round(metrics.adjusted_rand_score(y, Cluster_predicted.y_pred), 5)\n print('ACC= %.4f, NMI= %.4f, ARI= %.4f'\n % (acc, nmi, ari))"
] |
[
[
"numpy.random.seed",
"numpy.unique",
"sklearn.metrics.normalized_mutual_info_score",
"tensorflow.compat.v1.logging.set_verbosity",
"sklearn.cluster.SpectralClustering",
"numpy.ceil",
"sklearn.metrics.adjusted_rand_score",
"numpy.array",
"numpy.zeros",
"tensorflow.random.set_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OP2/PyOP2
|
[
"be8adeab3f3a3dfee979843c9444249220cc0ae7",
"be8adeab3f3a3dfee979843c9444249220cc0ae7"
] |
[
"pyop2/types/mat.py",
"pyop2/parloop.py"
] |
[
"import abc\nimport ctypes\nimport itertools\n\nimport numpy as np\nfrom petsc4py import PETSc\n\nfrom pyop2 import (\n caching,\n configuration as conf,\n datatypes as dtypes,\n exceptions as ex,\n mpi,\n profiling,\n sparsity,\n utils\n)\nfrom pyop2.types.access import Access\nfrom pyop2.types.data_carrier import DataCarrier\nfrom pyop2.types.dataset import DataSet, GlobalDataSet, MixedDataSet\nfrom pyop2.types.map import Map\nfrom pyop2.types.set import MixedSet, Set, Subset\n\n\nclass Sparsity(caching.ObjectCached):\n\n \"\"\"OP2 Sparsity, the non-zero structure a matrix derived from the union of\n the outer product of pairs of :class:`Map` objects.\n\n Examples of constructing a Sparsity: ::\n\n Sparsity(single_dset, single_map, 'mass')\n Sparsity((row_dset, col_dset), (single_rowmap, single_colmap))\n Sparsity((row_dset, col_dset),\n [(first_rowmap, first_colmap), (second_rowmap, second_colmap)])\n\n .. _MatMPIAIJSetPreallocation: http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatMPIAIJSetPreallocation.html\n \"\"\"\n\n def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, block_sparse=None):\n r\"\"\"\n :param dsets: :class:`DataSet`\\s for the left and right function\n spaces this :class:`Sparsity` maps between\n :param maps: :class:`Map`\\s to build the :class:`Sparsity` from\n :type maps: a pair of :class:`Map`\\s specifying a row map and a column\n map, or an iterable of pairs of :class:`Map`\\s specifying multiple\n row and column maps - if a single :class:`Map` is passed, it is\n used as both a row map and a column map\n :param iteration_regions: regions that select subsets of extruded maps to iterate over.\n :param string name: user-defined label (optional)\n :param nest: Should the sparsity over mixed set be built as nested blocks?\n :param block_sparse: Should the sparsity for datasets with\n cdim > 1 be built as a block sparsity?\n \"\"\"\n # Protect against re-initialization when retrieved from cache\n if self._initialized:\n return\n\n self._block_sparse = block_sparse\n # Split into a list of row maps and a list of column maps\n maps, iteration_regions = zip(*maps)\n self._rmaps, self._cmaps = zip(*maps)\n self._dsets = dsets\n\n if isinstance(dsets[0], GlobalDataSet) or isinstance(dsets[1], GlobalDataSet):\n self._dims = (((1, 1),),)\n self._d_nnz = None\n self._o_nnz = None\n self._nrows = None if isinstance(dsets[0], GlobalDataSet) else self._rmaps[0].toset.size\n self._ncols = None if isinstance(dsets[1], GlobalDataSet) else self._cmaps[0].toset.size\n self.lcomm = dsets[0].comm if isinstance(dsets[0], GlobalDataSet) else self._rmaps[0].comm\n self.rcomm = dsets[1].comm if isinstance(dsets[1], GlobalDataSet) else self._cmaps[0].comm\n else:\n self.lcomm = self._rmaps[0].comm\n self.rcomm = self._cmaps[0].comm\n\n rset, cset = self.dsets\n # All rmaps and cmaps have the same data set - just use the first.\n self._nrows = rset.size\n self._ncols = cset.size\n\n self._has_diagonal = (rset == cset)\n\n tmp = itertools.product([x.cdim for x in self._dsets[0]],\n [x.cdim for x in self._dsets[1]])\n\n dims = [[None for _ in range(self.shape[1])] for _ in range(self.shape[0])]\n for r in range(self.shape[0]):\n for c in range(self.shape[1]):\n dims[r][c] = next(tmp)\n\n self._dims = tuple(tuple(d) for d in dims)\n\n if self.lcomm != self.rcomm:\n raise ValueError(\"Haven't thought hard enough about different left and right communicators\")\n self.comm = self.lcomm\n\n self._name = name or \"sparsity_#x%x\" % id(self)\n\n 
self.iteration_regions = iteration_regions\n # If the Sparsity is defined on MixedDataSets, we need to build each\n # block separately\n if (isinstance(dsets[0], MixedDataSet) or isinstance(dsets[1], MixedDataSet)) \\\n and nest:\n self._nested = True\n self._blocks = []\n for i, rds in enumerate(dsets[0]):\n row = []\n for j, cds in enumerate(dsets[1]):\n row.append(Sparsity((rds, cds), [(rm.split[i], cm.split[j]) for\n rm, cm in maps],\n iteration_regions=iteration_regions,\n block_sparse=block_sparse))\n self._blocks.append(row)\n self._d_nnz = tuple(s._d_nnz for s in self)\n self._o_nnz = tuple(s._o_nnz for s in self)\n elif isinstance(dsets[0], GlobalDataSet) or isinstance(dsets[1], GlobalDataSet):\n # Where the sparsity maps either from or to a Global, we\n # don't really have any sparsity structure.\n self._blocks = [[self]]\n self._nested = False\n else:\n for dset in dsets:\n if isinstance(dset, MixedDataSet) and any([isinstance(d, GlobalDataSet) for d in dset]):\n raise ex.SparsityFormatError(\"Mixed monolithic matrices with Global rows or columns are not supported.\")\n self._nested = False\n with profiling.timed_region(\"CreateSparsity\"):\n nnz, onnz = sparsity.build_sparsity(self)\n self._d_nnz = nnz\n self._o_nnz = onnz\n self._blocks = [[self]]\n self._initialized = True\n\n _cache = {}\n\n @classmethod\n @utils.validate_type(('dsets', (Set, DataSet, tuple, list), ex.DataSetTypeError),\n ('maps', (Map, tuple, list), ex.MapTypeError))\n def _process_args(cls, dsets, maps, *, iteration_regions=None, name=None, nest=None, block_sparse=None):\n \"Turn maps argument into a canonical tuple of pairs.\"\n from pyop2.parloop import IterationRegion\n\n # A single data set becomes a pair of identical data sets\n dsets = [dsets, dsets] if isinstance(dsets, (Set, DataSet)) else list(dsets)\n # Upcast Sets to DataSets\n dsets = [s ** 1 if isinstance(s, Set) else s for s in dsets]\n\n # Check data sets are valid\n for dset in dsets:\n if not isinstance(dset, DataSet) and dset is not None:\n raise ex.DataSetTypeError(\"All data sets must be of type DataSet, not type %r\" % type(dset))\n\n # A single map becomes a pair of identical maps\n maps = (maps, maps) if isinstance(maps, Map) else maps\n # A single pair becomes a tuple of one pair\n maps = (maps,) if isinstance(maps[0], Map) else maps\n\n # Check maps are sane\n for pair in maps:\n if pair[0] is None or pair[1] is None:\n # None of this checking makes sense if one of the\n # matrix operands is a Global.\n continue\n for m in pair:\n if not isinstance(m, Map):\n raise ex.MapTypeError(\n \"All maps must be of type map, not type %r\" % type(m))\n if len(m.values_with_halo) == 0 and m.iterset.total_size > 0:\n raise ex.MapValueError(\n \"Unpopulated map values when trying to build sparsity.\")\n # Make sure that the \"to\" Set of each map in a pair is the set of\n # the corresponding DataSet set\n if not (pair[0].toset == dsets[0].set\n and pair[1].toset == dsets[1].set):\n raise RuntimeError(\"Map to set must be the same as corresponding DataSet set\")\n\n # Each pair of maps must have the same from-set (iteration set)\n if not pair[0].iterset == pair[1].iterset:\n raise RuntimeError(\"Iterset of both maps in a pair must be the same\")\n\n rmaps, cmaps = zip(*maps)\n if iteration_regions is None:\n iteration_regions = tuple((IterationRegion.ALL, ) for _ in maps)\n else:\n iteration_regions = tuple(tuple(sorted(region)) for region in iteration_regions)\n if not len(rmaps) == len(cmaps):\n raise RuntimeError(\"Must pass equal number of 
row and column maps\")\n\n if rmaps[0] is not None and cmaps[0] is not None:\n # Each row map must have the same to-set (data set)\n if not all(m.toset == rmaps[0].toset for m in rmaps):\n raise RuntimeError(\"To set of all row maps must be the same\")\n\n # Each column map must have the same to-set (data set)\n if not all(m.toset == cmaps[0].toset for m in cmaps):\n raise RuntimeError(\"To set of all column maps must be the same\")\n\n # Need to return the caching object, a tuple of the processed\n # arguments and a dict of kwargs (empty in this case)\n if isinstance(dsets[0], GlobalDataSet):\n cache = None\n elif isinstance(dsets[0].set, MixedSet):\n cache = dsets[0].set[0]\n else:\n cache = dsets[0].set\n if nest is None:\n nest = conf.configuration[\"matnest\"]\n if block_sparse is None:\n block_sparse = conf.configuration[\"block_sparsity\"]\n\n maps = frozenset(zip(maps, iteration_regions))\n kwargs = {\"name\": name,\n \"nest\": nest,\n \"block_sparse\": block_sparse}\n return (cache,) + (tuple(dsets), maps), kwargs\n\n @classmethod\n def _cache_key(cls, dsets, maps, name, nest, block_sparse, *args, **kwargs):\n return (dsets, maps, nest, block_sparse)\n\n def __getitem__(self, idx):\n \"\"\"Return :class:`Sparsity` block with row and column given by ``idx``\n or a given row of blocks.\"\"\"\n try:\n i, j = idx\n return self._blocks[i][j]\n except TypeError:\n return self._blocks[idx]\n\n @utils.cached_property\n def dsets(self):\n r\"\"\"A pair of :class:`DataSet`\\s for the left and right function\n spaces this :class:`Sparsity` maps between.\"\"\"\n return self._dsets\n\n @utils.cached_property\n def maps(self):\n \"\"\"A list of pairs (rmap, cmap) where each pair of\n :class:`Map` objects will later be used to assemble into this\n matrix. The iterset of each of the maps in a pair must be the\n same, while the toset of all the maps which appear first\n must be common, this will form the row :class:`Set` of the\n sparsity. Similarly, the toset of all the maps which appear\n second must be common and will form the column :class:`Set` of\n the ``Sparsity``.\"\"\"\n return list(zip(self._rmaps, self._cmaps))\n\n @utils.cached_property\n def cmaps(self):\n \"\"\"The list of column maps this sparsity is assembled from.\"\"\"\n return self._cmaps\n\n @utils.cached_property\n def rmaps(self):\n \"\"\"The list of row maps this sparsity is assembled from.\"\"\"\n return self._rmaps\n\n @utils.cached_property\n def dims(self):\n \"\"\"A tuple of tuples where the ``i,j``th entry\n is a pair giving the number of rows per entry of the row\n :class:`Set` and the number of columns per entry of the column\n :class:`Set` of the ``Sparsity``. The extents of the first\n two indices are given by the :attr:`shape` of the sparsity.\n \"\"\"\n return self._dims\n\n @utils.cached_property\n def shape(self):\n \"\"\"Number of block rows and columns.\"\"\"\n return (len(self._dsets[0] or [1]),\n len(self._dsets[1] or [1]))\n\n @utils.cached_property\n def nrows(self):\n \"\"\"The number of rows in the ``Sparsity``.\"\"\"\n return self._nrows\n\n @utils.cached_property\n def ncols(self):\n \"\"\"The number of columns in the ``Sparsity``.\"\"\"\n return self._ncols\n\n @utils.cached_property\n def nested(self):\n r\"\"\"Whether a sparsity is monolithic (even if it has a block structure).\n\n To elaborate, if a sparsity maps between\n :class:`MixedDataSet`\\s, it can either be nested, in which\n case it consists of as many blocks are the product of the\n length of the datasets it maps between, or monolithic. 
In the\n latter case the sparsity is for the full map between the mixed\n datasets, rather than between the blocks of the non-mixed\n datasets underneath them.\n \"\"\"\n return self._nested\n\n @utils.cached_property\n def name(self):\n \"\"\"A user-defined label.\"\"\"\n return self._name\n\n def __iter__(self):\n r\"\"\"Iterate over all :class:`Sparsity`\\s by row and then by column.\"\"\"\n for row in self._blocks:\n for s in row:\n yield s\n\n def __str__(self):\n return \"OP2 Sparsity: dsets %s, rmaps %s, cmaps %s, name %s\" % \\\n (self._dsets, self._rmaps, self._cmaps, self._name)\n\n def __repr__(self):\n return \"Sparsity(%r, %r, %r)\" % (self.dsets, self.maps, self.name)\n\n @utils.cached_property\n def nnz(self):\n \"\"\"Array containing the number of non-zeroes in the various rows of the\n diagonal portion of the local submatrix.\n\n This is the same as the parameter `d_nnz` used for preallocation in\n PETSc's MatMPIAIJSetPreallocation_.\"\"\"\n return self._d_nnz\n\n @utils.cached_property\n def onnz(self):\n \"\"\"Array containing the number of non-zeroes in the various rows of the\n off-diagonal portion of the local submatrix.\n\n This is the same as the parameter `o_nnz` used for preallocation in\n PETSc's MatMPIAIJSetPreallocation_.\"\"\"\n return self._o_nnz\n\n @utils.cached_property\n def nz(self):\n return self._d_nnz.sum()\n\n @utils.cached_property\n def onz(self):\n return self._o_nnz.sum()\n\n def __contains__(self, other):\n \"\"\"Return true if other is a pair of maps in self.maps(). This\n will also return true if the elements of other have parents in\n self.maps().\"\"\"\n\n for maps in self.maps:\n if tuple(other) <= maps:\n return True\n\n return False\n\n\nclass SparsityBlock(Sparsity):\n \"\"\"A proxy class for a block in a monolithic :class:`.Sparsity`.\n\n :arg parent: The parent monolithic sparsity.\n :arg i: The block row.\n :arg j: The block column.\n\n .. warning::\n\n This class only implements the properties necessary to infer\n its shape. It does not provide arrays of non zero fill.\"\"\"\n def __init__(self, parent, i, j):\n self._dsets = (parent.dsets[0][i], parent.dsets[1][j])\n self._rmaps = tuple(m.split[i] for m in parent.rmaps)\n self._cmaps = tuple(m.split[j] for m in parent.cmaps)\n self._nrows = self._dsets[0].size\n self._ncols = self._dsets[1].size\n self._has_diagonal = i == j and parent._has_diagonal\n self._parent = parent\n self._dims = tuple([tuple([parent.dims[i][j]])])\n self._blocks = [[self]]\n self.iteration_regions = parent.iteration_regions\n self.lcomm = self.dsets[0].comm\n self.rcomm = self.dsets[1].comm\n # TODO: think about lcomm != rcomm\n self.comm = self.lcomm\n\n @classmethod\n def _process_args(cls, *args, **kwargs):\n return (None, ) + args, kwargs\n\n @classmethod\n def _cache_key(cls, *args, **kwargs):\n return None\n\n def __repr__(self):\n return \"SparsityBlock(%r, %r, %r)\" % (self._parent, self._i, self._j)\n\n\ndef masked_lgmap(lgmap, mask, block=True):\n if block:\n indices = lgmap.block_indices.copy()\n bsize = lgmap.getBlockSize()\n else:\n indices = lgmap.indices.copy()\n bsize = 1\n indices[mask] = -1\n return PETSc.LGMap().create(indices=indices, bsize=bsize, comm=lgmap.comm)\n\n\nclass AbstractMat(DataCarrier, abc.ABC):\n r\"\"\"OP2 matrix data. 
A ``Mat`` is defined on a sparsity pattern and holds a value\n for each element in the :class:`Sparsity`.\n\n When a ``Mat`` is passed to :func:`pyop2.op2.par_loop`, the maps via which\n indirection occurs for the row and column space, and the access\n descriptor are passed by `calling` the ``Mat``. For instance, if a\n ``Mat`` named ``A`` is to be accessed for reading via a row :class:`Map`\n named ``R`` and a column :class:`Map` named ``C``, this is accomplished by::\n\n A(pyop2.READ, (R[pyop2.i[0]], C[pyop2.i[1]]))\n\n Notice that it is `always` necessary to index the indirection maps\n for a ``Mat``. See the :class:`Mat` documentation for more\n details.\n\n .. note ::\n\n After executing :func:`par_loop`\\s that write to a ``Mat`` and\n before using it (for example to view its values), you must call\n :meth:`assemble` to finalise the writes.\n \"\"\"\n @utils.cached_property\n def pack(self):\n from pyop2.codegen.builder import MatPack\n return MatPack\n\n ASSEMBLED = \"ASSEMBLED\"\n INSERT_VALUES = \"INSERT_VALUES\"\n ADD_VALUES = \"ADD_VALUES\"\n\n _modes = [Access.WRITE, Access.INC]\n\n @utils.validate_type(('sparsity', Sparsity, ex.SparsityTypeError),\n ('name', str, ex.NameTypeError))\n def __init__(self, sparsity, dtype=None, name=None):\n self._sparsity = sparsity\n self.lcomm = sparsity.lcomm\n self.rcomm = sparsity.rcomm\n self.comm = sparsity.comm\n dtype = dtype or dtypes.ScalarType\n self._datatype = np.dtype(dtype)\n self._name = name or \"mat_#x%x\" % id(self)\n self.assembly_state = Mat.ASSEMBLED\n\n @utils.validate_in(('access', _modes, ex.ModeValueError))\n def __call__(self, access, path, lgmaps=None, unroll_map=False):\n from pyop2.parloop import Arg\n path_maps = utils.as_tuple(path, Map, 2)\n if conf.configuration[\"type_check\"] and tuple(path_maps) not in self.sparsity:\n raise ex.MapValueError(\"Path maps not in sparsity maps\")\n return Arg(data=self, map=path_maps, access=access, lgmaps=lgmaps, unroll_map=unroll_map)\n\n @utils.cached_property\n def _wrapper_cache_key_(self):\n return (type(self), self.dtype, self.dims)\n\n def assemble(self):\n \"\"\"Finalise this :class:`Mat` ready for use.\n\n Call this /after/ executing all the par_loops that write to\n the matrix before you want to look at it.\n \"\"\"\n raise NotImplementedError(\"Subclass should implement this\")\n\n def addto_values(self, rows, cols, values):\n \"\"\"Add a block of values to the :class:`Mat`.\"\"\"\n raise NotImplementedError(\n \"Abstract Mat base class doesn't know how to set values.\")\n\n def set_values(self, rows, cols, values):\n \"\"\"Set a block of values in the :class:`Mat`.\"\"\"\n raise NotImplementedError(\n \"Abstract Mat base class doesn't know how to set values.\")\n\n @utils.cached_property\n def nblocks(self):\n return int(np.prod(self.sparsity.shape))\n\n @utils.cached_property\n def _argtypes_(self):\n \"\"\"Ctypes argtype for this :class:`Mat`\"\"\"\n return tuple(ctypes.c_voidp for _ in self)\n\n @utils.cached_property\n def dims(self):\n \"\"\"A pair of integers giving the number of matrix rows and columns for\n each member of the row :class:`Set` and column :class:`Set`\n respectively. 
This corresponds to the ``cdim`` member of a\n :class:`DataSet`.\"\"\"\n return self._sparsity._dims\n\n @utils.cached_property\n def nrows(self):\n \"The number of rows in the matrix (local to this process)\"\n return sum(d.size * d.cdim for d in self.sparsity.dsets[0])\n\n @utils.cached_property\n def nblock_rows(self):\n \"\"\"The number \"block\" rows in the matrix (local to this process).\n\n This is equivalent to the number of rows in the matrix divided\n by the dimension of the row :class:`DataSet`.\n \"\"\"\n assert len(self.sparsity.dsets[0]) == 1, \"Block rows don't make sense for mixed Mats\"\n return self.sparsity.dsets[0].size\n\n @utils.cached_property\n def nblock_cols(self):\n \"\"\"The number of \"block\" columns in the matrix (local to this process).\n\n This is equivalent to the number of columns in the matrix\n divided by the dimension of the column :class:`DataSet`.\n \"\"\"\n assert len(self.sparsity.dsets[1]) == 1, \"Block cols don't make sense for mixed Mats\"\n return self.sparsity.dsets[1].size\n\n @utils.cached_property\n def ncols(self):\n \"The number of columns in the matrix (local to this process)\"\n return sum(d.size * d.cdim for d in self.sparsity.dsets[1])\n\n @utils.cached_property\n def sparsity(self):\n \"\"\":class:`Sparsity` on which the ``Mat`` is defined.\"\"\"\n return self._sparsity\n\n @utils.cached_property\n def _is_scalar_field(self):\n # Sparsity from Dat to MixedDat has a shape like (1, (1, 1))\n # (which you can't take the product of)\n return all(np.prod(d) == 1 for d in self.dims)\n\n @utils.cached_property\n def _is_vector_field(self):\n return not self._is_scalar_field\n\n def change_assembly_state(self, new_state):\n \"\"\"Switch the matrix assembly state.\"\"\"\n if new_state == Mat.ASSEMBLED or self.assembly_state == Mat.ASSEMBLED:\n self.assembly_state = new_state\n elif new_state != self.assembly_state:\n self._flush_assembly()\n self.assembly_state = new_state\n else:\n pass\n\n def _flush_assembly(self):\n \"\"\"Flush the in flight assembly operations (used when\n switching between inserting and adding values).\"\"\"\n pass\n\n @property\n def values(self):\n \"\"\"A numpy array of matrix values.\n\n .. warning ::\n This is a dense array, so will need a lot of memory. It's\n probably not a good idea to access this property if your\n matrix has more than around 10000 degrees of freedom.\n \"\"\"\n raise NotImplementedError(\"Abstract base Mat does not implement values()\")\n\n @utils.cached_property\n def dtype(self):\n \"\"\"The Python type of the data.\"\"\"\n return self._datatype\n\n @utils.cached_property\n def nbytes(self):\n \"\"\"Return an estimate of the size of the data associated with this\n :class:`Mat` in bytes. This will be the correct size of the\n data payload, but does not take into account the (presumably\n small) overhead of the object and its metadata. 
The memory\n associated with the sparsity pattern is also not recorded.\n\n Note that this is the process local memory usage, not the sum\n over all MPI processes.\n \"\"\"\n if self._sparsity._block_sparse:\n mult = np.sum(np.prod(self._sparsity.dims))\n else:\n mult = 1\n return (self._sparsity.nz + self._sparsity.onz) \\\n * self.dtype.itemsize * mult\n\n def __iter__(self):\n \"\"\"Yield self when iterated over.\"\"\"\n yield self\n\n def __mul__(self, other):\n \"\"\"Multiply this :class:`Mat` with the vector ``other``.\"\"\"\n raise NotImplementedError(\"Abstract base Mat does not implement multiplication\")\n\n def __str__(self):\n return \"OP2 Mat: %s, sparsity (%s), datatype %s\" \\\n % (self._name, self._sparsity, self._datatype.name)\n\n def __repr__(self):\n return \"Mat(%r, %r, %r)\" \\\n % (self._sparsity, self._datatype, self._name)\n\n\nclass Mat(AbstractMat):\n \"\"\"OP2 matrix data. A Mat is defined on a sparsity pattern and holds a value\n for each element in the :class:`Sparsity`.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.mat_type = kwargs.pop(\"mat_type\", None)\n super().__init__(*args, **kwargs)\n self._init()\n self.assembly_state = Mat.ASSEMBLED\n\n # Firedrake relies on this to distinguish between MatBlock and not for boundary conditions\n local_to_global_maps = (None, None)\n\n @utils.cached_property\n def _kernel_args_(self):\n return tuple(a.handle.handle for a in self)\n\n @mpi.collective\n def _init(self):\n if not self.dtype == PETSc.ScalarType:\n raise RuntimeError(\"Can only create a matrix of type %s, %s is not supported\"\n % (PETSc.ScalarType, self.dtype))\n if self.mat_type == \"dense\":\n self._init_dense()\n # If the Sparsity is defined on MixedDataSets, we need to build a MatNest\n elif self.sparsity.shape > (1, 1):\n if self.sparsity.nested:\n self._init_nest()\n self._nested = True\n else:\n self._init_monolithic()\n else:\n self._init_block()\n\n def _init_dense(self):\n mat = PETSc.Mat()\n rset, cset = self.sparsity.dsets\n rlgmap = rset.unblocked_lgmap\n clgmap = cset.unblocked_lgmap\n mat.createDense(size=((self.nrows, None), (self.ncols, None)),\n bsize=1,\n comm=self.comm)\n mat.setLGMap(rmap=rlgmap, cmap=clgmap)\n self.handle = mat\n self._blocks = []\n rows, cols = self.sparsity.shape\n for i in range(rows):\n row = []\n for j in range(cols):\n row.append(MatBlock(self, i, j))\n self._blocks.append(row)\n mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False)\n mat.setOption(mat.Option.SUBSET_OFF_PROC_ENTRIES, True)\n mat.setUp()\n # Put zeros in all the places we might eventually put a value.\n with profiling.timed_region(\"MatZeroInitial\"):\n mat.zeroEntries()\n mat.assemble()\n\n def _init_monolithic(self):\n mat = PETSc.Mat()\n rset, cset = self.sparsity.dsets\n rlgmap = rset.unblocked_lgmap\n clgmap = cset.unblocked_lgmap\n mat.createAIJ(size=((self.nrows, None), (self.ncols, None)),\n nnz=(self.sparsity.nnz, self.sparsity.onnz),\n bsize=1,\n comm=self.comm)\n mat.setLGMap(rmap=rlgmap, cmap=clgmap)\n self.handle = mat\n self._blocks = []\n rows, cols = self.sparsity.shape\n for i in range(rows):\n row = []\n for j in range(cols):\n row.append(MatBlock(self, i, j))\n self._blocks.append(row)\n mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, False)\n mat.setOption(mat.Option.KEEP_NONZERO_PATTERN, True)\n # We completely fill the allocated matrix when zeroing the\n # entries, so raise an error if we \"missed\" one.\n mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True)\n 
mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False)\n mat.setOption(mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)\n # The first assembly (filling with zeros) sets all possible entries.\n mat.setOption(mat.Option.SUBSET_OFF_PROC_ENTRIES, True)\n # Put zeros in all the places we might eventually put a value.\n with profiling.timed_region(\"MatZeroInitial\"):\n for i in range(rows):\n for j in range(cols):\n sparsity.fill_with_zeros(self[i, j].handle,\n self[i, j].sparsity.dims[0][0],\n self[i, j].sparsity.maps,\n self[i, j].sparsity.iteration_regions,\n set_diag=self[i, j].sparsity._has_diagonal)\n self[i, j].handle.assemble()\n\n mat.assemble()\n mat.setOption(mat.Option.NEW_NONZERO_LOCATION_ERR, True)\n mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, True)\n\n def _init_nest(self):\n mat = PETSc.Mat()\n self._blocks = []\n rows, cols = self.sparsity.shape\n rset, cset = self.sparsity.dsets\n for i in range(rows):\n row = []\n for j in range(cols):\n row.append(Mat(self.sparsity[i, j], self.dtype,\n '_'.join([self.name, str(i), str(j)])))\n self._blocks.append(row)\n # PETSc Mat.createNest wants a flattened list of Mats\n mat.createNest([[m.handle for m in row_] for row_ in self._blocks],\n isrows=rset.field_ises, iscols=cset.field_ises,\n comm=self.comm)\n self.handle = mat\n\n def _init_block(self):\n self._blocks = [[self]]\n\n rset, cset = self.sparsity.dsets\n if (isinstance(rset, GlobalDataSet) or isinstance(cset, GlobalDataSet)):\n self._init_global_block()\n return\n\n mat = PETSc.Mat()\n row_lg = rset.lgmap\n col_lg = cset.lgmap\n rdim, cdim = self.dims[0][0]\n\n if rdim == cdim and rdim > 1 and self.sparsity._block_sparse:\n # Size is total number of rows and columns, but the\n # /sparsity/ is the block sparsity.\n block_sparse = True\n create = mat.createBAIJ\n else:\n # Size is total number of rows and columns, sparsity is\n # the /dof/ sparsity.\n block_sparse = False\n create = mat.createAIJ\n create(size=((self.nrows, None),\n (self.ncols, None)),\n nnz=(self.sparsity.nnz, self.sparsity.onnz),\n bsize=(rdim, cdim),\n comm=self.comm)\n mat.setLGMap(rmap=row_lg, cmap=col_lg)\n # Stash entries destined for other processors\n mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False)\n # Any add or insertion that would generate a new entry that has not\n # been preallocated will raise an error\n mat.setOption(mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)\n # Do not ignore zeros while we fill the initial matrix so that\n # petsc doesn't compress things out.\n if not block_sparse:\n mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, False)\n # When zeroing rows (e.g. for enforcing Dirichlet bcs), keep those in\n # the nonzero structure of the matrix. 
Otherwise PETSc would compact\n # the sparsity and render our sparsity caching useless.\n mat.setOption(mat.Option.KEEP_NONZERO_PATTERN, True)\n # We completely fill the allocated matrix when zeroing the\n # entries, so raise an error if we \"missed\" one.\n mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True)\n # Put zeros in all the places we might eventually put a value.\n with profiling.timed_region(\"MatZeroInitial\"):\n sparsity.fill_with_zeros(mat, self.sparsity.dims[0][0],\n self.sparsity.maps, self.sparsity.iteration_regions,\n set_diag=self.sparsity._has_diagonal)\n mat.assemble()\n mat.setOption(mat.Option.NEW_NONZERO_LOCATION_ERR, True)\n # Now we've filled up our matrix, so the sparsity is\n # \"complete\", we can ignore subsequent zero entries.\n if not block_sparse:\n mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, True)\n self.handle = mat\n\n def _init_global_block(self):\n \"\"\"Initialise this block in the case where the matrix maps either\n to or from a :class:`Global`\"\"\"\n\n if (isinstance(self.sparsity._dsets[0], GlobalDataSet) and isinstance(self.sparsity._dsets[1], GlobalDataSet)):\n # In this case both row and column are a Global.\n mat = _GlobalMat(comm=self.comm)\n else:\n mat = _DatMat(self.sparsity)\n self.handle = mat\n\n def __call__(self, access, path, lgmaps=None, unroll_map=False):\n \"\"\"Override the parent __call__ method in order to special-case global\n blocks in matrices.\"\"\"\n from pyop2.parloop import Arg\n # One of the path entries was not an Arg.\n if path == (None, None):\n lgmaps, = lgmaps\n assert all(l is None for l in lgmaps)\n return Arg(data=self.handle.getPythonContext().global_, access=access)\n elif None in path:\n thispath = path[0] or path[1]\n return Arg(data=self.handle.getPythonContext().dat, map=thispath, access=access)\n else:\n return super().__call__(access, path, lgmaps=lgmaps, unroll_map=unroll_map)\n\n def __getitem__(self, idx):\n \"\"\"Return :class:`Mat` block with row and column given by ``idx``\n or a given row of blocks.\"\"\"\n try:\n i, j = idx\n return self.blocks[i][j]\n except TypeError:\n return self.blocks[idx]\n\n def __iter__(self):\n \"\"\"Iterate over all :class:`Mat` blocks by row and then by column.\"\"\"\n yield from itertools.chain(*self.blocks)\n\n @mpi.collective\n def zero(self):\n \"\"\"Zero the matrix.\"\"\"\n self.assemble()\n self.handle.zeroEntries()\n\n @mpi.collective\n def zero_rows(self, rows, diag_val=1.0):\n \"\"\"Zeroes the specified rows of the matrix, with the exception of the\n diagonal entry, which is set to diag_val. 
May be used for applying\n strong boundary conditions.\n\n :param rows: a :class:`Subset` or an iterable\"\"\"\n self.assemble()\n rows = rows.indices if isinstance(rows, Subset) else rows\n self.handle.zeroRowsLocal(rows, diag_val)\n\n def _flush_assembly(self):\n self.handle.assemble(assembly=PETSc.Mat.AssemblyType.FLUSH)\n\n @mpi.collective\n def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):\n \"\"\"Set the diagonal entry in ``rows`` to a particular value.\n\n :param rows: a :class:`Subset` or an iterable.\n :param diag_val: the value to add\n\n The indices in ``rows`` should index the process-local rows of\n the matrix (no mapping to global indexes is applied).\n \"\"\"\n rows = np.asarray(rows, dtype=dtypes.IntType)\n rbs, _ = self.dims[0][0]\n if rbs > 1:\n if idx is not None:\n rows = rbs * rows + idx\n else:\n rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten()\n rows = rows.reshape(-1, 1)\n self.change_assembly_state(Mat.INSERT_VALUES)\n if len(rows) > 0:\n values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType)\n self.handle.setValuesLocalRCV(rows, rows, values,\n addv=PETSc.InsertMode.INSERT_VALUES)\n\n @mpi.collective\n def assemble(self):\n # If the matrix is nested, we need to check each subblock to\n # see if it needs assembling. But if it's monolithic then the\n # subblock assembly doesn't do anything, so we don't do that.\n if self.sparsity.nested:\n self.handle.assemble()\n for m in self:\n if m.assembly_state != Mat.ASSEMBLED:\n m.change_assembly_state(Mat.ASSEMBLED)\n else:\n # Instead, we assemble the full monolithic matrix.\n self.handle.assemble()\n for m in self:\n m.handle.assemble()\n self.change_assembly_state(Mat.ASSEMBLED)\n\n def addto_values(self, rows, cols, values):\n \"\"\"Add a block of values to the :class:`Mat`.\"\"\"\n self.change_assembly_state(Mat.ADD_VALUES)\n if len(values) > 0:\n self.handle.setValuesBlockedLocal(rows, cols, values,\n addv=PETSc.InsertMode.ADD_VALUES)\n\n def set_values(self, rows, cols, values):\n \"\"\"Set a block of values in the :class:`Mat`.\"\"\"\n self.change_assembly_state(Mat.INSERT_VALUES)\n if len(values) > 0:\n self.handle.setValuesBlockedLocal(rows, cols, values,\n addv=PETSc.InsertMode.INSERT_VALUES)\n\n @utils.cached_property\n def blocks(self):\n \"\"\"2-dimensional array of matrix blocks.\"\"\"\n return self._blocks\n\n @property\n def values(self):\n self.assemble()\n if self.nrows * self.ncols > 1000000:\n raise ValueError(\"Printing dense matrix with more than 1 million entries not allowed.\\n\"\n \"Are you sure you wanted to do this?\")\n if (isinstance(self.sparsity._dsets[0], GlobalDataSet) or isinstance(self.sparsity._dsets[1], GlobalDataSet)):\n return self.handle.getPythonContext()[:, :]\n else:\n return self.handle[:, :]\n\n\nclass MatBlock(AbstractMat):\n \"\"\"A proxy class for a local block in a monolithic :class:`.Mat`.\n\n :arg parent: The parent monolithic matrix.\n :arg i: The block row.\n :arg j: The block column.\n \"\"\"\n def __init__(self, parent, i, j):\n self._parent = parent\n self._i = i\n self._j = j\n self._sparsity = SparsityBlock(parent.sparsity, i, j)\n rset, cset = self._parent.sparsity.dsets\n rowis = rset.local_ises[i]\n colis = cset.local_ises[j]\n self.handle = parent.handle.getLocalSubMatrix(isrow=rowis,\n iscol=colis)\n self.comm = parent.comm\n self.local_to_global_maps = self.handle.getLGMap()\n\n @utils.cached_property\n def _kernel_args_(self):\n return (self.handle.handle, )\n\n @utils.cached_property\n def _wrapper_cache_key_(self):\n 
return (type(self._parent), self._parent.dtype, self.dims)\n\n @property\n def assembly_state(self):\n # Track our assembly state only\n return self._parent.assembly_state\n\n @assembly_state.setter\n def assembly_state(self, value):\n self._parent.assembly_state = value\n\n def __getitem__(self, idx):\n return self\n\n def __iter__(self):\n yield self\n\n def _flush_assembly(self):\n # Need to flush for all blocks\n for b in self._parent:\n b.handle.assemble(assembly=PETSc.Mat.AssemblyType.FLUSH)\n self._parent._flush_assembly()\n\n def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):\n rows = np.asarray(rows, dtype=dtypes.IntType)\n rbs, _ = self.dims[0][0]\n if rbs > 1:\n if idx is not None:\n rows = rbs * rows + idx\n else:\n rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten()\n rows = rows.reshape(-1, 1)\n self.change_assembly_state(Mat.INSERT_VALUES)\n if len(rows) > 0:\n values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType)\n self.handle.setValuesLocalRCV(rows, rows, values,\n addv=PETSc.InsertMode.INSERT_VALUES)\n\n def addto_values(self, rows, cols, values):\n \"\"\"Add a block of values to the :class:`Mat`.\"\"\"\n self.change_assembly_state(Mat.ADD_VALUES)\n if len(values) > 0:\n self.handle.setValuesBlockedLocal(rows, cols, values,\n addv=PETSc.InsertMode.ADD_VALUES)\n\n def set_values(self, rows, cols, values):\n \"\"\"Set a block of values in the :class:`Mat`.\"\"\"\n self.change_assembly_state(Mat.INSERT_VALUES)\n if len(values) > 0:\n self.handle.setValuesBlockedLocal(rows, cols, values,\n addv=PETSc.InsertMode.INSERT_VALUES)\n\n def assemble(self):\n raise RuntimeError(\"Should never call assemble on MatBlock\")\n\n @property\n def values(self):\n rset, cset = self._parent.sparsity.dsets\n rowis = rset.field_ises[self._i]\n colis = cset.field_ises[self._j]\n self._parent.assemble()\n mat = self._parent.handle.createSubMatrix(isrow=rowis,\n iscol=colis)\n return mat[:, :]\n\n @property\n def dtype(self):\n return self._parent.dtype\n\n @property\n def nbytes(self):\n return self._parent.nbytes // (np.prod(self.sparsity.shape))\n\n def __repr__(self):\n return \"MatBlock(%r, %r, %r)\" % (self._parent, self._i, self._j)\n\n def __str__(self):\n return \"Block[%s, %s] of %s\" % (self._i, self._j, self._parent)\n\n\ndef _DatMat(sparsity, dat=None):\n \"\"\"A :class:`PETSc.Mat` with global size nx1 or nx1 implemented as a\n :class:`.Dat`\"\"\"\n if isinstance(sparsity.dsets[0], GlobalDataSet):\n dset = sparsity.dsets[1]\n sizes = ((None, 1), (dset.size*dset.cdim, None))\n elif isinstance(sparsity.dsets[1], GlobalDataSet):\n dset = sparsity.dsets[0]\n sizes = ((dset.size * dset.cdim, None), (None, 1))\n else:\n raise ValueError(\"Not a DatMat\")\n\n A = PETSc.Mat().createPython(sizes, comm=sparsity.comm)\n A.setPythonContext(_DatMatPayload(sparsity, dat))\n A.setUp()\n return A\n\n\nclass _DatMatPayload:\n\n def __init__(self, sparsity, dat=None, dset=None):\n from pyop2.types.dat import Dat\n if isinstance(sparsity.dsets[0], GlobalDataSet):\n self.dset = sparsity.dsets[1]\n self.sizes = ((None, 1), (self.dset.size * self.dset.cdim, None))\n elif isinstance(sparsity.dsets[1], GlobalDataSet):\n self.dset = sparsity.dsets[0]\n self.sizes = ((self.dset.size * self.dset.cdim, None), (None, 1))\n else:\n raise ValueError(\"Not a DatMat\")\n\n self.sparsity = sparsity\n self.dat = dat or Dat(self.dset, dtype=PETSc.ScalarType)\n self.dset = dset\n\n def __getitem__(self, key):\n shape = [s[0] or 1 for s in self.sizes]\n return 
self.dat.data_ro.reshape(*shape)[key]\n\n def zeroEntries(self, mat):\n self.dat.data[...] = 0.0\n\n def mult(self, mat, x, y):\n '''Y = mat x'''\n with self.dat.vec_ro as v:\n if self.sizes[0][0] is None:\n # Row matrix\n out = v.dot(x)\n if y.comm.rank == 0:\n y.array[0] = out\n else:\n y.array[...]\n else:\n # Column matrix\n if x.sizes[1] == 1:\n v.copy(y)\n a = np.zeros(1, dtype=dtypes.ScalarType)\n if x.comm.rank == 0:\n a[0] = x.array_r\n else:\n x.array_r\n x.comm.tompi4py().bcast(a)\n return y.scale(a)\n else:\n return v.pointwiseMult(x, y)\n\n def multTranspose(self, mat, x, y):\n with self.dat.vec_ro as v:\n if self.sizes[0][0] is None:\n # Row matrix\n if x.sizes[1] == 1:\n v.copy(y)\n a = np.zeros(1, dtype=dtypes.ScalarType)\n if x.comm.rank == 0:\n a[0] = x.array_r\n else:\n x.array_r\n x.comm.tompi4py().bcast(a)\n y.scale(a)\n else:\n v.pointwiseMult(x, y)\n else:\n # Column matrix\n out = v.dot(x)\n if y.comm.rank == 0:\n y.array[0] = out\n else:\n y.array[...]\n\n def multTransposeAdd(self, mat, x, y, z):\n ''' z = y + mat^Tx '''\n with self.dat.vec_ro as v:\n if self.sizes[0][0] is None:\n # Row matrix\n if x.sizes[1] == 1:\n v.copy(z)\n a = np.zeros(1, dtype=dtypes.ScalarType)\n if x.comm.rank == 0:\n a[0] = x.array_r\n else:\n x.array_r\n x.comm.tompi4py().bcast(a)\n if y == z:\n # Last two arguments are aliased.\n tmp = y.duplicate()\n y.copy(tmp)\n y = tmp\n z.scale(a)\n z.axpy(1, y)\n else:\n if y == z:\n # Last two arguments are aliased.\n tmp = y.duplicate()\n y.copy(tmp)\n y = tmp\n v.pointwiseMult(x, z)\n return z.axpy(1, y)\n else:\n # Column matrix\n out = v.dot(x)\n y = y.array_r\n if z.comm.rank == 0:\n z.array[0] = out + y[0]\n else:\n z.array[...]\n\n def duplicate(self, mat, copy=True):\n if copy:\n return _DatMat(self.sparsity, self.dat.duplicate())\n else:\n return _DatMat(self.sparsity)\n\n\ndef _GlobalMat(global_=None, comm=None):\n \"\"\"A :class:`PETSc.Mat` with global size 1x1 implemented as a\n :class:`.Global`\"\"\"\n A = PETSc.Mat().createPython(((None, 1), (None, 1)), comm=comm)\n A.setPythonContext(_GlobalMatPayload(global_, comm))\n A.setUp()\n return A\n\n\nclass _GlobalMatPayload:\n\n def __init__(self, global_=None, comm=None):\n from pyop2.types.glob import Global\n self.global_ = global_ or Global(1, dtype=PETSc.ScalarType, comm=comm)\n\n def __getitem__(self, key):\n return self.global_.data_ro.reshape(1, 1)[key]\n\n def zeroEntries(self, mat):\n self.global_.data[...] = 0.0\n\n def getDiagonal(self, mat, result=None):\n if result is None:\n result = self.global_.dataset.layout_vec.duplicate()\n if result.comm.rank == 0:\n result.array[...] = self.global_.data_ro\n else:\n result.array[...]\n return result\n\n def mult(self, mat, x, result):\n if result.comm.rank == 0:\n result.array[...] = self.global_.data_ro * x.array_r\n else:\n result.array[...]\n\n def multTransposeAdd(self, mat, x, y, z):\n if z.comm.rank == 0:\n ax = self.global_.data_ro * x.array_r\n if y == z:\n z.array[...] += ax\n else:\n z.array[...] = ax + y.array_r\n else:\n x.array_r\n y.array_r\n z.array[...]\n\n def duplicate(self, mat, copy=True):\n if copy:\n return _GlobalMat(self.global_.duplicate(), comm=mat.comm)\n else:\n return _GlobalMat(comm=mat.comm)\n",
"import abc\nimport collections\nimport copy\nimport ctypes\nimport enum\nimport itertools\nimport operator\nimport os\nimport types\n\nimport loopy as lp\nimport numpy as np\nfrom petsc4py import PETSc\n\nfrom . import (\n caching,\n compilation,\n configuration as conf,\n datatypes as dtypes,\n exceptions as ex,\n mpi,\n profiling,\n utils\n)\nfrom .kernel import Kernel, PyKernel\nfrom .types import (\n Access,\n Global, Dat, DatView, Mat, Map, MixedDat, AbstractDat, AbstractMat,\n Set, MixedSet, ExtrudedSet, Subset\n)\n\n\nclass Arg:\n\n \"\"\"An argument to a :func:`pyop2.op2.par_loop`.\n\n .. warning ::\n User code should not directly instantiate :class:`Arg`.\n Instead, use the call syntax on the :class:`DataCarrier`.\n \"\"\"\n\n def __init__(self, data=None, map=None, access=None, lgmaps=None, unroll_map=False):\n \"\"\"\n :param data: A data-carrying object, either :class:`Dat` or class:`Mat`\n :param map: A :class:`Map` to access this :class:`Arg` or the default\n if the identity map is to be used.\n :param access: An access descriptor of type :class:`Access`\n :param lgmaps: For :class:`Mat` objects, a tuple of 2-tuples of local to\n global maps used during assembly.\n\n Checks that:\n\n 1. the maps used are initialized i.e. have mapping data associated, and\n 2. the to Set of the map used to access it matches the Set it is\n defined on.\n\n A :class:`MapValueError` is raised if these conditions are not met.\"\"\"\n self.data = data\n self._map = map\n if map is None:\n self.map_tuple = ()\n elif isinstance(map, Map):\n self.map_tuple = (map, )\n else:\n self.map_tuple = tuple(map)\n\n if data is not None and hasattr(data, \"dtype\"):\n if data.dtype.kind == \"c\" and (access == Access.MIN or access == Access.MAX):\n raise ValueError(\"MIN and MAX access descriptors are undefined on complex data.\")\n self._access = access\n\n self.unroll_map = unroll_map\n self.lgmaps = None\n if self._is_mat and lgmaps is not None:\n self.lgmaps = utils.as_tuple(lgmaps)\n assert len(self.lgmaps) == self.data.nblocks\n else:\n if lgmaps is not None:\n raise ValueError(\"Local to global maps only for matrices\")\n\n # Check arguments for consistency\n if conf.configuration[\"type_check\"] and not (self._is_global or map is None):\n for j, m in enumerate(map):\n if m.iterset.total_size > 0 and len(m.values_with_halo) == 0:\n raise ex.MapValueError(\"%s is not initialized.\" % map)\n if self._is_mat and m.toset != data.sparsity.dsets[j].set:\n raise ex.MapValueError(\n \"To set of %s doesn't match the set of %s.\" % (map, data))\n if self._is_dat and map.toset != data.dataset.set:\n raise ex.MapValueError(\n \"To set of %s doesn't match the set of %s.\" % (map, data))\n\n def recreate(self, data=None, map=None, access=None, lgmaps=None, unroll_map=None):\n \"\"\"Creates a new Dat based on the existing Dat with the changes specified.\n\n :param data: A data-carrying object, either :class:`Dat` or class:`Mat`\n :param map: A :class:`Map` to access this :class:`Arg` or the default\n if the identity map is to be used.\n :param access: An access descriptor of type :class:`Access`\n :param lgmaps: For :class:`Mat` objects, a tuple of 2-tuples of local to\n global maps used during assembly.\"\"\"\n return type(self)(data=data or self.data,\n map=map or self.map,\n access=access or self.access,\n lgmaps=lgmaps or self.lgmaps,\n unroll_map=False if unroll_map is None else unroll_map)\n\n @utils.cached_property\n def _kernel_args_(self):\n return self.data._kernel_args_\n\n @utils.cached_property\n def 
_argtypes_(self):\n return self.data._argtypes_\n\n @utils.cached_property\n def _wrapper_cache_key_(self):\n if self.map is not None:\n map_ = tuple(None if m is None else m._wrapper_cache_key_ for m in self.map)\n else:\n map_ = self.map\n return (type(self), self.access, self.data._wrapper_cache_key_, map_, self.unroll_map)\n\n @property\n def _key(self):\n return (self.data, self._map, self._access)\n\n def __eq__(self, other):\n r\"\"\":class:`Arg`\\s compare equal of they are defined on the same data,\n use the same :class:`Map` with the same index and the same access\n descriptor.\"\"\"\n return self._key == other._key\n\n def __ne__(self, other):\n r\"\"\":class:`Arg`\\s compare equal of they are defined on the same data,\n use the same :class:`Map` with the same index and the same access\n descriptor.\"\"\"\n return not self.__eq__(other)\n\n def __str__(self):\n return \"OP2 Arg: dat %s, map %s, access %s\" % \\\n (self.data, self._map, self._access)\n\n def __repr__(self):\n return \"Arg(%r, %r, %r)\" % \\\n (self.data, self._map, self._access)\n\n def __iter__(self):\n for arg in self.split:\n yield arg\n\n @utils.cached_property\n def split(self):\n \"\"\"Split a mixed argument into a tuple of constituent arguments.\"\"\"\n if self._is_mixed_dat:\n return tuple(Arg(d, m, self._access)\n for d, m in zip(self.data, self._map))\n elif self._is_mixed_mat:\n rows, cols = self.data.sparsity.shape\n mr, mc = self.map\n return tuple(Arg(self.data[i, j], (mr.split[i], mc.split[j]), self._access)\n for i in range(rows) for j in range(cols))\n else:\n return (self,)\n\n @utils.cached_property\n def name(self):\n \"\"\"The generated argument name.\"\"\"\n return \"arg%d\" % self.position\n\n @utils.cached_property\n def ctype(self):\n \"\"\"String representing the C type of the data in this ``Arg``.\"\"\"\n return self.data.ctype\n\n @utils.cached_property\n def dtype(self):\n \"\"\"Numpy datatype of this Arg\"\"\"\n return self.data.dtype\n\n @utils.cached_property\n def map(self):\n \"\"\"The :class:`Map` via which the data is to be accessed.\"\"\"\n return self._map\n\n @utils.cached_property\n def access(self):\n \"\"\"Access descriptor. 
One of the constants of type :class:`Access`\"\"\"\n return self._access\n\n @utils.cached_property\n def _is_dat_view(self):\n return isinstance(self.data, DatView)\n\n @utils.cached_property\n def _is_mat(self):\n return isinstance(self.data, AbstractMat)\n\n @utils.cached_property\n def _is_mixed_mat(self):\n return self._is_mat and self.data.sparsity.shape > (1, 1)\n\n @utils.cached_property\n def _is_global(self):\n return isinstance(self.data, Global)\n\n @utils.cached_property\n def _is_global_reduction(self):\n return self._is_global and self._access in {Access.INC, Access.MIN, Access.MAX}\n\n @utils.cached_property\n def _is_dat(self):\n return isinstance(self.data, AbstractDat)\n\n @utils.cached_property\n def _is_mixed_dat(self):\n return isinstance(self.data, MixedDat)\n\n @utils.cached_property\n def _is_mixed(self):\n return self._is_mixed_dat or self._is_mixed_mat\n\n @utils.cached_property\n def _is_direct(self):\n return isinstance(self.data, Dat) and self.map is None\n\n @utils.cached_property\n def _is_indirect(self):\n return isinstance(self.data, Dat) and self.map is not None\n\n @mpi.collective\n def global_to_local_begin(self):\n \"\"\"Begin halo exchange for the argument if a halo update is required.\n Doing halo exchanges only makes sense for :class:`Dat` objects.\n \"\"\"\n assert self._is_dat, \"Doing halo exchanges only makes sense for Dats\"\n if self._is_direct:\n return\n if self.access is not Access.WRITE:\n self.data.global_to_local_begin(self.access)\n\n @mpi.collective\n def global_to_local_end(self):\n \"\"\"Finish halo exchange for the argument if a halo update is required.\n Doing halo exchanges only makes sense for :class:`Dat` objects.\n \"\"\"\n assert self._is_dat, \"Doing halo exchanges only makes sense for Dats\"\n if self._is_direct:\n return\n if self.access is not Access.WRITE:\n self.data.global_to_local_end(self.access)\n\n @mpi.collective\n def local_to_global_begin(self):\n assert self._is_dat, \"Doing halo exchanges only makes sense for Dats\"\n if self._is_direct:\n return\n if self.access in {Access.INC, Access.MIN, Access.MAX}:\n self.data.local_to_global_begin(self.access)\n\n @mpi.collective\n def local_to_global_end(self):\n assert self._is_dat, \"Doing halo exchanges only makes sense for Dats\"\n if self._is_direct:\n return\n if self.access in {Access.INC, Access.MIN, Access.MAX}:\n self.data.local_to_global_end(self.access)\n\n @mpi.collective\n def reduction_begin(self, comm):\n \"\"\"Begin reduction for the argument if its access is INC, MIN, or MAX.\n Doing a reduction only makes sense for :class:`Global` objects.\"\"\"\n assert self._is_global, \\\n \"Doing global reduction only makes sense for Globals\"\n if self.access is not Access.READ:\n if self.access is Access.INC:\n op = mpi.MPI.SUM\n elif self.access is Access.MIN:\n op = mpi.MPI.MIN\n elif self.access is Access.MAX:\n op = mpi.MPI.MAX\n if mpi.MPI.VERSION >= 3:\n self._reduction_req = comm.Iallreduce(self.data._data, self.data._buf, op=op)\n else:\n comm.Allreduce(self.data._data, self.data._buf, op=op)\n\n @mpi.collective\n def reduction_end(self, comm):\n \"\"\"End reduction for the argument if it is in flight.\n Doing a reduction only makes sense for :class:`Global` objects.\"\"\"\n assert self._is_global, \\\n \"Doing global reduction only makes sense for Globals\"\n if self.access is not Access.READ:\n if mpi.MPI.VERSION >= 3:\n self._reduction_req.Wait()\n self._reduction_req = None\n self.data._data[:] = self.data._buf[:]\n\n\nclass 
JITModule(caching.Cached):\n\n \"\"\"Cached module encapsulating the generated :class:`ParLoop` stub.\n\n .. warning::\n\n Note to implementors. This object is *cached* and therefore\n should not hold any references to objects you might want to be\n collected (such PyOP2 data objects).\"\"\"\n\n _cppargs = []\n _libraries = []\n _system_headers = []\n\n _cache = {}\n\n @classmethod\n def _cache_key(cls, kernel, iterset, *args, **kwargs):\n counter = itertools.count()\n seen = collections.defaultdict(lambda: next(counter))\n key = ((id(mpi.dup_comm(iterset.comm)), ) + kernel._wrapper_cache_key_ + iterset._wrapper_cache_key_\n + (iterset._extruded, (iterset._extruded and iterset.constant_layers), isinstance(iterset, Subset)))\n\n for arg in args:\n key += arg._wrapper_cache_key_\n for map_ in arg.map_tuple:\n key += (seen[map_],)\n\n key += (kwargs.get(\"iterate\", None), cls, conf.configuration[\"simd_width\"])\n\n return key\n\n def __init__(self, kernel, iterset, *args, **kwargs):\n r\"\"\"\n A cached compiled function to execute for a specified par_loop.\n\n See :func:`~.par_loop` for the description of arguments.\n\n .. warning ::\n\n Note to implementors. This object is *cached*, and therefore\n should not hold any long term references to objects that\n you want to be collected. In particular, after the\n ``args`` have been inspected to produce the compiled code,\n they **must not** remain part of the object's slots,\n otherwise they (and the :class:`~.Dat`\\s, :class:`~.Map`\\s\n and :class:`~.Mat`\\s they reference) will never be collected.\n \"\"\"\n # Return early if we were in the cache.\n if self._initialized:\n return\n self.comm = iterset.comm\n self._kernel = kernel\n self._fun = None\n self._iterset = iterset\n self._args = args\n self._iteration_region = kwargs.get('iterate', ALL)\n self._pass_layer_arg = kwargs.get('pass_layer_arg', False)\n # Copy the class variables, so we don't overwrite them\n self._cppargs = copy.deepcopy(type(self)._cppargs)\n self._libraries = copy.deepcopy(type(self)._libraries)\n self._system_headers = copy.deepcopy(type(self)._system_headers)\n if not kwargs.get('delay', False):\n self.compile()\n self._initialized = True\n\n @mpi.collective\n def __call__(self, *args):\n return self._fun(*args)\n\n @utils.cached_property\n def _wrapper_name(self):\n return 'wrap_%s' % self._kernel.name\n\n @utils.cached_property\n def code_to_compile(self):\n from pyop2.codegen.builder import WrapperBuilder\n from pyop2.codegen.rep2loopy import generate\n\n builder = WrapperBuilder(kernel=self._kernel,\n iterset=self._iterset,\n iteration_region=self._iteration_region,\n pass_layer_to_kernel=self._pass_layer_arg)\n for arg in self._args:\n builder.add_argument(arg)\n\n wrapper = generate(builder)\n code = lp.generate_code_v2(wrapper)\n\n if self._kernel._cpp:\n from loopy.codegen.result import process_preambles\n preamble = \"\".join(process_preambles(getattr(code, \"device_preambles\", [])))\n device_code = \"\\n\\n\".join(str(dp.ast) for dp in code.device_programs)\n return preamble + \"\\nextern \\\"C\\\" {\\n\" + device_code + \"\\n}\\n\"\n return code.device_code()\n\n @PETSc.Log.EventDecorator()\n @mpi.collective\n def compile(self):\n # If we weren't in the cache we /must/ have arguments\n if not hasattr(self, '_args'):\n raise RuntimeError(\"JITModule has no args associated with it, should never happen\")\n\n compiler = conf.configuration[\"compiler\"]\n extension = \"cpp\" if self._kernel._cpp else \"c\"\n cppargs = self._cppargs\n cppargs += 
[\"-I%s/include\" % d for d in utils.get_petsc_dir()] + \\\n [\"-I%s\" % d for d in self._kernel._include_dirs] + \\\n [\"-I%s\" % os.path.abspath(os.path.dirname(__file__))]\n ldargs = [\"-L%s/lib\" % d for d in utils.get_petsc_dir()] + \\\n [\"-Wl,-rpath,%s/lib\" % d for d in utils.get_petsc_dir()] + \\\n [\"-lpetsc\", \"-lm\"] + self._libraries\n ldargs += self._kernel._ldargs\n\n self._fun = compilation.load(self,\n extension,\n self._wrapper_name,\n cppargs=cppargs,\n ldargs=ldargs,\n restype=ctypes.c_int,\n compiler=compiler,\n comm=self.comm)\n # Blow away everything we don't need any more\n del self._args\n del self._kernel\n del self._iterset\n\n @utils.cached_property\n def argtypes(self):\n index_type = dtypes.as_ctypes(dtypes.IntType)\n argtypes = (index_type, index_type)\n argtypes += self._iterset._argtypes_\n for arg in self._args:\n argtypes += arg._argtypes_\n seen = set()\n for arg in self._args:\n maps = arg.map_tuple\n for map_ in maps:\n for k, t in zip(map_._kernel_args_, map_._argtypes_):\n if k in seen:\n continue\n argtypes += (t,)\n seen.add(k)\n return argtypes\n\n\nclass IterationRegion(enum.IntEnum):\n BOTTOM = 1\n TOP = 2\n INTERIOR_FACETS = 3\n ALL = 4\n\n\nON_BOTTOM = IterationRegion.BOTTOM\n\"\"\"Iterate over the cells at the bottom of the column in an extruded mesh.\"\"\"\n\nON_TOP = IterationRegion.TOP\n\"\"\"Iterate over the top cells in an extruded mesh.\"\"\"\n\nON_INTERIOR_FACETS = IterationRegion.INTERIOR_FACETS\n\"\"\"Iterate over the interior facets of an extruded mesh.\"\"\"\n\nALL = IterationRegion.ALL\n\"\"\"Iterate over all cells of an extruded mesh.\"\"\"\n\n\nclass AbstractParLoop(abc.ABC):\n \"\"\"Represents the kernel, iteration space and arguments of a parallel loop\n invocation.\n .. note ::\n Users should not directly construct :class:`ParLoop` objects, but\n use :func:`pyop2.op2.par_loop` instead.\n An optional keyword argument, ``iterate``, can be used to specify\n which region of an :class:`ExtrudedSet` the parallel loop should\n iterate over.\n \"\"\"\n\n @utils.validate_type(('kernel', Kernel, ex.KernelTypeError),\n ('iterset', Set, ex.SetTypeError))\n def __init__(self, kernel, iterset, *args, **kwargs):\n # INCs into globals need to start with zero and then sum back\n # into the input global at the end. 
This has the same number\n # of reductions but means that successive par_loops\n # incrementing into a global get the \"right\" value in\n # parallel.\n # Don't care about MIN and MAX because they commute with the reduction\n self._reduced_globals = {}\n for i, arg in enumerate(args):\n if arg._is_global_reduction and arg.access == Access.INC:\n glob = arg.data\n tmp = Global(glob.dim, data=np.zeros_like(glob.data_ro), dtype=glob.dtype)\n self._reduced_globals[tmp] = glob\n args[i].data = tmp\n\n # Always use the current arguments, also when we hit cache\n self._actual_args = args\n self._kernel = kernel\n self._is_layered = iterset._extruded\n self._iteration_region = kwargs.get(\"iterate\", None)\n self._pass_layer_arg = kwargs.get(\"pass_layer_arg\", False)\n\n check_iterset(self.args, iterset)\n\n if self._pass_layer_arg:\n if not self._is_layered:\n raise ValueError(\"Can't request layer arg for non-extruded iteration\")\n\n self.iterset = iterset\n self.comm = iterset.comm\n\n for i, arg in enumerate(self._actual_args):\n arg.position = i\n arg.indirect_position = i\n for i, arg1 in enumerate(self._actual_args):\n if arg1._is_dat and arg1._is_indirect:\n for arg2 in self._actual_args[i:]:\n # We have to check for identity here (we really\n # want these to be the same thing, not just look\n # the same)\n if arg2.data is arg1.data and arg2.map is arg1.map:\n arg2.indirect_position = arg1.indirect_position\n\n self.arglist = self.prepare_arglist(iterset, *self.args)\n\n def prepare_arglist(self, iterset, *args):\n \"\"\"Prepare the argument list for calling generated code.\n :arg iterset: The :class:`Set` iterated over.\n :arg args: A list of :class:`Args`, the argument to the :fn:`par_loop`.\n \"\"\"\n return ()\n\n @utils.cached_property\n def num_flops(self):\n iterset = self.iterset\n size = 1\n if iterset._extruded:\n region = self.iteration_region\n layers = np.mean(iterset.layers_array[:, 1] - iterset.layers_array[:, 0])\n if region is ON_INTERIOR_FACETS:\n size = layers - 2\n elif region not in [ON_TOP, ON_BOTTOM]:\n size = layers - 1\n return size * self._kernel.num_flops\n\n def log_flops(self, flops):\n pass\n\n @property\n @mpi.collective\n def _jitmodule(self):\n \"\"\"Return the :class:`JITModule` that encapsulates the compiled par_loop code.\n Return None if the child class should deal with this in another way.\"\"\"\n return None\n\n @utils.cached_property\n def _parloop_event(self):\n return profiling.timed_region(\"ParLoopExecute\")\n\n @mpi.collective\n def compute(self):\n \"\"\"Executes the kernel over all members of the iteration space.\"\"\"\n with self._parloop_event:\n orig_lgmaps = []\n for arg in self.args:\n if arg._is_mat:\n new_state = {Access.INC: Mat.ADD_VALUES,\n Access.WRITE: Mat.INSERT_VALUES}[arg.access]\n for m in arg.data:\n m.change_assembly_state(new_state)\n arg.data.change_assembly_state(new_state)\n # Boundary conditions applied to the matrix appear\n # as modified lgmaps on the Arg. We set them onto\n # the matrix so things are correctly dropped in\n # insertion, and then restore the original lgmaps\n # afterwards.\n if arg.lgmaps is not None:\n olgmaps = []\n for m, lgmaps in zip(arg.data, arg.lgmaps):\n olgmaps.append(m.handle.getLGMap())\n m.handle.setLGMap(*lgmaps)\n orig_lgmaps.append(olgmaps)\n self.global_to_local_begin()\n iterset = self.iterset\n arglist = self.arglist\n fun = self._jitmodule\n # Need to ensure INC globals are zero on entry to the loop\n # in case it's reused.\n for g in self._reduced_globals.keys():\n g._data[...] 
= 0\n self._compute(iterset.core_part, fun, *arglist)\n self.global_to_local_end()\n self._compute(iterset.owned_part, fun, *arglist)\n self.reduction_begin()\n self.local_to_global_begin()\n self.update_arg_data_state()\n for arg in reversed(self.args):\n if arg._is_mat and arg.lgmaps is not None:\n for m, lgmaps in zip(arg.data, orig_lgmaps.pop()):\n m.handle.setLGMap(*lgmaps)\n self.reduction_end()\n self.local_to_global_end()\n\n @mpi.collective\n def _compute(self, part, fun, *arglist):\n \"\"\"Executes the kernel over all members of a MPI-part of the iteration space.\n :arg part: The :class:`SetPartition` to compute over\n :arg fun: The :class:`JITModule` encapsulating the compiled\n code (may be ignored by the backend).\n :arg arglist: The arguments to pass to the compiled code (may\n be ignored by the backend, depending on the exact implementation)\"\"\"\n raise RuntimeError(\"Must select a backend\")\n\n @mpi.collective\n def global_to_local_begin(self):\n \"\"\"Start halo exchanges.\"\"\"\n for arg in self.unique_dat_args:\n arg.global_to_local_begin()\n\n @mpi.collective\n def global_to_local_end(self):\n \"\"\"Finish halo exchanges\"\"\"\n for arg in self.unique_dat_args:\n arg.global_to_local_end()\n\n @mpi.collective\n def local_to_global_begin(self):\n \"\"\"Start halo exchanges.\"\"\"\n for arg in self.unique_dat_args:\n arg.local_to_global_begin()\n\n @mpi.collective\n def local_to_global_end(self):\n \"\"\"Finish halo exchanges (wait on irecvs)\"\"\"\n for arg in self.unique_dat_args:\n arg.local_to_global_end()\n\n @utils.cached_property\n def _reduction_event_begin(self):\n return profiling.timed_region(\"ParLoopRednBegin\")\n\n @utils.cached_property\n def _reduction_event_end(self):\n return profiling.timed_region(\"ParLoopRednEnd\")\n\n @utils.cached_property\n def _has_reduction(self):\n return len(self.global_reduction_args) > 0\n\n @mpi.collective\n def reduction_begin(self):\n \"\"\"Start reductions\"\"\"\n if not self._has_reduction:\n return\n with self._reduction_event_begin:\n for arg in self.global_reduction_args:\n arg.reduction_begin(self.comm)\n\n @mpi.collective\n def reduction_end(self):\n \"\"\"End reductions\"\"\"\n if not self._has_reduction:\n return\n with self._reduction_event_end:\n for arg in self.global_reduction_args:\n arg.reduction_end(self.comm)\n # Finalise global increments\n for tmp, glob in self._reduced_globals.items():\n glob._data += tmp._data\n\n @mpi.collective\n def update_arg_data_state(self):\n r\"\"\"Update the state of the :class:`DataCarrier`\\s in the arguments to the `par_loop`.\n This marks :class:`Mat`\\s that need assembly.\"\"\"\n for arg in self.args:\n access = arg.access\n if access is Access.READ:\n continue\n if arg._is_dat:\n arg.data.halo_valid = False\n if arg._is_mat:\n state = {Access.WRITE: Mat.INSERT_VALUES,\n Access.INC: Mat.ADD_VALUES}[access]\n arg.data.assembly_state = state\n\n @utils.cached_property\n def dat_args(self):\n return tuple(arg for arg in self.args if arg._is_dat)\n\n @utils.cached_property\n def unique_dat_args(self):\n seen = {}\n unique = []\n for arg in self.dat_args:\n if arg.data not in seen:\n unique.append(arg)\n seen[arg.data] = arg\n elif arg.access != seen[arg.data].access:\n raise ValueError(\"Same Dat appears multiple times with different \"\n \"access descriptors\")\n return tuple(unique)\n\n @utils.cached_property\n def global_reduction_args(self):\n return tuple(arg for arg in self.args if arg._is_global_reduction)\n\n @utils.cached_property\n def kernel(self):\n 
\"\"\"Kernel executed by this parallel loop.\"\"\"\n return self._kernel\n\n @utils.cached_property\n def args(self):\n \"\"\"Arguments to this parallel loop.\"\"\"\n return self._actual_args\n\n @utils.cached_property\n def is_layered(self):\n \"\"\"Flag which triggers extrusion\"\"\"\n return self._is_layered\n\n @utils.cached_property\n def iteration_region(self):\n \"\"\"Specifies the part of the mesh the parallel loop will\n be iterating over. The effect is the loop only iterates over\n a certain part of an extruded mesh, for example on top cells, bottom cells or\n interior facets.\"\"\"\n return self._iteration_region\n\n\nclass ParLoop(AbstractParLoop):\n\n def log_flops(self, flops):\n PETSc.Log.logFlops(flops)\n\n def prepare_arglist(self, iterset, *args):\n arglist = iterset._kernel_args_\n for arg in args:\n arglist += arg._kernel_args_\n seen = set()\n for arg in args:\n maps = arg.map_tuple\n for map_ in maps:\n if map_ is None:\n continue\n for k in map_._kernel_args_:\n if k in seen:\n continue\n arglist += (k,)\n seen.add(k)\n return arglist\n\n @utils.cached_property\n def _jitmodule(self):\n return JITModule(self.kernel, self.iterset, *self.args,\n iterate=self.iteration_region,\n pass_layer_arg=self._pass_layer_arg)\n\n @utils.cached_property\n def _compute_event(self):\n return profiling.timed_region(\"ParLoop_{0}_{1}\".format(self.iterset.name, self._jitmodule._wrapper_name))\n\n @mpi.collective\n def _compute(self, part, fun, *arglist):\n with self._compute_event:\n self.log_flops(part.size * self.num_flops)\n fun(part.offset, part.offset + part.size, *arglist)\n\n\nclass PyParLoop(AbstractParLoop):\n \"\"\"A stub implementation of \"Python\" parallel loops.\n\n This basically executes a python function over the iteration set,\n feeding it the appropriate data for each set entity.\n\n Example usage::\n\n .. code-block:: python\n\n s = op2.Set(10)\n d = op2.Dat(s)\n d2 = op2.Dat(s**2)\n\n m = op2.Map(s, s, 2, np.dstack(np.arange(4),\n np.roll(np.arange(4), -1)))\n\n def fn(x, y):\n x[0] = y[0]\n x[1] = y[1]\n\n d.data[:] = np.arange(4)\n\n op2.par_loop(fn, s, d2(op2.WRITE), d(op2.READ, m))\n\n print d2.data\n # [[ 0. 1.]\n # [ 1. 2.]\n # [ 2. 3.]\n # [ 3. 0.]]\n\n def fn2(x, y):\n x[0] += y[0]\n x[1] += y[0]\n\n op2.par_loop(fn, s, d2(op2.INC), d(op2.READ, m[1]))\n\n print d2.data\n # [[ 1. 2.]\n # [ 3. 4.]\n # [ 5. 6.]\n # [ 3. 
0.]]\n \"\"\"\n def __init__(self, kernel, *args, **kwargs):\n if not isinstance(kernel, types.FunctionType):\n raise ValueError(\"Expecting a python function, not a %r\" % type(kernel))\n super().__init__(PyKernel(kernel), *args, **kwargs)\n\n def _compute(self, part, *arglist):\n if part.set._extruded:\n raise NotImplementedError\n subset = isinstance(self.iterset, Subset)\n\n def arrayview(array, access):\n array = array.view()\n array.setflags(write=(access is not Access.READ))\n return array\n\n # Just walk over the iteration set\n for e in range(part.offset, part.offset + part.size):\n args = []\n if subset:\n idx = self.iterset._indices[e]\n else:\n idx = e\n for arg in self.args:\n if arg._is_global:\n args.append(arrayview(arg.data._data, arg.access))\n elif arg._is_direct:\n args.append(arrayview(arg.data._data[idx, ...], arg.access))\n elif arg._is_indirect:\n args.append(arrayview(arg.data._data[arg.map.values_with_halo[idx], ...], arg.access))\n elif arg._is_mat:\n if arg.access not in {Access.INC, Access.WRITE}:\n raise NotImplementedError\n if arg._is_mixed_mat:\n raise ValueError(\"Mixed Mats must be split before assembly\")\n shape = tuple(map(operator.attrgetter(\"arity\"), arg.map_tuple))\n args.append(np.zeros(shape, dtype=arg.data.dtype))\n if args[-1].shape == ():\n args[-1] = args[-1].reshape(1)\n self._kernel(*args)\n for arg, tmp in zip(self.args, args):\n if arg.access is Access.READ:\n continue\n if arg._is_global:\n arg.data._data[:] = tmp[:]\n elif arg._is_direct:\n arg.data._data[idx, ...] = tmp[:]\n elif arg._is_indirect:\n arg.data._data[arg.map.values_with_halo[idx], ...] = tmp[:]\n elif arg._is_mat:\n if arg.access is Access.INC:\n arg.data.addto_values(arg.map[0].values_with_halo[idx],\n arg.map[1].values_with_halo[idx],\n tmp)\n elif arg.access is Access.WRITE:\n arg.data.set_values(arg.map[0].values_with_halo[idx],\n arg.map[1].values_with_halo[idx],\n tmp)\n\n for arg in self.args:\n if arg._is_mat and arg.access is not Access.READ:\n # Queue up assembly of matrix\n arg.data.assemble()\n\n\ndef check_iterset(args, iterset):\n \"\"\"Checks that the iteration set of the :class:`ParLoop` matches the\n iteration set of all its arguments. 
A :class:`MapValueError` is raised\n if this condition is not met.\"\"\"\n\n if isinstance(iterset, Subset):\n _iterset = iterset.superset\n else:\n _iterset = iterset\n if conf.configuration[\"type_check\"]:\n if isinstance(_iterset, MixedSet):\n raise ex.SetTypeError(\"Cannot iterate over MixedSets\")\n for i, arg in enumerate(args):\n if arg._is_global:\n continue\n if arg._is_direct:\n if isinstance(_iterset, ExtrudedSet):\n if arg.data.dataset.set != _iterset.parent:\n raise ex.MapValueError(\n \"Iterset of direct arg %s doesn't match ParLoop iterset.\" % i)\n elif arg.data.dataset.set != _iterset:\n raise ex.MapValueError(\n \"Iterset of direct arg %s doesn't match ParLoop iterset.\" % i)\n continue\n for j, m in enumerate(arg._map):\n if isinstance(_iterset, ExtrudedSet):\n if m.iterset != _iterset and m.iterset not in _iterset:\n raise ex.MapValueError(\n \"Iterset of arg %s map %s doesn't match ParLoop iterset.\" % (i, j))\n elif m.iterset != _iterset and m.iterset not in _iterset:\n raise ex.MapValueError(\n \"Iterset of arg %s map %s doesn't match ParLoop iterset.\" % (i, j))\n\n\[email protected]\ndef par_loop(kernel, iterset, *args, **kwargs):\n r\"\"\"Invocation of an OP2 kernel\n\n :arg kernel: The :class:`Kernel` to be executed.\n :arg iterset: The iteration :class:`Set` over which the kernel should be\n executed.\n :arg \\*args: One or more :class:`base.Arg`\\s constructed from a\n :class:`Global`, :class:`Dat` or :class:`Mat` using the call\n syntax and passing in an optionally indexed :class:`Map`\n through which this :class:`base.Arg` is accessed and the\n :class:`base.Access` descriptor indicating how the\n :class:`Kernel` is going to access this data (see the example\n below). These are the global data structures from and to\n which the kernel will read and write.\n :kwarg iterate: Optionally specify which region of an\n :class:`ExtrudedSet` to iterate over.\n Valid values are:\n\n - ``ON_BOTTOM``: iterate over the bottom layer of cells.\n - ``ON_TOP`` iterate over the top layer of cells.\n - ``ALL`` iterate over all cells (the default if unspecified)\n - ``ON_INTERIOR_FACETS`` iterate over all the layers\n except the top layer, accessing data two adjacent (in\n the extruded direction) cells at a time.\n\n :kwarg pass_layer_arg: Should the wrapper pass the current layer\n into the kernel (as an ``int``). Only makes sense for\n indirect extruded iteration.\n\n .. warning ::\n It is the caller's responsibility that the number and type of all\n :class:`base.Arg`\\s passed to the :func:`par_loop` match those expected\n by the :class:`Kernel`. No runtime check is performed to ensure this!\n\n :func:`par_loop` invocation is illustrated by the following example ::\n\n pyop2.par_loop(mass, elements,\n mat(pyop2.INC, (elem_node[pyop2.i[0]]), elem_node[pyop2.i[1]]),\n coords(pyop2.READ, elem_node))\n\n This example will execute the :class:`Kernel` ``mass`` over the\n :class:`Set` ``elements`` executing 3x3 times for each\n :class:`Set` member, assuming the :class:`Map` ``elem_node`` is of arity 3.\n The :class:`Kernel` takes four arguments, the first is a :class:`Mat` named\n ``mat``, the second is a field named ``coords``. The remaining two arguments\n indicate which local iteration space point the kernel is to execute.\n\n A :class:`Mat` requires a pair of :class:`Map` objects, one each\n for the row and column spaces. In this case both are the same\n ``elem_node`` map. 
The row :class:`Map` is indexed by the first\n index in the local iteration space, indicated by the ``0`` index\n to :data:`pyop2.i`, while the column space is indexed by\n the second local index. The matrix is accessed to increment\n values using the ``pyop2.INC`` access descriptor.\n\n The ``coords`` :class:`Dat` is also accessed via the ``elem_node``\n :class:`Map`, however no indices are passed so all entries of\n ``elem_node`` for the relevant member of ``elements`` will be\n passed to the kernel as a vector.\n \"\"\"\n if isinstance(kernel, types.FunctionType):\n return PyParLoop(kernel, iterset, *args, **kwargs).compute()\n return ParLoop(kernel, iterset, *args, **kwargs).compute()\n\n\ndef generate_single_cell_wrapper(iterset, args, forward_args=(), kernel_name=None, wrapper_name=None):\n \"\"\"Generates wrapper for a single cell. No iteration loop, but cellwise data is extracted.\n Cell is expected as an argument to the wrapper. For extruded, the numbering of the cells\n is columnwise continuous, bottom to top.\n\n :param iterset: The iteration set\n :param args: :class:`Arg`s\n :param forward_args: To forward unprocessed arguments to the kernel via the wrapper,\n give an iterable of strings describing their C types.\n :param kernel_name: Kernel function name\n :param wrapper_name: Wrapper function name\n\n :return: string containing the C code for the single-cell wrapper\n \"\"\"\n from pyop2.codegen.builder import WrapperBuilder\n from pyop2.codegen.rep2loopy import generate\n from loopy.types import OpaqueType\n\n forward_arg_types = [OpaqueType(fa) for fa in forward_args]\n empty_kernel = Kernel(\"\", kernel_name)\n builder = WrapperBuilder(kernel=empty_kernel,\n iterset=iterset, single_cell=True,\n forward_arg_types=forward_arg_types)\n for arg in args:\n builder.add_argument(arg)\n wrapper = generate(builder, wrapper_name)\n code = lp.generate_code_v2(wrapper)\n\n return code.device_code()\n"
] |
[
[
"numpy.asarray",
"numpy.dtype",
"numpy.full",
"numpy.prod",
"numpy.zeros"
],
[
"numpy.mean",
"numpy.zeros_like",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
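
The PyOP2 record above closes with an iteration-set consistency check and the par_loop docstring. As a framework-free illustration of that check only, the following sketch uses hypothetical stand-in classes (SimpleSet, SimpleMap, SimpleArg — none of them part of the PyOP2 API) to show the rule being enforced: a direct argument must live on the loop's iteration set, and an indirect argument's map must have that set as its iterset.

from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class SimpleSet:
    name: str

@dataclass(frozen=True)
class SimpleMap:
    iterset: SimpleSet
    toset: SimpleSet

@dataclass(frozen=True)
class SimpleArg:
    dataset: SimpleSet                  # set the data is defined on
    map: Optional[SimpleMap] = None     # None marks a direct argument

def check_iterset(iterset: SimpleSet, *args: SimpleArg) -> None:
    # Mirrors the type-check in the record: raise if an argument cannot be
    # iterated consistently with the loop's iteration set.
    for i, arg in enumerate(args):
        if arg.map is None:
            if arg.dataset != iterset:
                raise ValueError(f"Iterset of direct arg {i} doesn't match ParLoop iterset.")
        elif arg.map.iterset != iterset:
            raise ValueError(f"Iterset of arg {i} map doesn't match ParLoop iterset.")

elements = SimpleSet("elements")
nodes = SimpleSet("nodes")
elem_node = SimpleMap(iterset=elements, toset=nodes)
check_iterset(elements, SimpleArg(dataset=nodes, map=elem_node))   # passes silently
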
laochonlam/dali_backend
|
[
"461fe528d42a6ba48baa95c4b817cc757c351f55"
] |
[
"qa/L0_DALI_GPU_ensemble/client.py"
] |
[
"# The MIT License (MIT)\n#\n# Copyright (c) 2021 NVIDIA CORPORATION\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport numpy as np\nfrom dali_backend.test_utils.client import TestClient\nfrom numpy.random import randint, random\nimport argparse\n\n# TODO: Use actual DALI pipelines to calculate ground truth\ndef ref_func(inp1, inp2):\n return inp1 * 2 / 3, (inp2 * 3).astype(np.half).astype(np.single) / 2\n\ndef random_gen(max_batch_size):\n while True:\n size1 = randint(100, 300)\n size2 = randint(100, 300)\n bs = randint(1, max_batch_size + 1)\n yield random((bs, size1)).astype(np.single), \\\n random((bs, size2)).astype(np.single)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8001',\n help='Inference server GRPC URL. Default is localhost:8001.')\n parser.add_argument('-n', '--n_iters', type=int, required=False, default=1, help='Number of iterations')\n parser.add_argument('-c', '--concurrency', type=int, required=False, default=1,\n help='Request concurrency level')\n parser.add_argument('-b', '--max_batch_size', type=int, required=False, default=256)\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n client = TestClient('dali_ensemble', ['INPUT_0', 'INPUT_1'], ['OUTPUT_0', 'OUTPUT_1'], args.url,\n concurrency=args.concurrency)\n client.run_tests(random_gen(args.max_batch_size), ref_func,\n n_infers=args.n_iters, eps=1e-4)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.random",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
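
As a standalone illustration of the test pattern in the dali_backend record above (random float32 batches checked against a host-side reference function), here is a numpy-only sketch; it reuses the record's arithmetic for the reference outputs but involves no Triton client, so it demonstrates only the ground-truth side of the test.

import numpy as np
from numpy.random import randint, random

def ref_func(inp1, inp2):
    # Expected outputs: simple scaling, with a float16 round trip on the second input.
    return inp1 * 2 / 3, (inp2 * 3).astype(np.half).astype(np.single) / 2

def random_batch(max_batch_size=8):
    # Random batch size and per-input widths, matching the generator's shape pattern.
    bs = randint(1, max_batch_size + 1)
    return (random((bs, randint(100, 300))).astype(np.single),
            random((bs, randint(100, 300))).astype(np.single))

a, b = random_batch()
out1, out2 = ref_func(a, b)
print(out1.shape, out1.dtype, out2.shape, out2.dtype)   # float32 in, float32 out
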
sjvrijn/ConfiguringCMAES
|
[
"ff48885cbe15b86b90edd0279f6bdf8a43dfd85d"
] |
[
"bbob/bbob_pproc/ppconverrorbars.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Process data and generates some comparison results.\n\n Synopsis:\n python path_to_folder/bbob_pproc/runcompall.py [OPTIONS] FOLDER_NAME...\n\n Help:\n python path_to_folder/bbob_pproc/runcompall.py -h\n\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os, sys\nimport warnings\nimport numpy\nfrom pdb import set_trace\n\n# Add the path to bbob_pproc\nif __name__ == \"__main__\":\n # append path without trailing '/bbob_pproc', using os.sep fails in mingw32\n #sys.path.append(filepath.replace('\\\\', '/').rsplit('/', 1)[0])\n (filepath, filename) = os.path.split(sys.argv[0])\n #Test system independent method:\n sys.path.append(os.path.join(filepath, os.path.pardir))\n import matplotlib\n matplotlib.use('Agg') # To avoid window popup and use without X forwarding\n\nfrom bbob.bbob_pproc import genericsettings, pproc\nfrom bbob.bbob_pproc.pproc import DataSetList\nfrom bbob.bbob_pproc.ppfig import saveFigure\nfrom bbob.bbob_pproc.toolsstats import prctile\n\nimport matplotlib.pyplot as plt\n\nwarned = False # print just one warning and set to True\n\n#FUNCTION DEFINITIONS\n\n\ndef rearrange(blist, flist):\n \"\"\"Alligns the number of evaluations taken from the blist with the correpsning flist\"\"\"\n final_b=[]\n final_f=[]\n for i in range(0,len(blist)): #runs over dimensions\n erg_b = numpy.empty((0), float)\n erg_f = [numpy.empty ((0), float), numpy.empty ((0), float), numpy.empty ((0), float)]\n for j in range(0,len(blist[i])): #runs over function evaluations\n erg_b=numpy.append(erg_b,blist[i][j])\n erg_f[0]=numpy.append(erg_f[0],numpy.median(flist[i][j]))\n erg_f[1]=numpy.append(erg_f[1],prctile(flist[i][j], [0.25]))\n erg_f[2]=numpy.append(erg_f[2],prctile(flist[i][j], [0.75]))\n final_b.append(erg_b)\n final_f.append(erg_f)\n return final_b, final_f\n\ndef main(dictAlg, outputdir='.', verbose=True):\n \"\"\"Main routine for generating convergence plots\n\n \"\"\"\n global warned # bind variable warned into this scope\n dictFun = pproc.dictAlgByFun(dictAlg)\n for l in dictFun: # l appears to be the function id!?\n for i in dictFun[l]: # please, what is i??? appears to be the algorithm-key\n plt.figure()\n if type(i) in (list, tuple):\n figurename = \"ppconv_plot_\" + i[0] + \"_f\" + str(l)\n else:\n try:\n figurename = \"ppconv_plot_\" + dictFun[l][i].algId + \"_f\" + str(l)\n except AttributeError: # this is a (rather desperate) bug-fix attempt that works for the unit test\n figurename = \"ppconv_plot_\" + dictFun[l][i][0].algId + \"_f\" + str(l)\n plt.xlabel('number of function evaluations / dimension')\n plt.ylabel('Median of fitness')\n plt.grid()\n ax = plt.gca()\n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n for j in dictFun[l][i]: # please, what is j??? 
a dataset\n dimList_b = []\n dimList_f = []\n dimList_b.append(j.funvals[:,0])\n dimList_f.append(j.funvals[:,1:])\n bs, fs= rearrange(dimList_b, dimList_f)\n labeltext=str(j.dim)+\"D\"\n try:\n if 11 < 3:\n plt.errorbar(bs[0] / j.dim, fs[0][0], yerr = [fs[0][1], fs[0][2]], label = labeltext)\n else:\n plt.errorbar(bs[0] / j.dim, fs[0][0], label = labeltext)\n except FloatingPointError: # that's a bit of a hack\n if not warned:\n print('Warning: floating point error when plotting errorbars, ignored')\n warned = True\n plt.legend(loc=3)\n saveFigure(os.path.join(outputdir, figurename.replace(' ','')), genericsettings.fig_formats, verbose=verbose)\n plt.close()\n print(\"Convergence plots done.\")\n\nif __name__ == \"__main__\":\n sys.exit(main())\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.use",
"numpy.median",
"numpy.append",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
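
To show the plotting technique from the ppconverrorbars record above in isolation, this is a small matplotlib/numpy sketch: median values with percentile-style error bars on log-log axes, written to disk via the Agg backend. The data is synthetic and the output filename is arbitrary; nothing here depends on the bbob_pproc package.

import matplotlib
matplotlib.use('Agg')   # no display needed
import matplotlib.pyplot as plt
import numpy as np

evals = np.logspace(0, 4, 20)               # function evaluations / dimension
median = 100.0 / evals                      # synthetic median fitness
lower, upper = median * 0.5, median * 0.5   # synthetic percentile spreads

plt.figure()
plt.errorbar(evals, median, yerr=[lower, upper], label='10D')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.xlabel('number of function evaluations / dimension')
plt.ylabel('Median of fitness')
plt.grid()
plt.legend(loc=3)
plt.savefig('ppconv_plot_example.png')
plt.close()
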
842974287/glow
|
[
"7d77eb9a1c00dbba77321f62ad9c9078beb2b725"
] |
[
"torch_glow/tests/nodes/add_test.py"
] |
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport torch\nfrom tests.utils import jitVsGlow\n\n\nclass TestAdd(unittest.TestCase):\n def test_add_basic(self):\n \"\"\"Basic test of the PyTorch add Node on Glow.\"\"\"\n\n def test_f(a, b):\n c = a.add(b)\n return c.add(c)\n\n x = torch.randn(4)\n y = torch.randn(4)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add\"})\n\n def test_add_inplace(self):\n \"\"\"Test of the PyTorch add_ Node on Glow.\"\"\"\n\n def test_f(a, b):\n c = a.add_(b)\n return c.add_(c)\n\n x = torch.randn(4)\n y = torch.randn(4)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add_\"})\n\n def test_add_broadcast_1(self):\n \"\"\"Test of the PyTorch add Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = a.add(b)\n return c.add(c)\n\n x = torch.randn(8, 3, 4, 2)\n y = torch.randn(4, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add\"})\n\n def test_add_broadcast_2(self):\n \"\"\"Test of the PyTorch add Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = a.add(b)\n return c.add(c)\n\n x = torch.randn(8, 3, 4, 2)\n y = torch.randn(1, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add\"})\n\n def test_add_broadcast_3(self):\n \"\"\"Test of the PyTorch add Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = a.add(b)\n return c.add(c)\n\n x = torch.randn(4, 2)\n y = torch.randn(8, 3, 4, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add\"})\n\n def test_add_float(self):\n \"\"\"Test of the PyTorch aten::add Node with a float argument\"\"\"\n\n def test_f(a):\n return (a * a).add(3.9)\n\n x = torch.randn(4)\n\n jitVsGlow(test_f, x, expected_fused_ops={\"aten::add\"})\n\n def test_add_int(self):\n \"\"\"Test of the PyTorch aten::add Node with an int argument\"\"\"\n\n def test_f(a):\n return (a * a).add(20)\n\n x = torch.randn(4)\n\n jitVsGlow(test_f, x, expected_fused_ops={\"aten::add\"})\n"
] |
[
[
"torch.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
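
The add_test record above exercises PyTorch's add with broadcasting and in-place variants through Glow; the following torch-only sketch reproduces just the tensor-level behaviour those tests rely on, without the jitVsGlow harness.

import torch

x = torch.randn(8, 3, 4, 2)
y = torch.randn(4, 2)
z = x.add(y)          # same as x + y, broadcast over the two leading dims
print(z.shape)        # torch.Size([8, 3, 4, 2])

# In-place variant, as in test_add_inplace (shapes already match here):
a = torch.randn(4)
b = torch.randn(4)
a.add_(b)             # a is modified in place
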
loic-beheshti/pytorch-lightning
|
[
"6ac16ff34822cef9b3c16e54f872655b585a066a"
] |
[
"pytorch_lightning/trainer/training_loop.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\nfrom typing import Any, Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType\nfrom pytorch_lightning.utilities.distributed import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.finite_checks import detect_nan_parameters\nfrom pytorch_lightning.utilities.grads import grad_norm\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(\n self,\n trainer,\n multiple_trainloader_mode: str,\n max_epochs: Optional[int],\n min_epochs: Optional[int],\n max_steps: Optional[int],\n min_steps: Optional[int],\n num_sanity_val_steps: int,\n ):\n self.trainer = trainer\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n self._optimizer_freq_cumsum = None\n\n self.global_step = 0\n self.current_epoch = 0\n self.trainer.should_stop = False\n\n self.total_batch_idx = 0\n self.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.max_steps = max_steps\n self.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n @property\n def optimizer_freq_cumsum(self):\n if self._optimizer_freq_cumsum is None:\n self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n return self._optimizer_freq_cumsum\n\n def should_skip_training(self) -> bool:\n should_by_max_steps = self.max_steps is not None and self.global_step >= 
self.max_steps\n should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n # reset bookkeeping\n self.trainer.state.stage = None\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last and cb.verbose for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.lightning_module\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.current_epoch = epoch\n\n model = self.trainer.lightning_module\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]\n\n processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)\n\n # hook\n self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model) -> None:\n \"\"\"\n Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n \"\"\"\n if 
self.trainer.train_dataloader is None:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n hook_overridden = self._should_add_batch_output_to_epoch_output()\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def _should_add_batch_output_to_epoch_output(self) -> bool:\n # We add to the epoch outputs if\n # 1. The model defines training_epoch_end OR\n # 2. The model overrides on_train_epoch_end which has `outputs` in the signature\n # TODO: in v1.5 this only needs to check if training_epoch_end is overridden\n lightning_module = self.trainer.lightning_module\n if is_overridden(\"training_epoch_end\", model=lightning_module):\n return True\n\n if is_overridden(\"on_train_epoch_end\", model=lightning_module):\n model_hook_fx = getattr(lightning_module, \"on_train_epoch_end\")\n if is_param_in_hook_signature(model_hook_fx, \"outputs\"):\n return True\n\n return False\n\n def get_optimizers_iterable(self, batch_idx=None):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n if batch_idx is None:\n batch_idx = self.total_batch_idx\n\n optimizers_loop_length = self.optimizer_freq_cumsum[-1]\n current_place_in_loop = batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n training_step_output.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.lightning_module\n\n with self.trainer.profiler.profile(\"model_forward\"):\n step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results 
= Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator.training_step(step_kwargs)\n self.trainer.accelerator.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n if training_step_output_for_epoch_end is None:\n return\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.lightning_module.automatic_optimization:\n # accumulate loss. if accumulate_grad_batches==1, no effect\n closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n result = self.trainer.lightning_module._results\n\n loss = None\n hiddens = None\n result[\"extra\"] = {}\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n if hiddens is not None:\n hiddens = hiddens.detach()\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n # map to results under the hood\n result.minimize = loss\n self.trainer.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()\n\n return training_step_output_for_epoch_end, result\n\n @staticmethod\n def _prepare_outputs(\n outputs: List[List[List[Result]]],\n batch_mode: bool,\n ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:\n \"\"\"\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``Result`` objects with dimensions:\n [optimizer outs][batch outs][tbptt steps].\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``Result`` objects converted to dictionaries. 
All list dimensions of size one will\n be collapsed.\n \"\"\"\n processed_outputs = []\n for opt_outputs in outputs:\n # handle an edge case where an optimizer output is the empty list\n if len(opt_outputs) == 0:\n continue\n\n processed_batch_outputs = []\n\n if batch_mode:\n opt_outputs = [opt_outputs]\n\n for batch_outputs in opt_outputs:\n processed_tbptt_outputs = []\n\n for tbptt_output in batch_outputs:\n out = tbptt_output.extra\n out['loss'] = tbptt_output.minimize\n processed_tbptt_outputs.append(out)\n\n # if there was only one tbptt step then we can collapse that dimension\n if len(processed_tbptt_outputs) == 1:\n processed_tbptt_outputs = processed_tbptt_outputs[0]\n processed_batch_outputs.append(processed_tbptt_outputs)\n\n # batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer\n if batch_mode:\n processed_batch_outputs = processed_batch_outputs[0]\n processed_outputs.append(processed_batch_outputs)\n\n # if there is only one optimiser then we collapse that dimension\n if len(processed_outputs) == 1:\n processed_outputs = processed_outputs[0]\n return processed_outputs\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.lightning_module\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer) -> dict:\n # track gradient norms\n grad_norm_dict = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator.clip_gradients(\n optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm\n )\n return grad_norm_dict\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.lightning_module\n grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def _tbptt_split_batch(self, batch: Any) -> List[Any]:\n splits = [batch]\n truncated_bptt_enabled = self._truncated_bptt_enabled()\n if truncated_bptt_enabled:\n model_ref = self.trainer.lightning_module\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output 
= [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n val_loop_called = False\n\n batch_idx = None\n is_last_batch = None\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n self.batch_idx = batch_idx\n self.trainer.is_last_batch = is_last_batch\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(\n epoch_output,\n batch_output.training_step_output_for_epoch_end,\n batch,\n batch_idx,\n dataloader_idx,\n )\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED\n # -----------------------------------------\n should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.validating = True\n self.trainer._run_evaluation()\n self.trainer.training = True\n val_loop_called = True\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.max_steps is not None and self.max_steps <= self.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n if batch_idx is None:\n # dataloader/iterator did not produce a batch\n return\n\n # handle epoch_output on epoch end\n self.on_train_epoch_end(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)\n\n should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n # update epoch level lr_schedulers if no val loop outside train loop is triggered\n if (val_loop_called and not should_check_val) or should_train_only:\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n\n if should_train_only:\n self.check_checkpoint_callback(True)\n\n if should_check_val:\n self.trainer.validating = True\n self.trainer._run_evaluation(on_epoch=True)\n self.trainer.training = True\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def 
on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n # prepare epoch output\n processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)\n\n # get the model and call model.training_epoch_end\n model = self.trainer.lightning_module\n\n if is_overridden('training_epoch_end', model=model):\n # run training_epoch_end\n # refresh the result for custom logging at the epoch level\n model._current_fx_name = 'training_epoch_end'\n\n # lightningmodule hook\n training_epoch_end_output = model.training_epoch_end(processed_epoch_output)\n\n if training_epoch_end_output is not None:\n raise MisconfigurationException(\n 'training_epoch_end expects a return of None. '\n 'HINT: remove the return statement in training_epoch_end'\n )\n\n # capture logging\n self.trainer.logger_connector.cache_logged_metrics()\n\n # call train epoch end hooks\n self._on_train_epoch_end_hook(processed_epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:\n # We cannot rely on Trainer.call_hook because the signatures might be different across\n # lightning module and callback\n # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`\n\n # This implementation is copied from Trainer.call_hook\n hook_name = \"on_train_epoch_end\"\n\n # set hook_name to model + reset Result obj\n skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)\n\n # always profile hooks\n with self.trainer.profiler.profile(hook_name):\n\n # first call trainer hook\n if hasattr(self.trainer, hook_name):\n trainer_hook = getattr(self.trainer, hook_name)\n trainer_hook(processed_epoch_output)\n\n # next call hook in lightningModule\n model_ref = self.trainer.lightning_module\n if is_overridden(hook_name, model_ref):\n hook_fx = getattr(model_ref, hook_name)\n if is_param_in_hook_signature(hook_fx, \"outputs\"):\n self.warning_cache.warn(\n \"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3.\"\n \" `outputs` parameter has been deprecated.\"\n \" Support for the old signature will be removed in v1.5\", DeprecationWarning\n )\n model_ref.on_train_epoch_end(processed_epoch_output)\n else:\n model_ref.on_train_epoch_end()\n\n # if the PL module doesn't have the hook then call the accelerator\n # used to auto-reduce things for the user with Results obj\n elif hasattr(self.trainer.accelerator, hook_name):\n accelerator_hook = getattr(self.trainer.accelerator, hook_name)\n accelerator_hook()\n\n if not skip:\n self.trainer._cache_logged_metrics()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dict = {}\n\n # bookkeeping\n self.trainer.hiddens = None\n\n optimizers = self.prepare_optimizers()\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(optimizers))]\n\n if batch is None:\n self.warning_cache.warn(\"train_dataloader yielded None. 
If this was on purpose, ignore this warning...\")\n return AttributeDict(\n signal=0,\n grad_norm_dict={},\n training_step_output_for_epoch_end=batch_outputs,\n )\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dict={})\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dict={})\n\n # lightning module hook\n splits = self._tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in optimizers:\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n result = AttributeDict()\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform dpp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n else:\n if self.trainer.lightning_module.automatic_optimization:\n\n def train_step_and_backward_closure():\n nonlocal result\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n result = self.training_step(split_batch, batch_idx, opt_idx, self.trainer.hiddens)\n\n if not result:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n # todo: Properly aggregate grad_norm accros opt_idx and split_idx\n grad_norm_dict = result.get(\"grad_norm_dict\", {})\n\n # update running loss + reset accumulated loss\n self.update_running_loss(result.loss)\n\n batch_outputs = self._process_closure_result(\n opt_closure_result=result,\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n result = AttributeDict(\n signal=0,\n grad_norm_dict=grad_norm_dict,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.trainer.lightning_module.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(\n self, opt_closure_result: Optional[AttributeDict], batch_outputs: list, opt_idx: int\n ) -> list:\n if opt_closure_result:\n # cache metrics\n 
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"Wrap forward, zero_grad and backward in a closure so second order methods work\"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n\n if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:\n is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0\n\n if is_first_batch_to_accumulate:\n self.on_before_zero_grad(optimizer)\n self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n\n # backward pass\n if result is not None:\n with self.trainer.profiler.profile(\"backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(result.loss)\n\n else:\n self.warning_cache.warn(\n \"training_step returned None. If this was on purpose, ignore this warning...\"\n )\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.lightning_module.untoggle_optimizer(opt_idx)\n\n return result\n\n def _check_finite(self, loss: torch.Tensor) -> None:\n if not torch.isfinite(loss).all():\n raise ValueError(f'The loss returned in `training_step` is {loss}.')\n model = self.trainer.lightning_module\n detect_nan_parameters(model)\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n result.grad_norm_dict = self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(\n interval=\"step\",\n monitor_metrics=monitor_metrics,\n opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],\n )\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.global_step = 
self.trainer.accelerator.update_global_step(self.total_batch_idx, self.global_step)\n\n def _accumulated_batches_reached(self):\n return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:\n \"\"\" Decide if we should run validation. \"\"\"\n\n if not self.trainer.enable_validation:\n return False\n\n # check if this epoch is eligible to run validation\n if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:\n return False\n\n # val_check_batch is inf for iterable datasets with no length defined\n # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch\n is_val_check_batch = False\n if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):\n is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0\n elif self.trainer.val_check_batch != float('inf'):\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n\n # Note: num_training_batches is also inf for iterable datasets with no length defined\n epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n\n if on_epoch:\n return (\n is_val_check_batch and epoch_end_val_check\n ) or self.trainer.should_stop or is_last_batch_for_infinite_dataset\n else:\n return is_val_check_batch and not epoch_end_val_check\n\n def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])\n\n lightning_module = self.trainer.lightning_module\n\n if len(self.trainer.optimizers) > 1:\n training_step_fx = getattr(lightning_module, \"training_step\")\n has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, \"optimizer_idx\")\n if has_opt_idx_in_train_step:\n if not lightning_module.automatic_optimization:\n self.warning_cache.warn(\n \"`training_step` hook signature has changed in v1.3.\"\n \" `optimizer_idx` argument has been removed in case of manual optimization. Support for\"\n \" the old signature will be removed in v1.5\", DeprecationWarning\n )\n step_kwargs['optimizer_idx'] = opt_idx\n elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but\"\n ' `training_step` is missing the `optimizer_idx` argument.'\n )\n\n # pass hiddens if using tbptt\n if self._truncated_bptt_enabled():\n step_kwargs['hiddens'] = hiddens\n\n return step_kwargs\n\n def _truncated_bptt_enabled(self) -> bool:\n \"\"\" Temporary tbptt utilities until this flag is fully migrated to the lightning module. 
\"\"\"\n return self._truncated_bptt_steps() > 0\n\n def _truncated_bptt_steps(self) -> int:\n lightning_module = self.trainer.lightning_module\n # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5\n if lightning_module.truncated_bptt_steps > 0:\n return lightning_module.truncated_bptt_steps\n return self.trainer.truncated_bptt_steps or 0\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.trainer.lightning_module.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.lightning_module\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self, current_loss: torch.Tensor) -> None:\n if self.trainer.lightning_module.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(current_loss)\n\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n"
] |
[
[
"torch.isfinite",
"numpy.argmax",
"numpy.cumsum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
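
The training-loop record above uses numpy.cumsum and numpy.argmax to decide which optimizer handles a given batch when optimizer frequencies are set. A numpy-only sketch of that bookkeeping, with made-up frequencies:

import numpy as np

optimizer_frequencies = [2, 1]                    # e.g. two steps of opt 0, then one of opt 1
freq_cumsum = np.cumsum(optimizer_frequencies)    # -> [2, 3]
cycle_length = freq_cumsum[-1]

for batch_idx in range(6):
    place_in_cycle = batch_idx % cycle_length
    # First cumulative frequency greater than the current position picks the optimizer.
    opt_idx = int(np.argmax(freq_cumsum > place_in_cycle))
    print(batch_idx, "->", opt_idx)               # batches 0,1 -> opt 0; batch 2 -> opt 1; repeats
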
dawn1206/X2Paddle
|
[
"3ad9b82230f2a30fd709fdfb49147678bf0ed4e2"
] |
[
"x2paddle/project_convertor/pytorch/torch2paddle/vision_transforms.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport PIL\nimport numbers\nimport numpy as np\nfrom PIL import Image\nfrom paddle.vision.transforms import BaseTransform\nfrom paddle.vision.transforms import functional as F\n\n\nclass ToPILImage(BaseTransform):\n def __init__(self, mode=None, keys=None):\n super(ToPILImage, self).__init__(keys)\n self.data_format = data_format\n\n def _apply_image(self, pic):\n \"\"\"\n Args:\n pic (Tensor|np.ndarray): Image to be converted to PIL Image.\n Returns:\n PIL: Converted image.\n \"\"\"\n if not (isinstance(pic, paddle.Tensor) or isinstance(pic, np.ndarray)):\n raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(\n type(pic)))\n\n elif isinstance(pic, paddle.Tensor):\n if pic.ndimension() not in {2, 3}:\n raise ValueError(\n 'pic should be 2/3 dimensional. Got {} dimensions.'.format(\n pic.ndimension()))\n\n elif pic.ndimension() == 2:\n # if 2D image, add channel dimension (CHW)\n pic = pic.unsqueeze(0)\n\n elif isinstance(pic, np.ndarray):\n if pic.ndim not in {2, 3}:\n raise ValueError(\n 'pic should be 2/3 dimensional. Got {} dimensions.'.format(\n pic.ndim))\n\n elif pic.ndim == 2:\n # if 2D image, add channel dimension (HWC)\n pic = np.expand_dims(pic, 2)\n\n npimg = pic\n if isinstance(pic, paddle.Tensor) and \"float\" in str(pic.numpy(\n ).dtype) and mode != 'F':\n pic = pic.mul(255).byte()\n if isinstance(pic, paddle.Tensor):\n npimg = np.transpose(pic.numpy(), (1, 2, 0))\n\n if not isinstance(npimg, np.ndarray):\n raise TypeError(\n 'Input pic must be a paddle.Tensor or NumPy ndarray, ' +\n 'not {}'.format(type(npimg)))\n\n if npimg.shape[2] == 1:\n expected_mode = None\n npimg = npimg[:, :, 0]\n if npimg.dtype == np.uint8:\n expected_mode = 'L'\n elif npimg.dtype == np.int16:\n expected_mode = 'I;16'\n elif npimg.dtype == np.int32:\n expected_mode = 'I'\n elif npimg.dtype == np.float32:\n expected_mode = 'F'\n if mode is not None and mode != expected_mode:\n raise ValueError(\n \"Incorrect mode ({}) supplied for input type {}. 
Should be {}\"\n .format(mode, np.dtype, expected_mode))\n mode = expected_mode\n\n elif npimg.shape[2] == 2:\n permitted_2_channel_modes = ['LA']\n if mode is not None and mode not in permitted_2_channel_modes:\n raise ValueError(\"Only modes {} are supported for 2D inputs\".\n format(permitted_2_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'LA'\n\n elif npimg.shape[2] == 4:\n permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']\n if mode is not None and mode not in permitted_4_channel_modes:\n raise ValueError(\"Only modes {} are supported for 4D inputs\".\n format(permitted_4_channel_modes))\n\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGBA'\n else:\n permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']\n if mode is not None and mode not in permitted_3_channel_modes:\n raise ValueError(\"Only modes {} are supported for 3D inputs\".\n format(permitted_3_channel_modes))\n if mode is None and npimg.dtype == np.uint8:\n mode = 'RGB'\n\n if mode is None:\n raise TypeError('Input type {} is not supported'.format(\n npimg.dtype))\n\n return Image.fromarray(npimg, mode=mode)\n\n\nclass ToTensor(BaseTransform):\n \"\"\"Convert a ``PIL.Image`` or ``numpy.ndarray`` to ``numpy.ndarray`` with shapr (C x H x W).\n Args:\n data_format (str, optional): Data format of output tensor, should be 'HWC' or\n 'CHW'. Default: 'CHW'.\n keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.\n \"\"\"\n\n def __init__(self, data_format='CHW', keys=None):\n super(ToTensor, self).__init__(keys)\n self.data_format = data_format\n\n def _apply_image(self, img):\n \"\"\"\n Args:\n img (PIL.Image|np.ndarray): Image to be converted to tensor.\n Returns:\n np.ndarray: Converted image.\n \"\"\"\n if isinstance(img, PIL.JpegImagePlugin.JpegImageFile) or isinstance(\n img, PIL.Image.Image):\n img = np.array(img)\n img = img / 255.0\n img = img.transpose((2, 0, 1)).astype(\"float32\")\n img = paddle.to_tensor(img)\n return img\n\n\nclass Normalize(BaseTransform):\n \"\"\"Normalize the input data with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels,\n this transform will normalize each channel of the input data.\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n Args:\n mean (int|float|list): Sequence of means for each channel.\n std (int|float|list): Sequence of standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean=0.0, std=1.0, inplace=False):\n key = None\n super(Normalize, self).__init__(key)\n if isinstance(mean, numbers.Number):\n mean = [mean, mean, mean]\n\n if isinstance(std, numbers.Number):\n std = [std, std, std]\n\n self.mean = mean\n self.std = std\n\n def _apply_image(self, img):\n if isinstance(img, paddle.Tensor):\n img = img.numpy()\n return F.normalize(img, self.mean, self.std, 'CHW', False)\n\n\nclass Lambda(BaseTransform):\n \"\"\"Apply a user-defined lambda as a transform. This transform does not support torchscript.\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n if not callable(lambd):\n raise TypeError(\"Argument lambd should be callable, got {}\".format(\n repr(type(lambd).__name__)))\n self.lambd = lambd\n\n def _apply_image(self, img):\n return self.lambd(img)\n"
] |
[
[
"numpy.array",
"numpy.expand_dims"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
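
The X2Paddle record above reimplements torchvision-style transforms on top of Paddle. As a paddle-free illustration of the ToTensor and Normalize steps it performs, here is a short numpy sketch; the image size, mean and std are arbitrary choices for the example.

import numpy as np

img_hwc = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

# ToTensor-style: scale to [0, 1] and transpose HWC -> CHW.
chw = (img_hwc / 255.0).transpose(2, 0, 1).astype("float32")

# Normalize-style: per-channel (x - mean) / std.
mean = np.array([0.5, 0.5, 0.5], dtype="float32").reshape(3, 1, 1)
std = np.array([0.5, 0.5, 0.5], dtype="float32").reshape(3, 1, 1)
normalized = (chw - mean) / std

print(normalized.shape, normalized.dtype)   # (3, 32, 32) float32
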
VinAIResearch/mDSDI
|
[
"8ec49085d8389ab490ec633c3ae4bf66be085366"
] |
[
"DomainBed/domainbed/lib/fast_data_loader.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights, replacement=True, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=True)\n\n if weights == None:\n weights = torch.ones(len(dataset))\n\n batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=True)\n\n self._infinite_iterator = iter(\n torch.utils.data.DataLoader(\n dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batch_sampler)\n )\n )\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError\n\n\nclass FastDataLoader:\n \"\"\"DataLoader wrapper with slightly improved speed by not respawning worker\n processes at every epoch.\"\"\"\n\n def __init__(self, dataset, batch_size, num_workers):\n super().__init__()\n\n batch_sampler = torch.utils.data.BatchSampler(\n torch.utils.data.RandomSampler(dataset, replacement=False), batch_size=batch_size, drop_last=False\n )\n\n self._infinite_iterator = iter(\n torch.utils.data.DataLoader(\n dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batch_sampler)\n )\n )\n\n self._length = len(batch_sampler)\n\n def __iter__(self):\n for _ in range(len(self)):\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return self._length\n"
] |
[
[
"torch.utils.data.RandomSampler",
"torch.utils.data.WeightedRandomSampler",
"torch.utils.data.BatchSampler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
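
The fast_data_loader record above builds an endless batch stream so DataLoader workers are not respawned every epoch. A compact, runnable sketch of that pattern with a toy TensorDataset follows; the InfiniteSampler below mirrors the record's _InfiniteSampler, and the batch size and dataset are placeholders.

import torch
from torch.utils.data import TensorDataset, RandomSampler, BatchSampler, DataLoader

class InfiniteSampler(torch.utils.data.Sampler):
    """Wraps a batch sampler to yield batches of indices forever."""

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            for batch in self.sampler:
                yield batch

dataset = TensorDataset(torch.arange(10).float())
batch_sampler = BatchSampler(RandomSampler(dataset, replacement=True),
                             batch_size=4, drop_last=True)
loader = DataLoader(dataset, batch_sampler=InfiniteSampler(batch_sampler), num_workers=0)

it = iter(loader)
for _ in range(3):                 # take a few batches from the endless stream
    (batch,) = next(it)
    print(batch.shape)             # torch.Size([4])
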
taklpw/Link-Cruiser
|
[
"ec82b3f94dbfbbcda01b1ab156ee24fb01f27eaf"
] |
[
"map_from_bag.py"
] |
[
"import numpy as np\r\nimport cv2\r\nimport pyrealsense2 as rs\r\nimport pyqtgraph.colormap\r\nfrom pyqtgraph.Qt import QtCore, QtGui\r\nimport pyqtgraph.opengl as gl\r\nimport sys\r\n\r\n\r\n# Initialise OpenGL app\r\napp = QtGui.QApplication(sys.argv)\r\n\r\n\r\ndef get_pointcloud(depth_image, color_frame, img, img_size):\r\n point_cloud = rs.pointcloud()\r\n points = rs.points()\r\n\r\n # Obtain point cloud data\r\n point_cloud.map_to(color_frame)\r\n points = point_cloud.calculate(depth_image)\r\n\r\n # Convert point cloud to 2d Array\r\n points3d = np.asanyarray(points.get_vertices())\r\n points3d = points3d.view(np.float32).reshape(points3d.shape + (-1,))\r\n texture_coords = np.asanyarray(points.get_texture_coordinates())\r\n texture_coords = texture_coords.view(np.float32).reshape(texture_coords.shape + (-1,))\r\n\r\n # Remove all invalid data within a certain distance\r\n long_distance_mask = points3d[:, 2] < 10\r\n short_distance_mask = points3d[:, 2] > 0.3\r\n distance_mask = np.logical_and(long_distance_mask, short_distance_mask)\r\n points3d = points3d[distance_mask]\r\n texture_coords = texture_coords[distance_mask]\r\n\r\n # Get colours\r\n u_coords = ((texture_coords[:, 0])*img_size[0])\r\n u_coords = np.round(np.clip(u_coords, a_min=0, a_max=img_size[0]-1))\r\n v_coords = ((texture_coords[:, 1])*img_size[1])\r\n v_coords = np.round(np.clip(v_coords, a_min=0, a_max=img_size[1]-1))\r\n uv_coords = np.vstack((u_coords, v_coords)).T.astype(np.uint16)\r\n\r\n # Sample random points\r\n idx = np.random.randint(points3d.shape[0], size=round(points3d.shape[0]/500))\r\n sampled_points = points3d[idx, :]\r\n uv_coords = uv_coords[idx, :]\r\n\r\n # Add extra column of 0's to 3d points\r\n o = np.ones((sampled_points.shape[0], 1))\r\n sampled_points = np.hstack((sampled_points, o))\r\n\r\n # Get colours of points\r\n point_colors = []\r\n for i, coord in enumerate(uv_coords):\r\n cols = img[coord[0], coord[1], :]\r\n point_colors.append(cols)\r\n\r\n point_colors = np.array(point_colors)\r\n\r\n return sampled_points, point_colors\r\n\r\n\r\ndef play_bag(filename):\r\n # Set visualisation\r\n w = gl.GLViewWidget()\r\n w.opts['distance'] = 20\r\n w.show()\r\n w.setWindowTitle('Complete Points')\r\n w.resize(800, 800)\r\n g = gl.GLGridItem()\r\n w.addItem(g)\r\n\r\n # Configure options and start stream\r\n pipeline = rs.pipeline()\r\n config = rs.config()\r\n config.enable_device_from_file(filename)\r\n config.enable_all_streams()\r\n profile = pipeline.start(config)\r\n\r\n # Past and Present Variables\r\n new_depth_data = None\r\n old_depth_data = None\r\n new_depth_intrinsics = None\r\n old_depth_intrinsics = None\r\n new_color_data = None\r\n old_color_data = None\r\n\r\n frame_num = 0\r\n frames_processed = 0\r\n frames_to_skip = 1\r\n img_size = None\r\n\r\n locations = np.array([[0, 0, 0]])\r\n location = np.array([[0], [0], [0], [1]])\r\n all_points = None\r\n all_colors = None\r\n while True:\r\n # Get frame from bag\r\n frames = pipeline.wait_for_frames()\r\n\r\n # Stop if the bag is done playing\r\n if frames.frame_number < frame_num:\r\n break\r\n else:\r\n frame_num = frames.frame_number\r\n\r\n # Align depth to colour\r\n align = rs.align(rs.stream.color)\r\n frames = align.process(frames)\r\n\r\n # Obtain depth and colour frames\r\n depth_frame = frames.get_depth_frame()\r\n color_frame = frames.get_color_frame()\r\n\r\n # If their is either no depth or color frame try again\r\n if not depth_frame or not color_frame:\r\n continue\r\n\r\n # Decimation Filter\r\n dec_filter 
= rs.decimation_filter()\r\n # Edge-preserving smoothing\r\n spat_filter = rs.spatial_filter()\r\n # Apply Filters\r\n depth_frame = dec_filter.process(depth_frame)\r\n depth_frame = spat_filter.process(depth_frame)\r\n\r\n if frames_processed % frames_to_skip == 0:\r\n # Get old data from previous frame\r\n if frames_processed:\r\n old_depth_data = new_depth_data\r\n old_color_data = new_color_data\r\n old_depth_intrinsics = new_depth_intrinsics\r\n\r\n # Intrinsicts and Extrinsics\r\n new_depth_intrinsics = depth_frame.profile.as_video_stream_profile().intrinsics\r\n color_intrinsics = color_frame.profile.as_video_stream_profile().intrinsics\r\n depth_to_color_extrinsics = depth_frame.profile.get_extrinsics_to(color_frame.profile)\r\n\r\n # Obtain Depth scale\r\n depth_sensor = profile.get_device().first_depth_sensor()\r\n depth_scale = depth_sensor.get_depth_scale()\r\n\r\n # Get colour and depth data\r\n new_color_data = np.asanyarray(color_frame.get_data())\r\n new_color_data = cv2.cvtColor(new_color_data, cv2.COLOR_RGB2BGR)\r\n new_depth_data = np.asanyarray(depth_frame.get_data())\r\n\r\n # Ressize color image to depth size\r\n if new_color_data.shape != new_depth_data.shape:\r\n new_color_data = cv2.resize(new_color_data, (new_depth_data.shape[1], new_depth_data.shape[0]))\r\n img_size = new_depth_data.shape\r\n\r\n # Colorize depth data\r\n depth_colormap = cv2.applyColorMap(\r\n cv2.convertScaleAbs(new_depth_data, alpha=0.08),\r\n cv2.COLORMAP_JET\r\n )\r\n depth_colormap = np.asanyarray(depth_colormap)\r\n # depth_colormap = depth_colormap[:, 40:, :]\r\n\r\n if frames_processed:\r\n # Get absolute orientation\r\n camera_matrix = np.array([\r\n [color_intrinsics.fx, 0, color_intrinsics.ppx],\r\n [0, color_intrinsics.fy, color_intrinsics.ppy],\r\n [0, 0, 1]\r\n ])\r\n odom = cv2.rgbd.RgbdICPOdometry_create(camera_matrix)\r\n\r\n # Scale depth data\r\n old_depth_data_scaled = (old_depth_data*depth_scale).astype(np.float32)\r\n new_depth_data_scaled = (new_depth_data*depth_scale).astype(np.float32)\r\n\r\n # Create masks to ignore invalid depth data\r\n srcmask = np.ones_like(old_depth_data, dtype=np.uint8)\r\n srcmask[old_depth_data_scaled == 0] = 0\r\n srcmask[old_depth_data_scaled > 10] = 0\r\n\r\n dstmask = np.ones_like(new_depth_data, dtype=np.uint8)\r\n dstmask[new_depth_data_scaled == 0] = 0\r\n dstmask[new_depth_data_scaled > 10] = 0\r\n\r\n old_gray = cv2.cvtColor(old_color_data, cv2.COLOR_RGB2GRAY)\r\n new_gray = cv2.cvtColor(new_color_data, cv2.COLOR_RGB2GRAY)\r\n old_depth_data_scaled[old_depth_data_scaled == 0] = np.nan\r\n new_depth_data_scaled[new_depth_data_scaled == 0] = np.nan\r\n\r\n retval, Rt = odom.compute(\r\n srcImage=old_gray, srcDepth=old_depth_data_scaled,\r\n srcMask=srcmask,\r\n dstImage=new_gray, dstDepth=new_depth_data_scaled,\r\n dstMask=dstmask\r\n )\r\n\r\n location = np.dot(Rt, location)\r\n # if abs(locations[frames_processed-1] - locations[frames_processed]) > 0.5:\r\n # pass\r\n locations = np.vstack([locations, location[0:3].T])\r\n\r\n # Get pointcloud and perform transformation\r\n point_cloud, point_colors = get_pointcloud(depth_frame, color_frame, new_color_data, img_size)\r\n t_point_cloud = np.dot(Rt, point_cloud.T)\r\n t_point_cloud = t_point_cloud[0:3, :].T\r\n # all_points = np.vstack((all_points, t_point_cloud))\r\n\r\n # Make colormap\r\n if all_points is None or all_colors is None:\r\n all_colors = np.copy(point_colors)\r\n all_points = np.copy(t_point_cloud)\r\n else:\r\n all_colors = np.vstack((all_colors, point_colors))\r\n 
all_points = np.vstack((all_points, t_point_cloud))\r\n\r\n p = gl.GLScatterPlotItem(pos=all_points, size=2, color=all_colors/255, pxMode=True)\r\n # Rotate set of points by 90 degrees\r\n p.rotate(180, x=1, y=1, z=1)\r\n w.addItem(p)\r\n w.show()\r\n\r\n # Show Video\r\n if frames_processed:\r\n images = np.hstack((new_color_data, depth_colormap))\r\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow('RealSense', images)\r\n cv2.waitKey(1)\r\n\r\n frames_processed += 1\r\n\r\n pipeline.stop()\r\n cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == '__main__':\r\n play_bag('kellysroom.bag')\r\n app.exec_()\r\n"
] |
[
[
"numpy.hstack",
"numpy.dot",
"numpy.ones_like",
"numpy.logical_and",
"numpy.clip",
"numpy.ones",
"numpy.copy",
"numpy.asanyarray",
"numpy.array",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ivan-bilan/tac-self-attention
|
[
"8dd583ac960716bbf0c645c23f2c50bd36ca042a"
] |
[
"ensemble.py"
] |
[
"\"\"\"\nEnsemble the predictions from different model outputs.\n\"\"\"\nimport argparse\nimport json\nimport pickle\nimport numpy as np\nfrom collections import Counter\n\nfrom data.loader import DataLoader\nfrom utils import scorer, constant\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('pred_files', nargs='+', help='A list of prediction files written by eval.py.')\n parser.add_argument('--data_dir', default='dataset/tacred')\n parser.add_argument('--dataset', default='test', help='Evaluate on dev or test set.')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n print(\"Loading data file...\")\n filename = args.data_dir + '/{}.json'.format(args.dataset)\n with open(filename, 'r') as infile:\n data = json.load(infile, encoding='utf8')\n labels = [d['relation'] for d in data]\n\n # read predictions\n print(\"Loading {} prediction files...\".format(len(args.pred_files)))\n scores_list = []\n for path in args.pred_files:\n print(path)\n with open(path, 'rb') as infile:\n scores = pickle.load(infile)\n scores_list += [scores]\n \n print(\"Calculating ensembled predictions...\")\n predictions = []\n scores_by_examples = list(zip(*scores_list))\n\n assert len(scores_by_examples) == len(data)\n for scores in scores_by_examples:\n pred = ensemble(scores)\n predictions += [pred]\n\n id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])\n predictions = [id2label[p] for p in predictions]\n scorer.score(labels, predictions, verbose=True)\n\n\ndef ensemble(scores):\n \"\"\"\n Ensemble by majority vote.\n \"\"\"\n c = Counter()\n for probs in zip(scores):\n idx = int(np.argmax(np.array(probs)))\n c.update([idx])\n best = c.most_common(1)[0][0]\n return best\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Amonsoft/catalyst
|
[
"d9762e8646d46fa1dcda29151388ebcd0734337f",
"28060bf11fc34966438c24c688caf335bf15f1d7"
] |
[
"catalyst/gens/tradesimulation.py",
"catalyst/finance/risk/period.py"
] |
[
"#\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom contextlib2 import ExitStack\nfrom logbook import Logger, Processor\nfrom pandas.tslib import normalize_date\nfrom catalyst.protocol import BarData\nfrom catalyst.utils.api_support import ZiplineAPI\nfrom six import viewkeys\n\nfrom catalyst.gens.sim_engine import (\n BAR,\n SESSION_START,\n SESSION_END,\n MINUTE_END,\n BEFORE_TRADING_START_BAR\n)\n\nlog = Logger('Trade Simulation')\n\n\nclass AlgorithmSimulator(object):\n\n EMISSION_TO_PERF_KEY_MAP = {\n 'minute': 'minute_perf',\n 'daily': 'daily_perf'\n }\n\n def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,\n restrictions, universe_func):\n\n # ==============\n # Simulation\n # Param Setup\n # ==============\n self.sim_params = sim_params\n self.env = algo.trading_environment\n self.data_portal = data_portal\n self.restrictions = restrictions\n\n # ==============\n # Algo Setup\n # ==============\n self.algo = algo\n\n # ==============\n # Snapshot Setup\n # ==============\n\n # This object is the way that user algorithms interact with OHLCV data,\n # fetcher data, and some API methods like `data.can_trade`.\n self.current_data = self._create_bar_data(universe_func)\n\n # We don't have a datetime for the current snapshot until we\n # receive a message.\n self.simulation_dt = None\n\n self.clock = clock\n\n self.benchmark_source = benchmark_source\n\n # =============\n # Logging Setup\n # =============\n\n # Processor function for injecting the algo_dt into\n # user prints/logs.\n def inject_algo_dt(record):\n if 'algo_dt' not in record.extra:\n record.extra['algo_dt'] = self.simulation_dt\n self.processor = Processor(inject_algo_dt)\n\n def get_simulation_dt(self):\n return self.simulation_dt\n\n def _create_bar_data(self, universe_func):\n return BarData(\n data_portal=self.data_portal,\n simulation_dt_func=self.get_simulation_dt,\n data_frequency=self.sim_params.data_frequency,\n trading_calendar=self.algo.trading_calendar,\n restrictions=self.restrictions,\n universe_func=universe_func\n )\n\n def transform(self):\n \"\"\"\n Main generator work loop.\n \"\"\"\n algo = self.algo\n emission_rate = algo.perf_tracker.emission_rate\n\n def every_bar(dt_to_use, current_data=self.current_data,\n handle_data=algo.event_manager.handle_data):\n # called every tick (minute or day).\n algo.on_dt_changed(dt_to_use)\n\n for capital_change in calculate_minute_capital_changes(dt_to_use):\n yield capital_change\n\n self.simulation_dt = dt_to_use\n\n blotter = algo.blotter\n perf_tracker = algo.perf_tracker\n\n # handle any transactions and commissions coming out new orders\n # placed in the last bar\n new_transactions, new_commissions, closed_orders = \\\n blotter.get_transactions(current_data)\n\n blotter.prune_orders(closed_orders)\n\n for transaction in new_transactions:\n perf_tracker.process_transaction(transaction)\n\n # since this order was modified, record it\n order = blotter.orders[transaction.order_id]\n 
perf_tracker.process_order(order)\n\n if new_commissions:\n for commission in new_commissions:\n perf_tracker.process_commission(commission)\n\n handle_data(algo, current_data, dt_to_use)\n\n # grab any new orders from the blotter, then clear the list.\n # this includes cancelled orders.\n new_orders = blotter.new_orders\n blotter.new_orders = []\n\n # if we have any new orders, record them so that we know\n # in what perf period they were placed.\n if new_orders:\n for new_order in new_orders:\n perf_tracker.process_order(new_order)\n\n algo.portfolio_needs_update = True\n algo.account_needs_update = True\n algo.performance_needs_update = True\n\n def once_a_day(midnight_dt, current_data=self.current_data,\n data_portal=self.data_portal):\n\n perf_tracker = algo.perf_tracker\n\n # Get the positions before updating the date so that prices are\n # fetched for trading close instead of midnight\n positions = algo.perf_tracker.position_tracker.positions\n position_assets = algo.asset_finder.retrieve_all(positions)\n\n # set all the timestamps\n self.simulation_dt = midnight_dt\n algo.on_dt_changed(midnight_dt)\n\n # process any capital changes that came overnight\n for capital_change in algo.calculate_capital_changes(\n midnight_dt, emission_rate=emission_rate,\n is_interday=True):\n yield capital_change\n\n # we want to wait until the clock rolls over to the next day\n # before cleaning up expired assets.\n self._cleanup_expired_assets(midnight_dt, position_assets)\n\n # handle any splits that impact any positions or any open orders.\n assets_we_care_about = \\\n viewkeys(perf_tracker.position_tracker.positions) | \\\n viewkeys(algo.blotter.open_orders)\n\n if assets_we_care_about:\n splits = data_portal.get_splits(assets_we_care_about,\n midnight_dt)\n if splits:\n algo.blotter.process_splits(splits)\n perf_tracker.position_tracker.handle_splits(splits)\n\n def handle_benchmark(date, benchmark_source=self.benchmark_source):\n algo.perf_tracker.all_benchmark_returns[date] = \\\n benchmark_source.get_value(date)\n\n def on_exit():\n # Remove references to algo, data portal, et al to break cycles\n # and ensure deterministic cleanup of these objects when the\n # simulation finishes.\n self.algo = None\n self.benchmark_source = self.current_data = self.data_portal = None\n\n with ExitStack() as stack:\n stack.callback(on_exit)\n stack.enter_context(self.processor)\n stack.enter_context(ZiplineAPI(self.algo))\n\n if algo.data_frequency == 'minute':\n def execute_order_cancellation_policy():\n algo.blotter.execute_cancel_policy(SESSION_END)\n\n def calculate_minute_capital_changes(dt):\n # process any capital changes that came between the last\n # and current minutes\n return algo.calculate_capital_changes(\n dt, emission_rate=emission_rate, is_interday=False)\n else:\n def execute_order_cancellation_policy():\n pass\n\n def calculate_minute_capital_changes(dt):\n return []\n\n for dt, action in self.clock:\n if action == BAR:\n for capital_change_packet in every_bar(dt):\n yield capital_change_packet\n elif action == SESSION_START:\n for capital_change_packet in once_a_day(dt):\n yield capital_change_packet\n elif action == SESSION_END:\n # End of the session.\n if emission_rate == 'daily':\n handle_benchmark(normalize_date(dt))\n execute_order_cancellation_policy()\n\n yield self._get_daily_message(dt, algo, algo.perf_tracker)\n elif action == BEFORE_TRADING_START_BAR:\n self.simulation_dt = dt\n algo.on_dt_changed(dt)\n algo.before_trading_start(self.current_data)\n elif action == MINUTE_END:\n 
handle_benchmark(dt)\n minute_msg = \\\n self._get_minute_message(dt, algo, algo.perf_tracker)\n\n yield minute_msg\n\n risk_message = algo.perf_tracker.handle_simulation_end()\n yield risk_message\n\n def _cleanup_expired_assets(self, dt, position_assets):\n \"\"\"\n Clear out any assets that have expired before starting a new sim day.\n\n Performs two functions:\n\n 1. Finds all assets for which we have open orders and clears any\n orders whose assets are on or after their auto_close_date.\n\n 2. Finds all assets for which we have positions and generates\n close_position events for any assets that have reached their\n auto_close_date.\n \"\"\"\n algo = self.algo\n\n def past_auto_close_date(asset):\n acd = asset.auto_close_date\n return acd is not None and acd <= dt\n\n # Remove positions in any sids that have reached their auto_close date.\n assets_to_clear = \\\n [asset for asset in position_assets if past_auto_close_date(asset)]\n perf_tracker = algo.perf_tracker\n data_portal = self.data_portal\n for asset in assets_to_clear:\n perf_tracker.process_close_position(asset, dt, data_portal)\n\n # Remove open orders for any sids that have reached their\n # auto_close_date.\n blotter = algo.blotter\n assets_to_cancel = \\\n set([asset for asset in blotter.open_orders\n if past_auto_close_date(asset)])\n for asset in assets_to_cancel:\n blotter.cancel_all_orders_for_asset(asset)\n\n def _get_daily_message(self, dt, algo, perf_tracker):\n \"\"\"\n Get a perf message for the given datetime.\n \"\"\"\n perf_message = perf_tracker.handle_market_close(\n dt, self.data_portal,\n )\n perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars\n return perf_message\n\n def _get_minute_message(self, dt, algo, perf_tracker):\n \"\"\"\n Get a perf message for the given datetime.\n \"\"\"\n rvars = algo.recorded_vars\n\n minute_message = perf_tracker.handle_minute_close(\n dt, self.data_portal,\n )\n\n minute_message['minute_perf']['recorded_vars'] = rvars\n return minute_message\n",
"#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nimport logbook\n\nfrom six import iteritems\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import risk\nfrom . risk import check_entry\n\nfrom empyrical import (\n alpha_beta_aligned,\n annual_volatility,\n cum_returns,\n downside_risk,\n information_ratio,\n max_drawdown,\n sharpe_ratio,\n sortino_ratio\n)\n\nlog = logbook.Logger('Risk Period')\n\nchoose_treasury = functools.partial(risk.choose_treasury,\n risk.select_treasury_duration)\n\n\nclass RiskMetricsPeriod(object):\n def __init__(self, start_session, end_session, returns, trading_calendar,\n treasury_curves, benchmark_returns, algorithm_leverages=None):\n if treasury_curves.index[-1] >= start_session:\n mask = ((treasury_curves.index >= start_session) &\n (treasury_curves.index <= end_session))\n\n self.treasury_curves = treasury_curves[mask]\n else:\n # our test is beyond the treasury curve history\n # so we'll use the last available treasury curve\n self.treasury_curves = treasury_curves[-1:]\n\n self._start_session = start_session\n self._end_session = end_session\n self.trading_calendar = trading_calendar\n\n trading_sessions = trading_calendar.sessions_in_range(\n self._start_session,\n self._end_session,\n )\n self.algorithm_returns = self.mask_returns_to_period(returns,\n trading_sessions)\n\n # Benchmark needs to be masked to the same dates as the algo returns\n self.benchmark_returns = self.mask_returns_to_period(\n benchmark_returns,\n self.algorithm_returns.index\n )\n self.algorithm_leverages = algorithm_leverages\n\n self.calculate_metrics()\n\n def calculate_metrics(self):\n self.benchmark_period_returns = \\\n cum_returns(self.benchmark_returns).iloc[-1]\n\n self.algorithm_period_returns = \\\n cum_returns(self.algorithm_returns).iloc[-1]\n\n if not self.algorithm_returns.index.equals(\n self.benchmark_returns.index\n ):\n message = \"Mismatch between benchmark_returns ({bm_count}) and \\\n algorithm_returns ({algo_count}) in range {start} : {end}\"\n message = message.format(\n bm_count=len(self.benchmark_returns),\n algo_count=len(self.algorithm_returns),\n start=self._start_session,\n end=self._end_session\n )\n raise Exception(message)\n\n self.num_trading_days = len(self.benchmark_returns)\n\n self.mean_algorithm_returns = (\n self.algorithm_returns.cumsum() /\n np.arange(1, self.num_trading_days + 1, dtype=np.float64)\n )\n\n self.benchmark_volatility = annual_volatility(self.benchmark_returns)\n self.algorithm_volatility = annual_volatility(self.algorithm_returns)\n\n self.treasury_period_return = choose_treasury(\n self.treasury_curves,\n self._start_session,\n self._end_session,\n self.trading_calendar,\n )\n self.sharpe = sharpe_ratio(\n self.algorithm_returns,\n )\n # The consumer currently expects a 0.0 value for sharpe in period,\n # this differs from cumulative which was np.nan.\n # When factoring out the sharpe_ratio, the different return types\n # were collapsed into `np.nan`.\n # TODO: 
Either fix consumer to accept `np.nan` or make the\n # `sharpe_ratio` return type configurable.\n # In the meantime, convert nan values to 0.0\n if pd.isnull(self.sharpe):\n self.sharpe = 0.0\n self.downside_risk = downside_risk(\n self.algorithm_returns.values\n )\n self.sortino = sortino_ratio(\n self.algorithm_returns.values,\n _downside_risk=self.downside_risk,\n )\n self.information = information_ratio(\n self.algorithm_returns.values,\n self.benchmark_returns.values,\n )\n self.alpha, self.beta = alpha_beta_aligned(\n self.algorithm_returns.values,\n self.benchmark_returns.values,\n )\n self.excess_return = self.algorithm_period_returns - \\\n self.treasury_period_return\n self.max_drawdown = max_drawdown(self.algorithm_returns.values)\n self.max_leverage = self.calculate_max_leverage()\n\n def to_dict(self):\n \"\"\"\n Creates a dictionary representing the state of the risk report.\n Returns a dict object of the form:\n \"\"\"\n period_label = self._end_session.strftime(\"%Y-%m\")\n rval = {\n 'trading_days': self.num_trading_days,\n 'benchmark_volatility': self.benchmark_volatility,\n 'algo_volatility': self.algorithm_volatility,\n 'treasury_period_return': self.treasury_period_return,\n 'algorithm_period_return': self.algorithm_period_returns,\n 'benchmark_period_return': self.benchmark_period_returns,\n 'sharpe': self.sharpe,\n 'sortino': self.sortino,\n 'information': self.information,\n 'beta': self.beta,\n 'alpha': self.alpha,\n 'excess_return': self.excess_return,\n 'max_drawdown': self.max_drawdown,\n 'max_leverage': self.max_leverage,\n 'period_label': period_label\n }\n\n return {k: None if check_entry(k, v) else v\n for k, v in iteritems(rval)}\n\n def __repr__(self):\n statements = []\n metrics = [\n \"algorithm_period_returns\",\n \"benchmark_period_returns\",\n \"excess_return\",\n \"num_trading_days\",\n \"benchmark_volatility\",\n \"algorithm_volatility\",\n \"sharpe\",\n \"sortino\",\n \"information\",\n \"beta\",\n \"alpha\",\n \"max_drawdown\",\n \"max_leverage\",\n \"algorithm_returns\",\n \"benchmark_returns\",\n ]\n\n for metric in metrics:\n value = getattr(self, metric)\n statements.append(\"{m}:{v}\".format(m=metric, v=value))\n\n return '\\n'.join(statements)\n\n def mask_returns_to_period(self, daily_returns, trading_days):\n if isinstance(daily_returns, list):\n returns = pd.Series([x.returns for x in daily_returns],\n index=[x.date for x in daily_returns])\n else: # otherwise we're receiving an index already\n returns = daily_returns\n\n trade_day_mask = returns.index.normalize().isin(trading_days)\n\n mask = ((returns.index >= self._start_session) &\n (returns.index <= self._end_session) & trade_day_mask)\n\n returns = returns[mask]\n return returns\n\n def calculate_max_leverage(self):\n if self.algorithm_leverages is None:\n return 0.0\n else:\n return max(self.algorithm_leverages)\n"
] |
[
[
"pandas.tslib.normalize_date"
],
[
"numpy.arange",
"pandas.Series",
"pandas.isnull"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ryanjmccall/feets
|
[
"56c0887ffc3f9d19b00606263b645b9f518d1574",
"56c0887ffc3f9d19b00606263b645b9f518d1574"
] |
[
"feets/tests/test_FATS_to_feets.py",
"experiments/data_syn.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# The MIT License (MIT)\n\n# Copyright (c) 2017 Juan Cabral\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# =============================================================================\n# FUTURE\n# =============================================================================\n\nfrom __future__ import unicode_literals\n\n\n# =============================================================================\n# DOC\n# =============================================================================\n\n__doc__ = \"\"\"FATS to feets compatibility testing\"\"\"\n\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport os\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom .. import FeatureSpace, preprocess\nfrom .. 
datasets import macho\n\nfrom .core import FeetsTestCase, DATA_PATH\n\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\nclass FATSPreprocessRegressionTestCase(FeetsTestCase):\n\n def setUp(self):\n lc = macho.load_MACHO_example()\n self.time = lc.data.R.time\n self.mag = lc.data.R.magnitude\n self.error = lc.data.R.error\n self.time2 = lc.data.B.time\n self.mag2 = lc.data.B.magnitude\n self.error2 = lc.data.B.error\n\n self.preprc_path = os.path.join(DATA_PATH, \"FATS_preprc.npz\")\n with np.load(self.preprc_path) as npz:\n self.pF_time, self.pF_time2 = npz[\"time\"], npz[\"time2\"]\n self.pF_mag, self.pF_mag2 = npz[\"mag\"], npz[\"mag2\"]\n self.pF_error, self.pF_error2 = npz[\"error\"], npz[\"error2\"]\n\n self.lc_path = os.path.join(DATA_PATH, \"FATS_aligned.npz\")\n with np.load(self.lc_path) as npz:\n self.aF_time = npz['aligned_time']\n self.aF_mag = npz['aligned_mag']\n self.aF_mag2 = npz['aligned_mag2']\n self.aF_error = npz['aligned_error']\n self.aF_error2 = npz['aligned_error2']\n\n def test_remove_noise(self):\n p_time, p_mag, p_error = preprocess.remove_noise(\n self.time, self.mag, self.error)\n p_time2, p_mag2, p_error2 = preprocess.remove_noise(\n self.time2, self.mag2, self.error2)\n self.assertArrayEqual(p_time, self.pF_time)\n self.assertArrayEqual(p_time2, self.pF_time2)\n self.assertArrayEqual(p_mag, self.pF_mag)\n self.assertArrayEqual(p_mag2, self.pF_mag2)\n self.assertArrayEqual(p_error, self.pF_error)\n self.assertArrayEqual(p_error2, self.pF_error2)\n\n def test_align(self):\n a_time, a_mag, a_mag2, a_error, a_error2 = preprocess.align(\n self.pF_time, self.pF_time2,\n self.pF_mag, self.pF_mag2,\n self.pF_error, self.pF_error2)\n self.assertArrayEqual(a_time, self.aF_time)\n self.assertArrayEqual(a_mag, self.aF_mag)\n self.assertArrayEqual(a_mag2, self.aF_mag2)\n self.assertArrayEqual(a_error, self.aF_error)\n self.assertArrayEqual(a_error2, self.aF_error2)\n\n\nclass FATSRegressionTestCase(FeetsTestCase):\n\n def setUp(self):\n # the paths\n self.lc_path = os.path.join(DATA_PATH, \"FATS_aligned.npz\")\n self.FATS_result_path = os.path.join(DATA_PATH, \"FATS_result.npz\")\n\n # recreate light curve\n with np.load(self.lc_path) as npz:\n self.lc = (\n npz['time'],\n npz['mag'],\n npz['error'],\n npz['mag2'],\n npz['aligned_time'],\n npz['aligned_mag'],\n npz['aligned_mag2'],\n npz['aligned_error'],\n npz['aligned_error2'])\n\n # recreate the FATS result\n with np.load(self.FATS_result_path) as npz:\n self.features = npz[\"features\"]\n self.features = self.features.astype(\"U\")\n self.FATS_result = dict(zip(self.features, npz[\"values\"]))\n\n # creates an template for all error, messages\n self.err_template = (\"Feature '{feature}' missmatch.\")\n\n def exclude_value_feature_evaluation(self, feature):\n return \"_harmonics_\" in feature\n\n def assert_feature_params(self, feature):\n feature_params = {\n \"PeriodLS\": {\"atol\": 1e-04},\n \"Period_fit\": {\"atol\": 1e-40},\n \"Psi_CS\": {\"atol\": 1e-02},\n \"Psi_eta\": {\"atol\": 1e-01}}\n params = {\"err_msg\": self.err_template.format(feature=feature)}\n params .update(feature_params.get(feature, {}))\n return params\n\n def assertFATS(self, feets_result):\n for feature in self.features:\n if feature not in feets_result:\n self.fail(\"Missing feature {}\".format(feature))\n if self.exclude_value_feature_evaluation(feature):\n continue\n feets_value = feets_result[feature]\n FATS_value 
= self.FATS_result[feature]\n params = self.assert_feature_params(feature)\n self.assertAllClose(feets_value, FATS_value, **params)\n\n def test_FATS_to_feets_extract_one(self):\n fs = FeatureSpace(\n SlottedA_length={\"T\": None},\n StetsonKAC={\"T\": None})\n result = fs.extract(*self.lc)\n feets_result = dict(zip(*result))\n self.assertFATS(feets_result)\n\n\nclass FATSTutorialTestCase(FeetsTestCase):\n\n def shuffle(self, mag, error, time, mag2, aligned_mag, aligned_mag2,\n aligned_time, aligned_error, aligned_error2):\n\n N = len(mag)\n shuffle = np.arange(0, N)\n index = self.random.permutation(shuffle)\n index = np.sort(index[0:int(N/2)])\n\n mag_test = mag[index]\n time_test = time[index]\n error_test = error[index]\n\n N2 = len(mag2)\n shuffle2 = np.arange(0, N2)\n index2 = self.random.permutation(shuffle2)\n index2 = np.sort(index2[0:int(N2/2)])\n\n mag2_test = mag2[index2]\n\n N3 = len(aligned_mag)\n shuffle3 = np.arange(0, N3)\n index3 = self.random.permutation(shuffle3)\n index3 = np.sort(index3[0:int(N3/2)])\n\n aligned_mag_test = aligned_mag[index3]\n aligned_mag2_test = aligned_mag2[index3]\n aligned_time_test = aligned_time[index3]\n aligned_error_test = aligned_error[index3]\n aligned_error2_test = aligned_error2[index3]\n\n return {\n \"magnitude\": mag_test,\n \"time\": time_test,\n \"error\": error_test,\n \"magnitude2\": mag2_test,\n \"aligned_magnitude\": aligned_mag_test,\n \"aligned_magnitude2\": aligned_mag2_test,\n \"aligned_time\": aligned_time_test,\n \"aligned_error\": aligned_error_test,\n \"aligned_error2\": aligned_error2_test}\n\n def setUp(self):\n self.random = np.random.RandomState(42)\n self.lc_path = os.path.join(DATA_PATH, \"FATS_aligned.npz\")\n with np.load(self.lc_path) as npz:\n self.lc = dict(npz)\n\n def test_invariance_to_unequal_sampling(self):\n # tests performed to the features in order to check their invariance\n # to unequal sampling. To do so, we take random observations of a\n # light-curve and compare the resulting features with the ones obtained\n # from the original data.\n\n fs = FeatureSpace()\n\n # We calculate the features values for fifty random samples of the\n # original light-curve:\n features_values = []\n for i in range(50):\n sample = self.shuffle(**self.lc)\n features, values = fs.extract(**sample)\n result = dict(zip(features, values))\n features_values.append(result)\n\n # We obtain the mean and standard deviation of each calculated feature:\n stats = pd.DataFrame(features_values).aggregate([np.mean, np.std])\n\n # Original light-curve:\n features, values = fs.extract(\n magnitude=self.lc[\"mag\"],\n time=self.lc[\"time\"],\n error=self.lc[\"error\"],\n magnitude2=self.lc[\"mag2\"],\n aligned_magnitude=self.lc[\"aligned_mag\"],\n aligned_magnitude2=self.lc[\"aligned_mag2\"],\n aligned_time=self.lc[\"aligned_time\"],\n aligned_error=self.lc[\"aligned_error\"],\n aligned_error2=self.lc[\"aligned_error2\"])\n\n def normalize(c):\n name, value = c.name, c[0]\n mean, std = stats[name][\"mean\"], stats[name][\"std\"]\n return (value - mean) / std\n\n original = pd.DataFrame([dict(zip(features, values))])\n result = original.apply(normalize)\n\n self.assertLess(np.abs(result.mean()), 0.09)\n self.assertLess(result.std(), 1.09)\n",
"# NORMAL\ntime_normal = np.arange(10000)\nmag_normal = np.random.normal(size=10000)\nerror_normal = np.random.normal(loc=1, scale =0.008, size=10000)\n\nmag_normal2 = np.random.normal(size=10000)\nerror_normal2 = np.random.normal(loc=1, scale =0.008, size=10000)\n\nlc_normal = {\n \"time\": time_normal,\n \"magnitude\": mag_normal,\n \"error\": error_normal,\n \"magnitude2\": mag_normal2,\n \"aligned_time\": time_normal,\n \"aligned_magnitude\": mag_normal,\n \"aligned_magnitude2\": mag_normal2,\n \"aligned_error\": error_normal,\n \"aligned_error2\": error_normal2}\n\n# PERIODIC\nimport numpy as np\nrand = np.random.RandomState(42)\ntime_periodic = 100 * rand.rand(100)\nmag_periodic = np.sin(2 * np.pi * time_periodic) + 0.1 * rand.randn(100)\n\nlc_periodic = {\"time\": time_periodic, \"magnitude\": mag_periodic}\n\n# UNIFORM\nlc_uniform = {\n \"time\": np.arange(10000),\n \"magnitude\": np.random.uniform(size=10000)}\n"
] |
[
[
"numpy.arange",
"numpy.random.RandomState",
"pandas.DataFrame",
"numpy.load"
],
[
"numpy.arange",
"numpy.sin",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kushbanga/phylib
|
[
"a4c7262b4ebcd6edb29409e2c08c5870fc9444eb"
] |
[
"phylib/io/tests/test_datasets.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"Tests of dataset utility functions.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport logging\nfrom pathlib import Path\nfrom itertools import product\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal as ae\nimport responses\nfrom pytest import raises, yield_fixture\n\nfrom ..datasets import (download_file,\n download_test_file,\n _check_md5_of_url,\n )\nfrom phylib.utils.testing import captured_logging\n\nlogger = logging.getLogger(__name__)\n\n\n#------------------------------------------------------------------------------\n# Fixtures\n#------------------------------------------------------------------------------\n\n# Test URL and data\n_URL = 'http://test/data'\n_DATA = np.linspace(0., 1., 100000).astype(np.float32)\n_CHECKSUM = '7d257d0ae7e3af8ca3574ccc3a4bf072'\n\n\ndef _add_mock_response(url, body, file_type='binary'):\n content_type = ('application/octet-stream'\n if file_type == 'binary' else 'text/plain')\n responses.add(responses.GET, url,\n body=body,\n status=200,\n content_type=content_type,\n )\n\n\n@yield_fixture\ndef mock_url():\n _add_mock_response(_URL, _DATA.tostring())\n _add_mock_response(_URL + '.md5', _CHECKSUM + ' ' + Path(_URL).name)\n yield _URL\n responses.reset()\n\n\n@yield_fixture(params=product((True, False), repeat=4))\ndef mock_urls(request):\n data = _DATA.tostring()\n checksum = _CHECKSUM\n url_data = _URL\n url_checksum = _URL + '.md5'\n\n if not request.param[0]:\n # Data URL is corrupted.\n url_data = url_data[:-1]\n if not request.param[1]:\n # Data is corrupted.\n data = data[:-1]\n if not request.param[2]:\n # Checksum URL is corrupted.\n url_checksum = url_checksum[:-1]\n if not request.param[3]:\n # Checksum is corrupted.\n checksum = checksum[:-1]\n\n _add_mock_response(url_data, data)\n _add_mock_response(url_checksum, checksum)\n yield request.param, url_data, url_checksum\n responses.reset()\n\n\ndef _dl(path):\n assert path\n download_file(_URL, path)\n with open(path, 'rb') as f:\n data = f.read()\n return data\n\n\ndef _check(data):\n ae(np.frombuffer(data, np.float32), _DATA)\n\n\n#------------------------------------------------------------------------------\n# Test utility functions\n#------------------------------------------------------------------------------\n\[email protected]\ndef test_check_md5_of_url(tempdir, mock_url):\n output_path = Path(tempdir) / 'data'\n download_file(_URL, output_path)\n assert _check_md5_of_url(output_path, _URL)\n\n\n#------------------------------------------------------------------------------\n# Test download functions\n#------------------------------------------------------------------------------\n\[email protected]\ndef test_download_not_found(tempdir):\n path = Path(tempdir) / 'test'\n with raises(Exception):\n download_file(_URL + '_notfound', path)\n\n\[email protected]\ndef test_download_already_exists_invalid(tempdir, mock_url):\n with captured_logging() as buf:\n path = Path(tempdir) / 'test'\n # Create empty file.\n open(path, 'a').close()\n _check(_dl(path))\n assert 'redownload' in buf.getvalue()\n\n\[email protected]\ndef test_download_already_exists_valid(tempdir, mock_url):\n with captured_logging() as buf:\n path = Path(tempdir) / 'test'\n # Create valid file.\n with open(path, 'ab') as f:\n f.write(_DATA.tostring())\n _check(_dl(path))\n assert 'skip' in buf.getvalue()\n\n\[email 
protected]\ndef test_download_file(tempdir, mock_urls):\n path = Path(tempdir) / 'test'\n param, url_data, url_checksum = mock_urls\n data_here, data_valid, checksum_here, checksum_valid = param\n\n assert_succeeds = (data_here and data_valid and\n ((checksum_here == checksum_valid) or\n (not(checksum_here) and checksum_valid)))\n\n download_succeeds = (assert_succeeds or (data_here and\n (not(data_valid) and not(checksum_here))))\n\n if download_succeeds:\n data = _dl(path)\n else:\n with raises(Exception):\n data = _dl(path)\n\n if assert_succeeds:\n _check(data)\n\n\ndef test_download_test_file(tempdir):\n name = 'test/test-4ch-1s.dat'\n path = download_test_file(name, config_dir=tempdir)\n assert path.exists()\n assert path.stat().st_size == 160000\n path = download_test_file(name, config_dir=tempdir)\n"
] |
[
[
"numpy.frombuffer",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hieunq95/federated
|
[
"15402997ce7fb35d782d715758acf82767206916",
"15402997ce7fb35d782d715758acf82767206916"
] |
[
"tensorflow_federated/python/core/impl/transformations_test.py",
"tensorflow_federated/python/tensorflow_libs/tensor_utils_test.py"
] |
[
"# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.impl import computation_building_blocks\nfrom tensorflow_federated.python.core.impl import computation_constructing_utils\nfrom tensorflow_federated.python.core.impl import context_stack_impl\nfrom tensorflow_federated.python.core.impl import intrinsic_defs\nfrom tensorflow_federated.python.core.impl import tensorflow_serialization\nfrom tensorflow_federated.python.core.impl import transformation_utils\nfrom tensorflow_federated.python.core.impl import transformations\nfrom tensorflow_federated.python.core.impl import type_utils\n\nRENAME_PREFIX = '_variable'\n\n\ndef _create_called_federated_aggregate(value, zero, accumulate, merge, report):\n r\"\"\"Creates a to call a federated aggregate.\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp, Comp, Comp, Comp, Comp]\n\n Args:\n value: A `computation_building_blocks.ComputationBuildingBlock` to use as\n the value of the federated aggregate intrinsic.\n zero: A `computation_building_blocks.ComputationBuildingBlock` to use as the\n zero of the federated aggregate intrinsic.\n accumulate: A `computation_building_blocks.ComputationBuildingBlock` to use\n as the accumulate of the federated aggregate intrinsic.\n merge: A `computation_building_blocks.ComputationBuildingBlock` to use as\n the merge of the federated aggregate intrinsic.\n report: A `computation_building_blocks.ComputationBuildingBlock` to use as\n the report of the federated aggregate intrinsic.\n\n Returns:\n A `computation_building_blocks.Call`.\n \"\"\"\n py_typecheck.check_type(value,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(zero,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(accumulate,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(merge,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(report,\n computation_building_blocks.ComputationBuildingBlock)\n result_type = computation_types.FederatedType(report.type_signature.result,\n placements.SERVER, True)\n intrinsic_type = computation_types.FunctionType((\n value.type_signature,\n zero.type_signature,\n accumulate.type_signature,\n merge.type_signature,\n report.type_signature,\n ), result_type)\n intrinsic = computation_building_blocks.Intrinsic(\n intrinsic_defs.FEDERATED_AGGREGATE.uri, intrinsic_type)\n arg = computation_building_blocks.Tuple((\n value,\n zero,\n accumulate,\n merge,\n report,\n 
))\n return computation_building_blocks.Call(intrinsic, arg)\n\n\ndef _create_called_federated_apply(fn, arg):\n r\"\"\"Creates a to call a federated apply.\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp, Comp]\n\n Args:\n fn: A functional `computation_building_blocks.ComputationBuildingBlock` to\n use as the function.\n arg: A `computation_building_blocks.ComputationBuildingBlock` to use as the\n argument.\n\n Returns:\n A `computation_building_blocks.Call`.\n \"\"\"\n py_typecheck.check_type(fn,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(arg,\n computation_building_blocks.ComputationBuildingBlock)\n if not type_utils.is_assignable_from(fn.parameter_type,\n arg.type_signature.member):\n raise TypeError(\n 'The parameter of the function is of type {}, and the argument is of '\n 'an incompatible type {}.'.format(\n str(fn.parameter_type), str(arg.type_signature.member)))\n result_type = computation_types.FederatedType(fn.type_signature.result,\n placements.SERVER, True)\n intrinsic_type = computation_types.FunctionType(\n (fn.type_signature, arg.type_signature), result_type)\n intrinsic = computation_building_blocks.Intrinsic(\n intrinsic_defs.FEDERATED_APPLY.uri, intrinsic_type)\n tup = computation_building_blocks.Tuple((fn, arg))\n return computation_building_blocks.Call(intrinsic, tup)\n\n\ndef _create_called_sequence_map(fn, arg):\n r\"\"\"Creates a to call a sequence map.\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp, Comp]\n\n Args:\n fn: A functional `computation_building_blocks.ComputationBuildingBlock` to\n use as the function.\n arg: A `computation_building_blocks.ComputationBuildingBlock` to use as the\n argument.\n\n Returns:\n A `computation_building_blocks.Call`.\n \"\"\"\n py_typecheck.check_type(fn,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(arg,\n computation_building_blocks.ComputationBuildingBlock)\n if not type_utils.is_assignable_from(fn.parameter_type,\n arg.type_signature.element):\n raise TypeError(\n 'The parameter of the function is of type {}, and the argument is of '\n 'an incompatible type {}.'.format(\n str(fn.parameter_type), str(arg.type_signature.element)))\n result_type = computation_types.SequenceType(fn.type_signature.result)\n intrinsic_type = computation_types.FunctionType(\n (fn.type_signature, arg.type_signature), result_type)\n intrinsic = computation_building_blocks.Intrinsic(\n intrinsic_defs.SEQUENCE_MAP.uri, intrinsic_type)\n tup = computation_building_blocks.Tuple((fn, arg))\n return computation_building_blocks.Call(intrinsic, tup)\n\n\ndef _create_chained_calls(functions, arg):\n r\"\"\"Creates a chain of `n` calls.\n\n Call\n / \\\n Comp ...\n \\\n Call\n / \\\n Comp Comp\n\n The first functional computation in `functions` must have a parameter type\n that is assignable from the type of `arg`, each other functional computation\n in `functions` must have a parameter type that is assignable from the previous\n functional computations result type.\n\n Args:\n functions: A Python list of functional computations.\n arg: A `computation_building_blocks.ComputationBuildingBlock`.\n\n Returns:\n A `computation_building_blocks.Call`.\n \"\"\"\n py_typecheck.check_type(arg,\n computation_building_blocks.ComputationBuildingBlock)\n for fn in functions:\n py_typecheck.check_type(\n fn, computation_building_blocks.ComputationBuildingBlock)\n if not type_utils.is_assignable_from(fn.parameter_type, arg.type_signature):\n raise TypeError(\n 'The parameter of the function is of type 
{}, and the argument is of '\n 'an incompatible type {}.'.format(\n str(fn.parameter_type), str(arg.type_signature)))\n call = computation_building_blocks.Call(fn, arg)\n arg = call\n return call\n\n\ndef _create_chained_called_federated_map(functions, arg):\n r\"\"\"Creates a chain of `n` calls to federated map.\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp, Comp]\n \\\n ...\n \\\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp, Comp]\n\n The first functional computation in `functions` must have a parameter type\n that is assignable from the type of `arg`, each other functional computation\n in `functions` must have a parameter type that is assignable from the previous\n functional computations result type.\n\n Args:\n functions: A Python list of functional computations.\n arg: A `computation_building_blocks.ComputationBuildingBlock`.\n\n Returns:\n A `computation_building_blocks.Call`.\n \"\"\"\n py_typecheck.check_type(arg,\n computation_building_blocks.ComputationBuildingBlock)\n for fn in functions:\n py_typecheck.check_type(\n fn, computation_building_blocks.ComputationBuildingBlock)\n if not type_utils.is_assignable_from(fn.parameter_type,\n arg.type_signature.member):\n raise TypeError(\n 'The parameter of the function is of type {}, and the argument is of '\n 'an incompatible type {}.'.format(\n str(fn.parameter_type), str(arg.type_signature.member)))\n call = computation_constructing_utils.create_federated_map(fn, arg)\n arg = call\n return call\n\n\ndef _create_lambda_to_identity(parameter_name, parameter_type):\n r\"\"\"Creates a lambda to return the argument.\n\n Lambda(x)\n \\\n Ref(x)\n\n Args:\n parameter_name: The name of the parameter.\n parameter_type: The type of the parameter.\n\n Returns:\n A `computation_building_blocks.Lambda`.\n \"\"\"\n ref = computation_building_blocks.Reference(parameter_name, parameter_type)\n return computation_building_blocks.Lambda(ref.name, ref.type_signature, ref)\n\n\ndef _create_dummy_block(comp):\n r\"\"\"Creates a dummy block.\n\n Block\n / \\\n local=Data(x) Comp\n\n Args:\n comp: A `computation_building_blocks.ComputationBuildingBlock`.\n\n Returns:\n A dummy `computation_building_blocks.Block`.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n data = computation_building_blocks.Data('x', tf.int32)\n return computation_building_blocks.Block([('local', data)], comp)\n\n\ndef _create_lambda_to_dummy_intrinsic(uri='dummy', type_spec=tf.int32):\n r\"\"\"Creates a lambda to call a dummy intrinsic.\n\n Lambda(x)\n \\\n Call\n / \\\n Intrinsic Ref(x)\n\n Args:\n uri: The URI of the intrinsic.\n type_spec: The type of the parameter.\n\n Returns:\n A `computation_building_blocks.Lambda`.\n \"\"\"\n py_typecheck.check_type(type_spec, tf.dtypes.DType)\n intrinsic_type = computation_types.FunctionType(type_spec, type_spec)\n intrinsic = computation_building_blocks.Intrinsic(uri, intrinsic_type)\n ref = computation_building_blocks.Reference('x', type_spec)\n call = computation_building_blocks.Call(intrinsic, ref)\n return computation_building_blocks.Lambda(ref.name, ref.type_signature, call)\n\n\ndef _create_lambda_to_dummy_cast(parameter_type, result_type):\n r\"\"\"Creates a lambda to cast from `parameter_type` to `result_type`.\n\n Lambda(x)\n \\\n Data(y)\n\n Args:\n parameter_type: The type of the argument.\n result_type: The type to cast the argument to.\n\n Returns:\n A `computation_building_blocks.Lambda`.\n \"\"\"\n py_typecheck.check_type(parameter_type, tf.dtypes.DType)\n 
py_typecheck.check_type(result_type, tf.dtypes.DType)\n arg = computation_building_blocks.Data('y', result_type)\n return computation_building_blocks.Lambda('x', parameter_type, arg)\n\n\ndef _create_dummy_called_federated_aggregate():\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n value = computation_building_blocks.Data('v', value_type)\n zero = computation_building_blocks.Data('z', tf.int32)\n accumulate_type = computation_types.FunctionType((tf.int32, tf.int32),\n tf.int32)\n accumulate_result = computation_building_blocks.Data('a', tf.int32)\n accumulate = computation_building_blocks.Lambda('x', accumulate_type,\n accumulate_result)\n merge_type = computation_types.FunctionType((tf.int32, tf.int32), tf.int32)\n merge_result = computation_building_blocks.Data('m', tf.int32)\n merge = computation_building_blocks.Lambda('x', merge_type, merge_result)\n report_type = computation_types.FederatedType(tf.int32, placements.SERVER,\n True)\n report_result = computation_building_blocks.Data('r', tf.int32)\n report = computation_building_blocks.Lambda('x', report_type, report_result)\n return _create_called_federated_aggregate(value, zero, accumulate, merge,\n report)\n\n\ndef _create_dummy_called_federated_apply(parameter_name='x',\n parameter_type=tf.int32,\n argument_name='y'):\n fn = _create_lambda_to_identity(parameter_name, parameter_type)\n arg_type = computation_types.FederatedType(parameter_type, placements.SERVER,\n True)\n arg = computation_building_blocks.Data(argument_name, arg_type)\n return _create_called_federated_apply(fn, arg)\n\n\ndef _create_dummy_called_federated_map(parameter_name='x',\n parameter_type=tf.int32,\n argument_name='y'):\n fn = _create_lambda_to_identity(parameter_name, parameter_type)\n arg_type = computation_types.FederatedType(parameter_type, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data(argument_name, arg_type)\n return computation_constructing_utils.create_federated_map(fn, arg)\n\n\ndef _create_dummy_called_sequence_map(parameter_name='x',\n parameter_type=tf.int32,\n argument_name='y'):\n fn = _create_lambda_to_identity(parameter_name, parameter_type)\n arg_type = computation_types.SequenceType(parameter_type)\n arg = computation_building_blocks.Data(argument_name, arg_type)\n return _create_called_sequence_map(fn, arg)\n\n\ndef _create_dummy_called_intrinsic(uri='dummy', type_spec=tf.int32):\n py_typecheck.check_type(type_spec, tf.dtypes.DType)\n intrinsic_type = computation_types.FunctionType(type_spec, type_spec)\n intrinsic = computation_building_blocks.Intrinsic(uri, intrinsic_type)\n arg = computation_building_blocks.Data('x', type_spec)\n return computation_building_blocks.Call(intrinsic, arg)\n\n\ndef _has_unique_names(comp):\n \"\"\"Checks that each variable of `comp` is bound at most once.\"\"\"\n names = set()\n # TODO(b/129791812): Cleanup Python 2 and 3 compatibility\n unique = [True]\n\n def _transform(comp):\n if unique[0]:\n if isinstance(comp, computation_building_blocks.Block):\n for name, _ in comp.locals:\n if name in names:\n unique[0] = False\n names.add(name)\n elif isinstance(comp, computation_building_blocks.Lambda):\n if comp.parameter_name in names:\n unique[0] = False\n names.add(comp.parameter_name)\n return comp, False\n\n transformation_utils.transform_postorder(comp, _transform)\n\n return unique[0]\n\n\nclass TransformationsTest(parameterized.TestCase):\n\n def test_replace_compiled_computations_names_raises_type_error(self):\n with 
self.assertRaises(TypeError):\n transformations.replace_compiled_computations_names_with_unique_names(\n None)\n\n def test_replace_compiled_computations_names_replaces_name(self):\n fn = lambda: tf.constant(1)\n tf_comp, _ = tensorflow_serialization.serialize_py_fn_as_tf_computation(\n fn, None, context_stack_impl.context_stack)\n compiled_comp = computation_building_blocks.CompiledComputation(tf_comp)\n comp = compiled_comp\n\n transformed_comp, modified = transformations.replace_compiled_computations_names_with_unique_names(\n comp)\n\n self.assertNotEqual(transformed_comp._name, comp._name)\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_compiled_computations_names_replaces_multiple_names(self):\n elements = []\n for _ in range(10):\n fn = lambda: tf.constant(1)\n tf_comp, _ = tensorflow_serialization.serialize_py_fn_as_tf_computation(\n fn, None, context_stack_impl.context_stack)\n compiled_comp = computation_building_blocks.CompiledComputation(tf_comp)\n elements.append(compiled_comp)\n compiled_comps = computation_building_blocks.Tuple(elements)\n comp = compiled_comps\n\n transformed_comp, modified = transformations.replace_compiled_computations_names_with_unique_names(\n comp)\n\n comp_names = [element._name for element in comp]\n transformed_comp_names = [element._name for element in transformed_comp]\n self.assertNotEqual(transformed_comp_names, comp_names)\n self.assertEqual(\n len(transformed_comp_names), len(set(transformed_comp_names)),\n 'The transformed computation names are not unique: {}.'.format(\n transformed_comp_names))\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_compiled_computations_names_does_not_replace_other_name(\n self):\n comp = computation_building_blocks.Reference('name', tf.int32)\n\n transformed_comp, modified = transformations.replace_compiled_computations_names_with_unique_names(\n comp)\n\n self.assertEqual(transformed_comp._name, comp._name)\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_intrinsic_raises_type_error_none_comp(self):\n uri = 'dummy'\n body = lambda x: x\n\n with self.assertRaises(TypeError):\n transformations.replace_intrinsic_with_callable(\n None, uri, body, context_stack_impl.context_stack)\n\n def test_replace_intrinsic_raises_type_error_none_uri(self):\n comp = _create_lambda_to_dummy_intrinsic()\n body = lambda x: x\n\n with self.assertRaises(TypeError):\n transformations.replace_intrinsic_with_callable(\n comp, None, body, context_stack_impl.context_stack)\n\n def test_replace_intrinsic_raises_type_error_none_body(self):\n comp = _create_lambda_to_dummy_intrinsic()\n uri = 'dummy'\n\n with self.assertRaises(TypeError):\n transformations.replace_intrinsic_with_callable(\n comp, uri, None, context_stack_impl.context_stack)\n\n def test_replace_intrinsic_raises_type_error_none_context_stack(self):\n comp = _create_lambda_to_dummy_intrinsic()\n uri = 'dummy'\n body = lambda x: x\n\n with self.assertRaises(TypeError):\n transformations.replace_intrinsic_with_callable(comp, uri, body, None)\n\n def test_replace_intrinsic_replaces_intrinsic(self):\n comp = _create_lambda_to_dummy_intrinsic()\n uri = 'dummy'\n body = lambda x: x\n\n transformed_comp, modified = transformations.replace_intrinsic_with_callable(\n comp, uri, body, context_stack_impl.context_stack)\n\n self.assertEqual(comp.tff_repr, '(x -> 
dummy(x))')\n self.assertEqual(transformed_comp.tff_repr,\n '(x -> (dummy_arg -> dummy_arg)(x))')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_intrinsic_replaces_nested_intrinsic(self):\n fn = _create_lambda_to_dummy_intrinsic()\n block = _create_dummy_block(fn)\n comp = block\n uri = 'dummy'\n body = lambda x: x\n\n transformed_comp, modified = transformations.replace_intrinsic_with_callable(\n comp, uri, body, context_stack_impl.context_stack)\n\n self.assertEqual(comp.tff_repr, '(let local=x in (x -> dummy(x)))')\n self.assertEqual(transformed_comp.tff_repr,\n '(let local=x in (x -> (dummy_arg -> dummy_arg)(x)))')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_intrinsic_replaces_chained_intrinsics(self):\n fn = _create_lambda_to_dummy_intrinsic(type_spec=tf.int32)\n arg = computation_building_blocks.Data('x', tf.int32)\n call = _create_chained_calls([fn, fn], arg)\n comp = call\n uri = 'dummy'\n body = lambda x: x\n\n transformed_comp, modified = transformations.replace_intrinsic_with_callable(\n comp, uri, body, context_stack_impl.context_stack)\n\n self.assertEqual(comp.tff_repr, '(x -> dummy(x))((x -> dummy(x))(x))')\n self.assertEqual(\n transformed_comp.tff_repr,\n '(x -> (dummy_arg -> dummy_arg)(x))((x -> (dummy_arg -> dummy_arg)(x))(x))'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_intrinsic_does_not_replace_other_intrinsic(self):\n comp = _create_lambda_to_dummy_intrinsic()\n uri = 'other'\n body = lambda x: x\n\n transformed_comp, modified = transformations.replace_intrinsic_with_callable(\n comp, uri, body, context_stack_impl.context_stack)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, '(x -> dummy(x))')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_called_lambda_raises_type_error(self):\n with self.assertRaises(TypeError):\n transformations.replace_called_lambda_with_block(None)\n\n def test_replace_called_lambda_replaces_called_lambda(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg = computation_building_blocks.Data('y', tf.int32)\n call = computation_building_blocks.Call(fn, arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_called_lambda_with_block(\n comp)\n\n self.assertEqual(comp.tff_repr, '(x -> x)(y)')\n self.assertEqual(transformed_comp.tff_repr, '(let x=y in x)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_called_lambda_replaces_nested_called_lambda(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg = computation_building_blocks.Data('y', tf.int32)\n call = computation_building_blocks.Call(fn, arg)\n block = _create_dummy_block(call)\n comp = block\n\n transformed_comp, modified = transformations.replace_called_lambda_with_block(\n comp)\n\n self.assertEqual(comp.tff_repr, '(let local=x in (x -> x)(y))')\n self.assertEqual(transformed_comp.tff_repr,\n '(let local=x in (let x=y in x))')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_called_lambda_replaces_chained_called_lambdas(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg = computation_building_blocks.Data('y', tf.int32)\n call = 
_create_chained_calls([fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_called_lambda_with_block(\n comp)\n\n self.assertEqual(comp.tff_repr, '(x -> x)((x -> x)(y))')\n self.assertEqual(transformed_comp.tff_repr, '(let x=(let x=y in x) in x)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_called_lambda_does_not_replace_uncalled_lambda(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n comp = fn\n\n transformed_comp, modified = transformations.replace_called_lambda_with_block(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, '(x -> x)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_called_lambda_does_not_replace_separated_called_lambda(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n block = _create_dummy_block(fn)\n arg = computation_building_blocks.Data('y', tf.int32)\n call = computation_building_blocks.Call(block, arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_called_lambda_with_block(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, '(let local=x in (x -> x))(y)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_remove_mapped_or_applied_identity_raises_type_error(self):\n with self.assertRaises(TypeError):\n transformations.remove_mapped_or_applied_identity(None)\n\n # pyformat: disable\n @parameterized.named_parameters(\n ('federated_apply',\n intrinsic_defs.FEDERATED_APPLY.uri,\n _create_dummy_called_federated_apply),\n ('federated_map',\n intrinsic_defs.FEDERATED_MAP.uri,\n _create_dummy_called_federated_map),\n ('sequence_map',\n intrinsic_defs.SEQUENCE_MAP.uri,\n _create_dummy_called_sequence_map))\n # pyformat: enable\n def test_remove_mapped_or_applied_identity_removes_identity(\n self, uri, comp_factory):\n call = comp_factory()\n comp = call\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(comp.tff_repr, '{}(<(x -> x),y>)'.format(uri))\n self.assertEqual(transformed_comp.tff_repr, 'y')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_remove_mapped_or_applied_identity_replaces_federated_maps_with_named_result(\n self):\n parameter_type = [('a', tf.int32), ('b', tf.int32)]\n fn = _create_lambda_to_identity('x', parameter_type)\n arg_type = computation_types.FederatedType(parameter_type,\n placements.CLIENTS, False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = computation_constructing_utils.create_federated_map(fn, arg)\n comp = call\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(comp.tff_repr, 'federated_map(<(x -> x),y>)')\n self.assertEqual(transformed_comp.tff_repr, 'y')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_remove_mapped_or_applied_identity_removes_nested_federated_map(self):\n call = _create_dummy_called_federated_map()\n block = _create_dummy_block(call)\n comp = block\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(comp.tff_repr,\n '(let local=x in federated_map(<(x -> x),y>))')\n 
self.assertEqual(transformed_comp.tff_repr, '(let local=x in y)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_remove_mapped_or_applied_identity_removes_chained_federated_maps(\n self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(comp.tff_repr,\n 'federated_map(<(x -> x),federated_map(<(x -> x),y>)>)')\n self.assertEqual(transformed_comp.tff_repr, 'y')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_remove_mapped_or_applied_identity_does_not_remove_dummy_intrinsic(\n self):\n comp = _create_dummy_called_intrinsic()\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, 'dummy(x)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_remove_mapped_or_applied_identity_does_not_remove_called_lambda(\n self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg = computation_building_blocks.Data('y', tf.int32)\n call = computation_building_blocks.Call(fn, arg)\n comp = call\n\n transformed_comp, modified = transformations.remove_mapped_or_applied_identity(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, '(x -> x)(y)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_chained_federated_maps_raises_type_error(self):\n with self.assertRaises(TypeError):\n transformations.replace_chained_federated_maps_with_federated_map(None)\n\n def test_replace_chained_federated_maps_replaces_federated_maps(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(comp.tff_repr,\n 'federated_map(<(x -> x),federated_map(<(x -> x),y>)>)')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> fn[1](fn[0](arg)))),y>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_federated_maps_with_different_names(\n self):\n fn_1 = _create_lambda_to_identity('a', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('b', arg_type)\n fn_2 = _create_lambda_to_identity('c', tf.int32)\n call = _create_chained_called_federated_map([fn_1, fn_2], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(comp.tff_repr,\n 'federated_map(<(c -> c),federated_map(<(a -> a),b>)>)')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(a -> a),(c -> c)> 
in (arg -> fn[1](fn[0](arg)))),b>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_federated_maps_with_different_types(\n self):\n fn_1 = _create_lambda_to_dummy_cast(tf.int32, tf.float32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n fn_2 = _create_lambda_to_identity('x', tf.float32)\n call = _create_chained_called_federated_map([fn_1, fn_2], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(comp.tff_repr,\n 'federated_map(<(x -> x),federated_map(<(x -> y),y>)>)')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> y),(x -> x)> in (arg -> fn[1](fn[0](arg)))),y>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_federated_maps_with_named_result(\n self):\n parameter_type = [('a', tf.int32), ('b', tf.int32)]\n fn = _create_lambda_to_identity('x', parameter_type)\n arg_type = computation_types.FederatedType(parameter_type,\n placements.CLIENTS, False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(comp.tff_repr,\n 'federated_map(<(x -> x),federated_map(<(x -> x),y>)>)')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> fn[1](fn[0](arg)))),y>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_federated_maps_with_unbound_references(\n self):\n ref = computation_building_blocks.Reference('arg', tf.int32)\n fn = computation_building_blocks.Lambda('x', tf.int32, ref)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n 'federated_map(<(x -> arg),federated_map(<(x -> arg),y>)>)')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> arg),(x -> arg)> in (arg -> fn[1](fn[0](arg)))),y>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_nested_federated_maps(self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn], arg)\n block = _create_dummy_block(call)\n comp = block\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '(let local=x in federated_map(<(x -> x),federated_map(<(x -> x),y>)>))'\n )\n self.assertEqual(\n transformed_comp.tff_repr,\n '(let local=x in federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> fn[1](fn[0](arg)))),y>))'\n )\n 
self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_replaces_multiple_chained_federated_maps(\n self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = _create_chained_called_federated_map([fn, fn, fn], arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n 'federated_map(<(x -> x),federated_map(<(x -> x),federated_map(<(x -> x),y>)>)>)'\n )\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(let fn=<(x -> x),(x -> x)> in (arg -> fn[1](fn[0](arg)))),(x -> x)> in (arg -> fn[1](fn[0](arg)))),y>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_chained_federated_maps_does_not_replace_one_federated_map(\n self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = computation_constructing_utils.create_federated_map(fn, arg)\n comp = call\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, 'federated_map(<(x -> x),y>)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_chained_federated_maps_does_not_replace_separated_federated_maps(\n self):\n fn = _create_lambda_to_identity('x', tf.int32)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call_1 = computation_constructing_utils.create_federated_map(fn, arg)\n block = _create_dummy_block(call_1)\n call_2 = computation_constructing_utils.create_federated_map(fn, block)\n comp = call_2\n\n transformed_comp, modified = transformations.replace_chained_federated_maps_with_federated_map(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(x -> x),(let local=x in federated_map(<(x -> x),y>))>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_tuple_intrinsics_raises_type_error(self):\n with self.assertRaises(TypeError):\n transformations.replace_tuple_intrinsics_with_intrinsic(None)\n\n def test_replace_tuple_intrinsics_replaces_federated_aggregates(self):\n elements = [_create_dummy_called_federated_aggregate() for _ in range(2)]\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_aggregate(<v,z,(x -> a),(x -> m),(x -> r)>),federated_aggregate(<v,z,(x -> a),(x -> m),(x -> r)>)>'\n )\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_aggregate(<<v,v>,<z,z>,(let fn=<(x -> a),(x -> a)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),(let fn=<(x -> m),(x -> m)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),(let fn=<(x -> r),(x -> r)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>))>)'\n )\n 
self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_federated_maps(self):\n elements = [_create_dummy_called_federated_map() for _ in range(2)]\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<y,y>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_federated_maps_with_different_names(\n self):\n elements = (\n _create_dummy_called_federated_map(\n parameter_name='a', argument_name='b'),\n _create_dummy_called_federated_map(\n parameter_name='c', argument_name='d'),\n )\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_map(<(a -> a),b>),federated_map(<(c -> c),d>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(a -> a),(c -> c)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<b,d>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_federated_maps_with_different_types(\n self):\n elements = (\n _create_dummy_called_federated_map(parameter_type=tf.int32),\n _create_dummy_called_federated_map(parameter_type=tf.float32),\n )\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<y,y>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_federated_maps_with_named_result(\n self):\n parameter_type = [('a', tf.int32), ('b', tf.int32)]\n fn = _create_lambda_to_identity('x', parameter_type)\n arg_type = computation_types.FederatedType(parameter_type,\n placements.CLIENTS, False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = computation_constructing_utils.create_federated_map(fn, arg)\n elements = [call, call]\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<y,y>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_federated_maps_with_unbound_reference(\n self):\n ref = computation_building_blocks.Reference('arg', tf.int32)\n fn = computation_building_blocks.Lambda('x', tf.int32, ref)\n arg_type = computation_types.FederatedType(tf.int32, 
placements.CLIENTS,\n False)\n arg = computation_building_blocks.Data('y', arg_type)\n call = computation_constructing_utils.create_federated_map(fn, arg)\n elements = [call, call]\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<federated_map(<(x -> arg),y>),federated_map(<(x -> arg),y>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> arg),(x -> arg)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<y,y>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_named_federated_maps(self):\n elements = (\n ('a', _create_dummy_called_federated_map()),\n ('b', _create_dummy_called_federated_map()),\n )\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<a=federated_map(<(x -> x),y>),b=federated_map(<(x -> x),y>)>')\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<a=(x -> x),b=(x -> x)> in (arg -> <a=fn[0](arg[0]),b=fn[1](arg[1])>)),<a=y,b=y>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_nested_federated_maps(self):\n elements = [_create_dummy_called_federated_map() for _ in range(2)]\n calls = computation_building_blocks.Tuple(elements)\n block = _create_dummy_block(calls)\n comp = block\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '(let local=x in <federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>)'\n )\n self.assertEqual(\n transformed_comp.tff_repr,\n '(let local=x in federated_map(<(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<y,y>>))'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_multiple_federated_maps(self):\n comp_elements = []\n for _ in range(2):\n call_elements = [_create_dummy_called_federated_map() for _ in range(2)]\n calls = computation_building_blocks.Tuple(call_elements)\n comp_elements.append(calls)\n comps = computation_building_blocks.Tuple(comp_elements)\n comp = comps\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(\n comp.tff_repr,\n '<<federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>,<federated_map(<(x -> x),y>),federated_map(<(x -> x),y>)>>'\n )\n self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),(let fn=<(x -> x),(x -> x)> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>))> in (arg -> <fn[0](arg[0]),fn[1](arg[1])>)),<<y,y>,<y,y>>>)'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_replaces_one_federated_map(self):\n elements = (_create_dummy_called_federated_map(),)\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(comp.tff_repr, '<federated_map(<(x -> x),y>)>')\n 
self.assertEqual(\n transformed_comp.tff_repr,\n 'federated_map(<(let fn=<(x -> x)> in (arg -> <fn[0](arg[0])>)),<y>>)')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertTrue(modified)\n\n def test_replace_tuple_intrinsics_does_not_replace_different_intrinsics(self):\n elements = (\n _create_dummy_called_federated_aggregate(),\n _create_dummy_called_federated_map(),\n )\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(\n transformed_comp.tff_repr,\n '<federated_aggregate(<v,z,(x -> a),(x -> m),(x -> r)>),federated_map(<(x -> x),y>)>'\n )\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_replace_tuple_intrinsics_does_not_replace_dummy_intrinsics(self):\n elements = [_create_dummy_called_intrinsic() for _ in range(2)]\n calls = computation_building_blocks.Tuple(elements)\n comp = calls\n\n transformed_comp, modified = transformations.replace_tuple_intrinsics_with_intrinsic(\n comp)\n\n self.assertEqual(transformed_comp.tff_repr, comp.tff_repr)\n self.assertEqual(transformed_comp.tff_repr, '<dummy(x),dummy(x)>')\n self.assertEqual(transformed_comp.type_signature, comp.type_signature)\n self.assertFalse(modified)\n\n def test_merge_chained_blocks_fails_on_none(self):\n with self.assertRaises(TypeError):\n transformations.merge_chained_blocks(None)\n\n def test_merge_chained_blocks_single_level_of_nesting(self):\n input1 = computation_building_blocks.Reference('input1', tf.int32)\n result = computation_building_blocks.Reference('result', tf.int32)\n block1 = computation_building_blocks.Block([('result', input1)], result)\n input2 = computation_building_blocks.Data('input2', tf.int32)\n block2 = computation_building_blocks.Block([('input1', input2)], block1)\n self.assertEqual(block2.tff_repr,\n '(let input1=input2 in (let result=input1 in result))')\n merged_blocks, modified = transformations.merge_chained_blocks(block2)\n self.assertEqual(merged_blocks.tff_repr,\n '(let input1=input2,result=input1 in result)')\n self.assertTrue(modified)\n\n def test_merge_chained_blocks_leaves_names(self):\n input1 = computation_building_blocks.Data('input1', tf.int32)\n result_tuple = computation_building_blocks.Tuple([\n ('a', computation_building_blocks.Data('result_a', tf.int32)),\n ('b', computation_building_blocks.Data('result_b', tf.int32))\n ])\n block1 = computation_building_blocks.Block([('x', input1)], result_tuple)\n result_block = block1\n input2 = computation_building_blocks.Data('input2', tf.int32)\n block2 = computation_building_blocks.Block([('y', input2)], result_block)\n self.assertEqual(\n block2.tff_repr,\n '(let y=input2 in (let x=input1 in <a=result_a,b=result_b>))')\n merged, modified = transformations.merge_chained_blocks(block2)\n self.assertEqual(merged.tff_repr,\n '(let y=input2,x=input1 in <a=result_a,b=result_b>)')\n self.assertTrue(modified)\n\n def test_merge_chained_blocks_leaves_separated_chained_blocks_alone(self):\n input1 = computation_building_blocks.Data('input1', tf.int32)\n result = computation_building_blocks.Data('result', tf.int32)\n block1 = computation_building_blocks.Block([('x', input1)], result)\n result_block = block1\n result_tuple = computation_building_blocks.Tuple([result_block])\n input2 = computation_building_blocks.Data('input2', tf.int32)\n block2 = 
computation_building_blocks.Block([('y', input2)], result_tuple)\n self.assertEqual(block2.tff_repr,\n '(let y=input2 in <(let x=input1 in result)>)')\n merged, modified = transformations.merge_chained_blocks(block2)\n self.assertEqual(merged.tff_repr,\n '(let y=input2 in <(let x=input1 in result)>)')\n self.assertFalse(modified)\n\n def test_merge_chained_blocks_two_levels_of_nesting(self):\n input1 = computation_building_blocks.Reference('input1', tf.int32)\n result = computation_building_blocks.Reference('result', tf.int32)\n block1 = computation_building_blocks.Block([('result', input1)], result)\n input2 = computation_building_blocks.Reference('input2', tf.int32)\n block2 = computation_building_blocks.Block([('input1', input2)], block1)\n input3 = computation_building_blocks.Data('input3', tf.int32)\n block3 = computation_building_blocks.Block([('input2', input3)], block2)\n self.assertEqual(\n block3.tff_repr,\n '(let input2=input3 in (let input1=input2 in (let result=input1 in result)))'\n )\n merged_blocks, modified = transformations.merge_chained_blocks(block3)\n self.assertEqual(\n merged_blocks.tff_repr,\n '(let input2=input3,input1=input2,result=input1 in result)')\n self.assertTrue(modified)\n\n def test_replace_selection_from_tuple_fails_on_none_comp(self):\n with self.assertRaises(TypeError):\n transformations.replace_selection_from_tuple_with_tuple_element(None)\n\n def test_replace_selection_from_tuple_leaves_selection_from_ref_by_index_alone(\n self):\n ref_to_tuple = computation_building_blocks.Reference(\n 'tup', [('a', tf.int32), ('b', tf.float32)])\n a_selected = computation_building_blocks.Selection(ref_to_tuple, index=0)\n b_selected = computation_building_blocks.Selection(ref_to_tuple, index=1)\n\n a_returned, a_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n a_selected)\n b_returned, b_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n b_selected)\n\n self.assertFalse(a_transformed)\n self.assertEqual(a_returned.proto, a_selected.proto)\n self.assertFalse(b_transformed)\n self.assertEqual(b_returned.proto, b_selected.proto)\n\n def test_replace_selection_from_tuple_leaves_selection_from_ref_by_name_alone(\n self):\n ref_to_tuple = computation_building_blocks.Reference(\n 'tup', [('a', tf.int32), ('b', tf.float32)])\n a_selected = computation_building_blocks.Selection(ref_to_tuple, name='a')\n b_selected = computation_building_blocks.Selection(ref_to_tuple, name='b')\n\n a_returned, a_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n a_selected)\n b_returned, b_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n b_selected)\n\n self.assertFalse(a_transformed)\n self.assertEqual(a_returned.proto, a_selected.proto)\n self.assertFalse(b_transformed)\n self.assertEqual(b_returned.proto, b_selected.proto)\n\n def test_replace_selection_from_tuple_by_index_grabs_correct_element(self):\n x_data = computation_building_blocks.Data('x', tf.int32)\n y_data = computation_building_blocks.Data('y', [('a', tf.float32)])\n tup = computation_building_blocks.Tuple([x_data, y_data])\n x_selected = computation_building_blocks.Selection(tup, index=0)\n y_selected = computation_building_blocks.Selection(tup, index=1)\n\n collapsed_selection_x, x_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n x_selected)\n collapsed_selection_y, y_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n y_selected)\n\n 
self.assertTrue(x_transformed)\n self.assertTrue(y_transformed)\n self.assertEqual(collapsed_selection_x.proto, x_data.proto)\n self.assertEqual(collapsed_selection_y.proto, y_data.proto)\n\n def test_replace_selection_from_tuple_by_name_grabs_correct_element(self):\n x_data = computation_building_blocks.Data('x', tf.int32)\n y_data = computation_building_blocks.Data('y', [('a', tf.float32)])\n tup = computation_building_blocks.Tuple([('a', x_data), ('b', y_data)])\n x_selected = computation_building_blocks.Selection(tup, name='a')\n y_selected = computation_building_blocks.Selection(tup, name='b')\n\n collapsed_selection_x, x_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n x_selected)\n collapsed_selection_y, y_transformed = transformations.replace_selection_from_tuple_with_tuple_element(\n y_selected)\n\n self.assertTrue(x_transformed)\n self.assertTrue(y_transformed)\n self.assertEqual(collapsed_selection_x.proto, x_data.proto)\n self.assertEqual(collapsed_selection_y.proto, y_data.proto)\n\n\nclass UniquifyReferencesTest(absltest.TestCase):\n\n def test_uniquify_references_single_level_block(self):\n x_ref = computation_building_blocks.Reference('x', tf.int32)\n data = computation_building_blocks.Data('data', tf.int32)\n block = computation_building_blocks.Block([('x', data), ('x', x_ref),\n ('x', x_ref)], x_ref)\n self.assertEqual(block.tff_repr, '(let x=data,x=x,x=x in x)')\n renamed = transformations.uniquify_references(block)\n self.assertEqual(\n renamed.tff_repr,\n '(let {0}1=data,{0}2={0}1,{0}3={0}2 in {0}3)'.format(RENAME_PREFIX))\n\n def test_uniquify_references_nested_blocks(self):\n x_ref = computation_building_blocks.Reference('x', tf.int32)\n input1 = computation_building_blocks.Data('input1', tf.int32)\n block1 = computation_building_blocks.Block([('x', input1), ('x', x_ref)],\n x_ref)\n input2 = computation_building_blocks.Data('input2', tf.int32)\n block2 = computation_building_blocks.Block([('x', input2), ('x', x_ref)],\n block1)\n self.assertEqual(\n str(block2), '(let x=input2,x=x in (let x=input1,x=x in x))')\n renamed = transformations.uniquify_references(block2)\n self.assertTrue(_has_unique_names(renamed))\n self.assertEqual(\n renamed.tff_repr,\n '(let {0}1=input2,{0}2={0}1 in (let {0}3=input1,{0}4={0}3 in {0}4))'\n .format(RENAME_PREFIX))\n\n def test_uniquify_references_nested_lambdas(self):\n comp = computation_building_blocks.Data('test', tf.int32)\n input1 = computation_building_blocks.Reference('input1',\n comp.type_signature)\n first_level_call = computation_building_blocks.Call(\n computation_building_blocks.Lambda('input1', input1.type_signature,\n input1), comp)\n input2 = computation_building_blocks.Reference(\n 'input2', first_level_call.type_signature)\n second_level_call = computation_building_blocks.Call(\n computation_building_blocks.Lambda('input2', input2.type_signature,\n input2), first_level_call)\n renamed = transformations.uniquify_references(second_level_call)\n self.assertTrue(_has_unique_names(renamed))\n self.assertEqual(\n renamed.tff_repr,\n '({0}1 -> {0}1)(({0}2 -> {0}2)(test))'.format(RENAME_PREFIX))\n\n def test_uniquify_references_block_lambda_block_lambda(self):\n x_ref = computation_building_blocks.Reference('x', tf.int32)\n inner_lambda = computation_building_blocks.Lambda('x', tf.int32, x_ref)\n called_lambda = computation_building_blocks.Call(inner_lambda, x_ref)\n lower_block = computation_building_blocks.Block([('x', x_ref),\n ('x', x_ref)],\n called_lambda)\n second_lambda = 
computation_building_blocks.Lambda('x', tf.int32,\n lower_block)\n second_call = computation_building_blocks.Call(second_lambda, x_ref)\n final_input = computation_building_blocks.Data('test_data', tf.int32)\n last_block = computation_building_blocks.Block([('x', final_input),\n ('x', x_ref)], second_call)\n renamed = transformations.uniquify_references(last_block)\n self.assertEqual(\n last_block.tff_repr,\n '(let x=test_data,x=x in (x -> (let x=x,x=x in (x -> x)(x)))(x))')\n self.assertTrue(_has_unique_names(renamed))\n self.assertEqual(\n renamed.tff_repr,\n '(let {0}1=test_data,{0}2={0}1 in ({0}3 -> (let {0}4={0}3,{0}5={0}4 in ({0}6 -> {0}6)({0}5)))({0}2))'\n .format(RENAME_PREFIX))\n\n def test_uniquify_references_blocks_nested_inside_of_locals(self):\n x_data = computation_building_blocks.Data('x', tf.int32)\n data = computation_building_blocks.Data('data', tf.int32)\n lower_block = computation_building_blocks.Block([('y', data)], x_data)\n middle_block = computation_building_blocks.Block([('y', lower_block)],\n x_data)\n higher_block = computation_building_blocks.Block([('y', middle_block)],\n x_data)\n\n y_ref = computation_building_blocks.Reference('y', tf.int32)\n lower_block_with_y_ref = computation_building_blocks.Block([('y', y_ref)],\n x_data)\n middle_block_with_y_ref = computation_building_blocks.Block(\n [('y', lower_block_with_y_ref)], x_data)\n higher_block_with_y_ref = computation_building_blocks.Block(\n [('y', middle_block_with_y_ref)], x_data)\n\n multiple_bindings_highest_block = computation_building_blocks.Block(\n [('y', higher_block),\n ('y', higher_block_with_y_ref)], higher_block_with_y_ref)\n renamed = transformations.uniquify_references(\n multiple_bindings_highest_block)\n self.assertEqual(higher_block.tff_repr,\n '(let y=(let y=(let y=data in x) in x) in x)')\n self.assertEqual(higher_block_with_y_ref.tff_repr,\n '(let y=(let y=(let y=y in x) in x) in x)')\n self.assertEqual(renamed.locals[0][0], '{}4'.format(RENAME_PREFIX))\n self.assertEqual(\n renamed.locals[0][1].tff_repr,\n '(let {0}3=(let {0}2=(let {0}1=data in x) in x) in x)'.format(\n RENAME_PREFIX))\n self.assertEqual(renamed.locals[1][0], '{}8'.format(RENAME_PREFIX))\n self.assertEqual(\n renamed.locals[1][1].tff_repr,\n '(let {0}7=(let {0}6=(let {0}5={0}4 in x) in x) in x)'.format(\n RENAME_PREFIX))\n self.assertEqual(\n renamed.result.tff_repr,\n '(let {0}11=(let {0}10=(let {0}9={0}8 in x) in x) in x)'.format(\n RENAME_PREFIX))\n self.assertTrue(_has_unique_names(renamed))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensor_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import test\nfrom tensorflow_federated.python.tensorflow_libs import tensor_utils\n\n\nclass TensorUtilsTest(test.TestCase):\n\n def test_check_nested_equal(self):\n nested_dict = {\n 'KEY1': {\n 'NESTED_KEY': 0\n },\n 'KEY2': 1,\n }\n nested_list = [('KEY1', ('NESTED_KEY', 0)), ('KEY2', 1)]\n flat_dict = {\n 'KEY1': 0,\n 'KEY2': 1,\n }\n nested_dtypes = {\n 'x': [tf.int32, tf.float32],\n 'y': tf.float32,\n }\n nested_shapes = {\n # N.B. tf.TensorShape([None]) == tf.TensorShape([None])\n # returns False, so we can't use a None shape here.\n 'x': [[1], [3, 5]],\n 'y': [1],\n }\n\n # Should not raise an exception.\n tensor_utils.check_nested_equal(nested_dict, nested_dict)\n tensor_utils.check_nested_equal(nested_list, nested_list)\n tensor_utils.check_nested_equal(flat_dict, flat_dict)\n tensor_utils.check_nested_equal(nested_dtypes, nested_dtypes)\n tensor_utils.check_nested_equal(nested_shapes, nested_shapes)\n\n with self.assertRaises(TypeError):\n tensor_utils.check_nested_equal(nested_dict, nested_list)\n\n with self.assertRaises(ValueError):\n # Different nested structures.\n tensor_utils.check_nested_equal(nested_dict, flat_dict)\n\n # Same as nested_dict, but using float values. 
Equality still holds for\n # 0 == 0.0 despite different types.\n nested_dict_different_types = {\n 'KEY1': {\n 'NESTED_KEY': 0.0\n },\n 'KEY2': 1.0,\n }\n tf.nest.assert_same_structure(nested_dict, nested_dict_different_types)\n\n # Same as nested_dict but with one different value\n nested_dict_different_value = {\n 'KEY1': {\n 'NESTED_KEY': 0.5\n },\n 'KEY2': 1.0,\n }\n with self.assertRaises(ValueError):\n tensor_utils.check_nested_equal(nested_dict, nested_dict_different_value)\n\n tensor_utils.check_nested_equal([None], [None])\n\n def always_neq(x, y):\n del x, y\n return False\n\n with self.assertRaises(ValueError):\n tensor_utils.check_nested_equal([1], [1], always_neq)\n\n def test_to_var_dict(self):\n v1 = tf.Variable(0, name='v1')\n v2 = tf.Variable(0, name='v2')\n\n d0 = tensor_utils.to_var_dict([])\n self.assertIsInstance(d0, collections.OrderedDict)\n self.assertEmpty(d0)\n\n d1 = tensor_utils.to_var_dict([v1])\n self.assertIsInstance(d1, collections.OrderedDict)\n self.assertLen(d1, 1)\n self.assertEqual(d1['v1'], v1)\n\n d2 = tensor_utils.to_var_dict([v1, v2])\n self.assertIsInstance(d2, collections.OrderedDict)\n self.assertLen(d2, 2)\n self.assertEqual(d2['v1'], v1)\n self.assertEqual(d2['v2'], v2)\n\n with self.assertRaises(TypeError):\n tensor_utils.to_var_dict(v1)\n\n with self.assertRaises(TypeError):\n tensor_utils.to_var_dict([tf.constant(1)])\n\n def test_to_var_dict_preserves_order(self):\n a = tf.Variable(0, name='a')\n b = tf.Variable(0, name='b')\n c = tf.Variable(0, name='c')\n var_dict = tensor_utils.to_var_dict([c, a, b])\n self.assertEqual(['c', 'a', 'b'], list(var_dict.keys()))\n\n def test_to_var_dict_duplicate_names(self):\n v1 = tf.Variable(0, name='foo')\n v2 = tf.Variable(0, name='foo')\n assert v1.name == v2.name\n with self.assertRaisesRegexp(ValueError, 'multiple.*foo'):\n tensor_utils.to_var_dict([v1, v2])\n\n def test_to_odict(self):\n d1 = {'b': 2, 'a': 1}\n odict1 = tensor_utils.to_odict(d1)\n self.assertIsInstance(odict1, collections.OrderedDict)\n self.assertCountEqual(d1, odict1)\n\n odict2 = tensor_utils.to_odict(odict1)\n self.assertEqual(odict1, odict2)\n\n with self.assertRaises(TypeError):\n tensor_utils.to_odict({1: 'a', 2: 'b'})\n\n def test_zero_all_if_any_non_finite(self):\n\n def expect_ok(structure):\n with tf.Graph().as_default():\n result, error = tensor_utils.zero_all_if_any_non_finite(structure)\n with self.session() as sess:\n result, error = sess.run((result, error))\n try:\n tf.nest.map_structure(np.testing.assert_allclose, result, structure)\n except AssertionError:\n self.fail('Expected to get input {} back, but instead got {}'.format(\n structure, result))\n self.assertEqual(error, 0)\n\n expect_ok([])\n expect_ok([(), {}])\n expect_ok(1.1)\n expect_ok([1.0, 0.0])\n expect_ok([1.0, 2.0, {'a': 0.0, 'b': -3.0}])\n\n def expect_zeros(structure, expected):\n with tf.Graph().as_default():\n result, error = tensor_utils.zero_all_if_any_non_finite(structure)\n with self.session() as sess:\n result, error = sess.run((result, error))\n try:\n tf.nest.map_structure(np.testing.assert_allclose, result, expected)\n except AssertionError:\n self.fail('Expected to get zeros, but instead got {}'.format(result))\n self.assertEqual(error, 1)\n\n expect_zeros(np.inf, 0.0)\n expect_zeros((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))\n expect_zeros((1.0, (2.0, {\n 'a': 3.0,\n 'b': [[np.inf], [np.nan]]\n })), (0.0, (0.0, {\n 'a': 0.0,\n 'b': [[0.0], [0.0]]\n })))\n\n def test_is_scalar_with_list(self):\n self.assertRaises(TypeError, 
tensor_utils.is_scalar, [10])\n\n def test_is_scalar_with_bool(self):\n self.assertRaises(TypeError, tensor_utils.is_scalar, True)\n\n def test_is_scalar_with_tf_constant(self):\n self.assertTrue(tensor_utils.is_scalar(tf.constant(10)))\n\n def test_is_scalar_with_scalar_tf_variable(self):\n self.assertTrue(tensor_utils.is_scalar(tf.Variable(0.0, 'scalar')))\n\n def test_is_scalar_with_nonscalar_tf_variable(self):\n self.assertFalse(\n tensor_utils.is_scalar(tf.Variable([0.0, 1.0], 'notscalar')))\n\n @test.graph_mode_test\n def test_metrics_sum(self):\n with self.session() as sess:\n v = tf.placeholder(tf.float32)\n sum_tensor, update_op = tensor_utils.metrics_sum(v)\n sess.run(tf.local_variables_initializer())\n sess.run(update_op, feed_dict={v: [1.0, 2.0]})\n self.assertEqual(sess.run(sum_tensor), 3.0)\n sess.run(update_op, feed_dict={v: [3.0]})\n self.assertEqual(sess.run(sum_tensor), 6.0)\n\n def test_same_dimension(self):\n self.assertTrue(\n tensor_utils.same_dimension(tf.Dimension(None), tf.Dimension(None)))\n self.assertTrue(\n tensor_utils.same_dimension(tf.Dimension(1), tf.Dimension(1)))\n\n self.assertFalse(\n tensor_utils.same_dimension(tf.Dimension(None), tf.Dimension(1)))\n self.assertFalse(\n tensor_utils.same_dimension(tf.Dimension(1), tf.Dimension(None)))\n self.assertFalse(\n tensor_utils.same_dimension(tf.Dimension(1), tf.Dimension(2)))\n\n def test_same_shape(self):\n self.assertTrue(\n tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape(None)))\n self.assertTrue(\n tensor_utils.same_shape(tf.TensorShape([None]), tf.TensorShape([None])))\n self.assertTrue(\n tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([1])))\n self.assertTrue(\n tensor_utils.same_shape(\n tf.TensorShape([None, 1]), tf.TensorShape([None, 1])))\n self.assertTrue(\n tensor_utils.same_shape(\n tf.TensorShape([1, 2, 3]), tf.TensorShape([1, 2, 3])))\n\n self.assertFalse(\n tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape([1])))\n self.assertFalse(\n tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape(None)))\n self.assertFalse(\n tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([None])))\n self.assertFalse(\n tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([2])))\n self.assertFalse(\n tensor_utils.same_shape(tf.TensorShape([1, 2]), tf.TensorShape([2, 1])))\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.constant"
],
[
"tensorflow.TensorShape",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.nest.assert_same_structure",
"tensorflow.Variable",
"tensorflow.local_variables_initializer",
"tensorflow.Dimension",
"tensorflow.placeholder",
"tensorflow.nest.map_structure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.4",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Fork-for-Modify/CSENDistance
|
[
"6f6d1b87ea776389d543c7873422e44b35a3f0af"
] |
[
"cl_csen_regressor/model.py"
] |
[
"import os\nimport tensorflow as tf\ntf.random.set_seed(10)\nimport numpy as np\n\nfrom cl_csen_regressor.utils import *\n\nclass model:\n def __init__(self):\n self.imageSizeM = 80\n self.imageSizeN = 15\n self.model = None\n self.history = None\n self.x_train = None\n self.x_val = None\n self.x_test = None\n self.y_train = None\n self.y_val = None\n self.y_test = None\n self.proj_m = None\n\n def loadData(self, dataDir, feature_type, set, MR, isTrain=True):\n # Check if the CSEN data is available.\n if not os.path.exists(dataDir): exit('CSENdata-2D is not prepared!')\n data = dataDir + feature_type\n dataPath = data + '_mr_' + MR + '_run' + str(set) + '.mat'\n # dic_label = scipy.io.loadmat(dataDir+'dic_label' + '.mat')[\"ans\"]\n\n self.proj_m, self.x_train, self.x_val, self.x_test, self.y_train, self.y_val, self.y_test = loadData(dataPath, isTrain)\n\n \n def getModel(self):\n input_shape = (self.imageSizeM, self.imageSizeN, 1)\n input = tf.keras.Input(shape = (self.x_train.shape[-1],), name='input')\n x_0 = tf.keras.layers.Dense(self.imageSizeM * self.imageSizeN, activation = 'relu')(input)\n x_0 = tf.keras.layers.Reshape(input_shape)(x_0) # Size of reshaped proxy from CRC estimation.\n x_0 = tf.keras.layers.Conv2D(64, 5, padding = 'same', activation = 'relu')(x_0)\n x_0 = tf.keras.layers.MaxPooling2D(pool_size=(4, 5))(x_0) # Sparse code shapes.\n x_0 = tf.keras.layers.Conv2D(1, 5, padding = 'same', activation = 'relu')(x_0)\n \n y = tf.keras.layers.Flatten()(x_0)\n y = tf.keras.layers.Dense(1, activation = 'softplus')(y)\n \n self.model = tf.keras.models.Model(input, y, name='CL-CSEN')\n self.model.summary()\n\n def train(self, weightPath, epochs = 100, batch_size = 16):\n adam = tf.keras.optimizers.Adam(\n lr=0.001, beta_1=0.9, beta_2=0.999, \n epsilon=None, decay=0.0, amsgrad=True)\n\n checkpoint_csen = tf.keras.callbacks.ModelCheckpoint(\n weightPath, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n \n callbacks_csen = [checkpoint_csen]\n\n while True:\n self.getModel()\n\n M_T = [self.proj_m.T, np.zeros((self.proj_m.shape[0]),)]\n self.model.layers[1].set_weights(M_T) # Denoiser layer.\n\n self.model.compile(loss = tf.compat.v1.losses.huber_loss,\n optimizer = adam, metrics=['mae', 'mse'] )\n\n # Training.\n self.history = self.model.fit(self.x_train, self.y_train,\n validation_data=(self.x_val, self.y_val), \n epochs = epochs, batch_size = batch_size,\n shuffle = True, callbacks=callbacks_csen)\n\n if self.model.history.history['loss'][1] < 8: # If it is converged.\n break\n\n def load_weights(self, weightPath):\n self.getModel()\n self.model.load_weights(weightPath)\n\n def predict(self):\n y_pred = self.model.predict(self.x_test)\n return y_pred\n######"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.Input",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.optimizers.Adam",
"numpy.zeros",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.random.set_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
roychen97/detect_and_track
|
[
"aaff2178bbe75aad9019cc0193a85c9c60cdda87",
"aaff2178bbe75aad9019cc0193a85c9c60cdda87"
] |
[
"ssdface_freeze/import numpy as np.py",
"preprocessing/preprocessing_unittest.py"
] |
[
"import numpy as np\n\n\nnum = [3,5,6,9]\nsplit_param = []\nsplit_param.append(len(num[0]))\nfor layer_num in num[1:-2]:\n split_param.append(layer_num + split_param[-1])\nprint(split_param)\n\n\nto_split = np.range(23)\nsplit = np.split(to_split, split_param)\nprint(split)",
"# Copyright 2018 Changan Wang\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n# from scipy.misc import imread, imsave, imshow, imresize\nimport numpy as np\nimport sys; sys.path.insert(0, \".\")\nfrom utility import draw_toolbox\nimport ssd_preprocessing\nimport cv2 as cv\nslim = tf.contrib.slim\n\ndef save_image_with_bbox(image, labels_, scores_, bboxes_):\n if not hasattr(save_image_with_bbox, \"counter\"):\n save_image_with_bbox.counter = 0 # it doesn't exist yet, so initialize it\n save_image_with_bbox.counter += 1\n img_to_draw = np.copy(image)\n img_to_draw = draw_toolbox.rotate_bboxes_draw_on_img(img_to_draw, labels_, scores_, bboxes_, thickness=3, color=(255,128,80))\n # img_to_draw = draw_toolbox.bboxes_draw_on_img(img_to_draw, labels_, scores_, bboxes_, thickness=2, xy_order='yx')\n cv.imwrite(os.path.join('./debug/{}.jpg').format(save_image_with_bbox.counter), cv.cvtColor(img_to_draw, cv.COLOR_BGR2RGB))\n return save_image_with_bbox.counter\n\n\ndef save_batch(image, filename, shape, glabels_raw, gbboxes_raw):\n if not hasattr(save_batch, \"counter\"):\n save_batch.counter = 0 # it doesn't exist yet, so initialize it\n save_batch.counter += 1\n np.save('./debug/debug_shape_{}.npy'.format(save_batch.counter), np.copy(shape))\n np.save('./debug/debug_glabels_raw_{}.npy'.format(save_batch.counter), np.copy(glabels_raw))\n np.save('./debug/debug_gbboxes_raw_{}.npy'.format(save_batch.counter), np.copy(gbboxes_raw))\n return save_batch.counter\n\n\n\ndef slim_get_split(file_pattern='{}_????'):\n # Features in Pascal VOC TFRecords.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n 'image/channels': tf.FixedLenFeature([1], tf.int64),\n 'image/shape': tf.FixedLenFeature([3], tf.int64),\n 'image/object/bbox/cx': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/cy': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/width': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/height': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),\n }\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\n 'filename': slim.tfexample_decoder.Tensor('image/filename'),\n 'shape': slim.tfexample_decoder.Tensor('image/shape'),\n 'object/bbox': slim.tfexample_decoder.BoundingBox(\n ['cy', 'cx', 'height', 'width'], 'image/object/bbox/'),\n 
'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\n 'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),\n 'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),\n }\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\n\n dataset = slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=100,\n items_to_descriptions=None,\n num_classes=21,\n labels_to_names=None)\n\n with tf.name_scope('dataset_data_provider'):\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=2,\n common_queue_capacity=32,\n common_queue_min=8,\n shuffle=True,\n num_epochs=1)\n\n [org_image, filename, shape, glabels_raw, gbboxes_raw, isdifficult] = provider.get(['image', 'filename', 'shape',\n 'object/label',\n 'object/bbox',\n 'object/difficult'])\n # save_npy_op = tf.py_func(save_batch,[org_image, filename, shape, glabels_raw, gbboxes_raw], tf.int64, stateful=True)\n # save_image_op = tf.py_func(save_image_with_bbox,\n # [org_image,\n # tf.clip_by_value(glabels_raw, 0, tf.int64.max),\n # tf.ones_like(glabels_raw),\n # gbboxes_raw],\n # tf.int64, stateful=True)\n # return save_image_op, org_image, glabels_raw, gbboxes_raw\n image, glabels, gbboxes = ssd_preprocessing.preprocess_image(org_image, glabels_raw, gbboxes_raw,[128.,128.,128.], [300, 300], is_training=True, data_format='channels_first', output_rgb=True)\n\n\n image = tf.transpose(image, perm=(1, 2, 0))\n save_image_op = tf.py_func(save_image_with_bbox,\n [ssd_preprocessing.unwhiten_image(image,[128.,128.,128.]),\n tf.clip_by_value(glabels, 0, tf.int64.max),\n tf.ones_like(glabels),\n gbboxes],\n tf.int64, stateful=True)\n return save_image_op, org_image, glabels_raw, gbboxes_raw\n\nif __name__ == '__main__':\n # tf_file='/home/scchiu/Data/person_detection_tfrecord/*.tfrecord'\n tf_file='./dataset/wework_tfrecords/train*'\n save_image_op, org_image, glabels_raw, gbboxes_raw = slim_get_split(tf_file)\n # Create the graph, etc.\n init_op = tf.group([tf.local_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer()])\n\n # Create a session for running operations in the Graph.\n sess = tf.Session()\n # Initialize the variables (like the epoch counter).\n sess.run(init_op)\n\n # Start input enqueue threads.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n while not coord.should_stop():\n # Run training steps or whatever\n # img, labels, boxes = sess.run([org_image, glabels_raw, gbboxes_raw])\n # print('img = {}, labels = {}, boxes = {}'.format(img.shape, labels, boxes))\n print(sess.run(save_image_op))\n\n except tf.errors.OutOfRangeError:\n print('Done training -- epoch limit reached')\n finally:\n # When done, ask the threads to stop.\n coord.request_stop()\n\n # Wait for threads to finish.\n coord.join(threads)\n sess.close()\n"
] |
[
[
"numpy.split",
"numpy.range"
],
[
"tensorflow.clip_by_value",
"tensorflow.transpose",
"tensorflow.FixedLenFeature",
"tensorflow.local_variables_initializer",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.ones_like",
"numpy.copy",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.VarLenFeature",
"tensorflow.tables_initializer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
AmanPriyanshu/entity-embeddings-from-scratch
|
[
"6a8a1f1f0da97d711cf1c71f13ff17f756a052a8"
] |
[
"embedding_layer.py"
] |
[
"import torch\nimport numpy as np\n\nclass EntityEmbedding(torch.nn.Module):\n\tdef __init__(self, vocab_size, hidden_size, input_size):\n\t\tsuper(EntityEmbedding, self).__init__()\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.vocab_size = vocab_size\n\t\tself.softmax = torch.nn.Softmax(dim=1)\n\n\t\tweights = torch.Tensor(self.vocab_size, self.hidden_size)\n\t\tself.weights = torch.nn.Parameter(weights)\n\t\ttorch.nn.init.kaiming_uniform_(self.weights, a=np.sqrt(5))\n\n\tdef forward(self, x):\n\t\tx_onehot = torch.FloatTensor(x.shape[0], self.input_size, self.vocab_size)\n\t\tx_onehot.zero_()\n\t\tfor i in range(x.shape[0]):\n\t\t\tfor row in range(x[i].shape[0]):\n\t\t\t\tx_onehot[i][row][x[i][row]] += 1\n\t\tw_times_x=torch.zeros(x.shape[0], self.input_size, self.hidden_size)\n\t\tfor i in range(x.shape[0]):\n\t\t\tw_times_x[i] = torch.mm(x_onehot[i], self.weights)\n\t\treturn torch.mean(w_times_x, 1)\n\nif __name__ == '__main__':\n\tlayer = EntityEmbedding(10, 2, 3)\n\tprint(layer(torch.tensor([[0, 1, 2], [0, 1, 5]])))"
] |
[
[
"torch.nn.Softmax",
"torch.mean",
"torch.nn.Parameter",
"torch.mm",
"numpy.sqrt",
"torch.Tensor",
"torch.zeros",
"torch.tensor",
"torch.FloatTensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
threewisemonkeys-as/PyTorch-VAE
|
[
"4ed0fc7581d4792b435134aa9e06d5e35a5db118",
"4ed0fc7581d4792b435134aa9e06d5e35a5db118",
"4ed0fc7581d4792b435134aa9e06d5e35a5db118",
"4ed0fc7581d4792b435134aa9e06d5e35a5db118"
] |
[
"tests/test_vae.py",
"models/dfcvae.py",
"models/miwae.py",
"models/swae.py"
] |
[
"import unittest\n\nimport torch\nfrom models import VanillaVAE\nfrom torchsummary import summary\n\n\nclass TestVAE(unittest.TestCase):\n def setUp(self) -> None:\n # self.model2 = VAE(3, 10)\n self.model = VanillaVAE(3, 10)\n\n def test_summary(self):\n print(summary(self.model, (3, 64, 64), device=\"cpu\"))\n # print(summary(self.model2, (3, 64, 64), device='cpu'))\n\n def test_forward(self):\n x = torch.randn(16, 3, 64, 64)\n y = self.model(x)\n print(\"Model Output size:\", y[0].size())\n # print(\"Model2 Output size:\", self.model2(x)[0].size())\n\n def test_loss(self):\n x = torch.randn(16, 3, 64, 64)\n\n result = self.model(x)\n loss = self.model.loss_function(*result, M_N=0.005)\n print(loss)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"from typing import List, Optional\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision.models import vgg19_bn\n\nfrom .base import BaseVAE\n\n\nclass DFCVAE(BaseVAE):\n def __init__(\n self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n alpha: float = 1,\n beta: float = 0.5,\n lr: float = 0.005,\n weight_decay: Optional[float] = 0,\n scheduler_gamma: Optional[float] = 0.95,\n ) -> None:\n super(DFCVAE, self).__init__(\n lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma\n )\n\n self.latent_dim = latent_dim\n self.alpha = alpha\n self.beta = beta\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(\n in_channels,\n out_channels=h_dim,\n kernel_size=3,\n stride=2,\n padding=1,\n ),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU(),\n )\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)\n self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)\n\n # Build Decoder\n modules = []\n\n self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)\n\n hidden_dims.reverse()\n\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU(),\n )\n )\n\n self.decoder = nn.Sequential(*modules)\n\n self.final_layer = nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),\n nn.Tanh(),\n )\n\n self.feature_network = vgg19_bn(pretrained=True)\n\n # Freeze the pretrained feature network\n for param in self.feature_network.parameters():\n param.requires_grad = False\n\n self.feature_network.eval()\n\n def encode(self, input: torch.Tensor) -> List[torch.Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]\n :return: (torch.Tensor) List of latent codes\n \"\"\"\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n mu = self.fc_mu(result)\n log_var = self.fc_var(result)\n\n return [mu, log_var]\n\n def decode(self, z: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Maps the given latent codes\n onto the image space.\n :param z: (torch.Tensor) [B x D]\n :return: (torch.Tensor) [B x C x H x W]\n \"\"\"\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result\n\n def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Reparameterization trick to sample from N(mu, var) from\n N(0,1).\n :param mu: (torch.Tensor) Mean of the latent Gaussian [B x D]\n :param logvar: (torch.Tensor) Standard deviation of the latent Gaussian [B x D]\n :return: (torch.Tensor) [B x D]\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:\n mu, log_var = self.encode(input)\n z = 
self.reparameterize(mu, log_var)\n recons = self.decode(z)\n\n recons_features = self.extract_features(recons)\n input_features = self.extract_features(input)\n\n return [recons, input, recons_features, input_features, mu, log_var]\n\n def extract_features(\n self, input: torch.Tensor, feature_layers: List = None\n ) -> List[torch.Tensor]:\n \"\"\"\n Extracts the features from the pretrained model\n at the layers indicated by feature_layers.\n :param input: (torch.Tensor) [B x C x H x W]\n :param feature_layers: List of string of IDs\n :return: List of the extracted features\n \"\"\"\n if feature_layers is None:\n feature_layers = [\"14\", \"24\", \"34\", \"43\"]\n features = []\n result = input\n for (key, module) in self.feature_network.features._modules.items():\n result = module(result)\n if key in feature_layers:\n features.append(result)\n\n return features\n\n def loss_function(self, *args, **kwargs) -> dict:\n \"\"\"\n Computes the VAE loss function.\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n recons = args[0]\n input = args[1]\n recons_features = args[2]\n input_features = args[3]\n mu = args[4]\n log_var = args[5]\n\n kld_weight = kwargs[\"M_N\"] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, input)\n\n feature_loss = 0.0\n for (r, i) in zip(recons_features, input_features):\n feature_loss += F.mse_loss(r, i)\n\n kld_loss = torch.mean(\n -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0\n )\n\n loss = (\n self.beta * (recons_loss + feature_loss)\n + self.alpha * kld_weight * kld_loss\n )\n return {\"loss\": loss, \"Reconstruction_Loss\": recons_loss, \"KLD\": -kld_loss}\n\n def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (torch.Tensor)\n \"\"\"\n z = torch.randn(num_samples, self.latent_dim)\n\n z = z.to(current_device)\n\n samples = self.decode(z)\n return samples\n\n def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (torch.Tensor) [B x C x H x W]\n :return: (torch.Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0]\n",
"from typing import List, Optional\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import Normal\nfrom torch.nn import functional as F\n\nfrom .base import BaseVAE\n\n\nclass MIWAE(BaseVAE):\n def __init__(\n self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n num_samples: int = 5,\n num_estimates: int = 5,\n lr: float = 0.005,\n weight_decay: Optional[float] = 0,\n scheduler_gamma: Optional[float] = 0.95,\n ) -> None:\n super(MIWAE, self).__init__(\n lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma\n )\n\n self.latent_dim = latent_dim\n self.num_samples = num_samples # K\n self.num_estimates = num_estimates # M\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(\n in_channels,\n out_channels=h_dim,\n kernel_size=3,\n stride=2,\n padding=1,\n ),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU(),\n )\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)\n self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)\n\n # Build Decoder\n modules = []\n\n self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)\n\n hidden_dims.reverse()\n\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU(),\n )\n )\n\n self.decoder = nn.Sequential(*modules)\n\n self.final_layer = nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),\n nn.Tanh(),\n )\n\n def encode(self, input: torch.Tensor) -> List[torch.Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]\n :return: (torch.Tensor) List of latent codes\n \"\"\"\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n mu = self.fc_mu(result)\n log_var = self.fc_var(result)\n\n return [mu, log_var]\n\n def decode(self, z: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Maps the given latent codes of S samples\n onto the image space.\n :param z: (torch.Tensor) [B x S x D]\n :return: (torch.Tensor) [B x S x C x H x W]\n \"\"\"\n B, M, S, D = z.size()\n z = z.view(-1, self.latent_dim) # [BMS x D]\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result) # [BMS x C x H x W ]\n result = result.view(\n [B, M, S, result.size(-3), result.size(-2), result.size(-1)]\n ) # [B x M x S x C x H x W]\n return result\n\n def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param mu: (torch.Tensor) Mean of the latent Gaussian\n :param logvar: (torch.Tensor) Standard deviation of the latent Gaussian\n :return:\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:\n mu, log_var = self.encode(input)\n mu = mu.repeat(self.num_estimates, self.num_samples, 1, 
1).permute(\n 2, 0, 1, 3\n ) # [B x M x S x D]\n log_var = log_var.repeat(self.num_estimates, self.num_samples, 1, 1).permute(\n 2, 0, 1, 3\n ) # [B x M x S x D]\n z = self.reparameterize(mu, log_var) # [B x M x S x D]\n eps = (z - mu) / log_var # Prior samples\n return [self.decode(z), input, mu, log_var, z, eps]\n\n def loss_function(self, *args, **kwargs) -> dict:\n \"\"\"\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n recons = args[0]\n input = args[1]\n mu = args[2]\n log_var = args[3]\n z = args[4]\n eps = args[5]\n\n input = input.repeat(self.num_estimates, self.num_samples, 1, 1, 1, 1).permute(\n 2, 0, 1, 3, 4, 5\n ) # [B x M x S x C x H x W]\n\n kld_weight = kwargs[\"M_N\"] # Account for the minibatch samples from the dataset\n\n log_p_x_z = (\n ((recons - input) ** 2).flatten(3).mean(-1)\n ) # Reconstruction Loss # [B x M x S]\n\n kld_loss = -0.5 * torch.sum(\n 1 + log_var - mu ** 2 - log_var.exp(), dim=3\n ) # [B x M x S]\n # Get importance weights\n log_weight = log_p_x_z + kld_weight * kld_loss # .detach().data\n\n # Rescale the weights (along the sample dim) to lie in [0, 1] and sum to 1\n weight = F.softmax(log_weight, dim=-1) # [B x M x S]\n\n loss = torch.mean(\n torch.mean(torch.sum(weight * log_weight, dim=-1), dim=-2), dim=0\n )\n\n return {\n \"loss\": loss,\n \"Reconstruction_Loss\": log_p_x_z.mean(),\n \"KLD\": -kld_loss.mean(),\n }\n\n def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (torch.Tensor)\n \"\"\"\n z = torch.randn(num_samples, 1, 1, self.latent_dim)\n\n z = z.to(current_device)\n\n samples = self.decode(z).squeeze()\n return samples\n\n def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image.\n Returns only the first reconstructed sample\n :param x: (torch.Tensor) [B x C x H x W]\n :return: (torch.Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0][:, 0, 0, :]\n",
"from typing import List, Optional\n\nimport torch\nfrom torch import distributions as dist\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .base import BaseVAE\n\n\nclass SWAE(BaseVAE):\n def __init__(\n self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n reg_weight: int = 100,\n wasserstein_deg: float = 2.0,\n num_projections: int = 50,\n projection_dist: str = \"normal\",\n lr: float = 0.005,\n weight_decay: Optional[float] = 0,\n scheduler_gamma: Optional[float] = 0.95,\n ) -> None:\n super(SWAE, self).__init__(\n lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma\n )\n\n self.latent_dim = latent_dim\n self.reg_weight = reg_weight\n self.p = wasserstein_deg\n self.num_projections = num_projections\n self.proj_dist = projection_dist\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(\n in_channels,\n out_channels=h_dim,\n kernel_size=3,\n stride=2,\n padding=1,\n ),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU(),\n )\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_z = nn.Linear(hidden_dims[-1] * 4, latent_dim)\n\n # Build Decoder\n modules = []\n\n self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)\n\n hidden_dims.reverse()\n\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU(),\n )\n )\n\n self.decoder = nn.Sequential(*modules)\n\n self.final_layer = nn.Sequential(\n nn.ConvTranspose2d(\n hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1,\n ),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),\n nn.Tanh(),\n )\n\n def encode(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]\n :return: (torch.Tensor) List of latent codes\n \"\"\"\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n z = self.fc_z(result)\n return z\n\n def decode(self, z: torch.Tensor) -> torch.Tensor:\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result\n\n def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:\n z = self.encode(input)\n return [self.decode(z), input, z]\n\n def loss_function(self, *args, **kwargs) -> dict:\n recons = args[0]\n input = args[1]\n z = args[2]\n\n batch_size = input.size(0)\n bias_corr = batch_size * (batch_size - 1)\n reg_weight = self.reg_weight / bias_corr\n\n recons_loss_l2 = F.mse_loss(recons, input)\n recons_loss_l1 = F.l1_loss(recons, input)\n\n swd_loss = self.compute_swd(z, self.p, reg_weight)\n\n loss = recons_loss_l2 + recons_loss_l1 + swd_loss\n return {\n \"loss\": loss,\n \"Reconstruction_Loss\": (recons_loss_l2 + recons_loss_l1),\n \"SWD\": swd_loss,\n }\n\n def get_random_projections(self, latent_dim: int, num_samples: int) -> torch.Tensor:\n \"\"\"\n Returns random samples from latent distribution's (Gaussian)\n unit sphere for projecting 
the encoded samples and the\n distribution samples.\n\n :param latent_dim: (Int) Dimensionality of the latent space (D)\n :param num_samples: (Int) Number of samples required (S)\n :return: Random projections from the latent unit sphere\n \"\"\"\n if self.proj_dist == \"normal\":\n rand_samples = torch.randn(num_samples, latent_dim)\n elif self.proj_dist == \"cauchy\":\n rand_samples = (\n dist.Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))\n .sample((num_samples, latent_dim))\n .squeeze()\n )\n else:\n raise ValueError(\"Unknown projection distribution.\")\n\n rand_proj = rand_samples / rand_samples.norm(dim=1).view(-1, 1)\n return rand_proj # [S x D]\n\n def compute_swd(self, z: torch.Tensor, p: float, reg_weight: float) -> torch.Tensor:\n \"\"\"\n Computes the Sliced Wasserstein Distance (SWD) - which consists of\n randomly projecting the encoded and prior vectors and computing\n their Wasserstein distance along those projections.\n\n :param z: Latent samples # [N x D]\n :param p: Value for the p^th Wasserstein distance\n :param reg_weight:\n :return:\n \"\"\"\n prior_z = torch.randn_like(z) # [N x D]\n device = z.device\n\n proj_matrix = (\n self.get_random_projections(\n self.latent_dim, num_samples=self.num_projections\n )\n .transpose(0, 1)\n .to(device)\n )\n\n latent_projections = z.matmul(proj_matrix) # [N x S]\n prior_projections = prior_z.matmul(proj_matrix) # [N x S]\n\n # The Wasserstein distance is computed by sorting the two projections\n # across the batches and computing their element-wise l2 distance\n w_dist = (\n torch.sort(latent_projections.t(), dim=1)[0]\n - torch.sort(prior_projections.t(), dim=1)[0]\n )\n w_dist = w_dist.pow(p)\n return reg_weight * w_dist.mean()\n\n def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (torch.Tensor)\n \"\"\"\n z = torch.randn(num_samples, self.latent_dim)\n\n z = z.to(current_device)\n\n samples = self.decode(z)\n return samples\n\n def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (torch.Tensor) [B x C x H x W]\n :return: (torch.Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0]\n"
] |
[
[
"torch.randn"
],
[
"torch.randn_like",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.mse_loss",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.flatten"
],
[
"torch.randn_like",
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.nn.ConvTranspose2d",
"torch.randn",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Tanh",
"torch.exp",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.flatten"
],
[
"torch.randn_like",
"torch.nn.Sequential",
"torch.nn.functional.l1_loss",
"torch.nn.ConvTranspose2d",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.functional.mse_loss",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
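The DFCVAE and MIWAE code cells in the row above implement the same reparameterization trick and Gaussian KL term (z = mu + sigma * eps and -0.5 * sum(1 + log_var - mu^2 - exp(log_var))), which is also why the accompanying apis column lists torch.randn_like, torch.exp and torch.sum. The following is only a minimal, self-contained sketch of that shared pattern, assuming nothing from the row beyond plain torch; the [16 x 10] shapes are illustrative, not taken from the dataset.

import torch

def reparameterize(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # Sample z = mu + sigma * eps with eps ~ N(0, I), where sigma = exp(0.5 * log_var).
    std = torch.exp(0.5 * log_var)
    eps = torch.randn_like(std)
    return mu + eps * std

def gaussian_kld(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # KL(N(mu, sigma^2) || N(0, I)): sum over latent dimensions, mean over the batch,
    # mirroring the loss_function bodies in the code cells above.
    return torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

# Illustrative usage with a batch of 16 and a 10-dimensional latent space.
mu = torch.zeros(16, 10)
log_var = torch.zeros(16, 10)
z = reparameterize(mu, log_var)   # [16 x 10] latent samples
kl = gaussian_kld(mu, log_var)    # scalar; equals 0 here because q(z|x) == N(0, I)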
gnoses/TensorFlow
|
[
"4ac9c09d5ca57a03b8daa5fb9e295947b1619854",
"63a21e054007d86269ed1ad0145ebce04ee57a81",
"63a21e054007d86269ed1ad0145ebce04ee57a81",
"63a21e054007d86269ed1ad0145ebce04ee57a81"
] |
[
"tensorflow/contrib/learn/python/learn/utils/input_fn_utils.py",
"tensorflow/python/debug/session_debug_testlib.py",
"tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py",
"tensorflow/contrib/distributions/python/ops/mixture.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for creating input_fns.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import parsing_ops\n\n\nclass InputFnOps(collections.namedtuple('InputFnOps',\n ['features',\n 'labels',\n 'default_inputs'])):\n \"\"\"A return type for an input_fn.\n\n This return type is currently only supported for serving input_fn.\n Training and eval input_fn should return a `(features, labels)` tuple.\n\n The expected return values are:\n features: A dict of string to `Tensor` or `SparseTensor`, specifying the\n features to be passed to the model.\n labels: A `Tensor`, `SparseTensor`, or a dict of string to `Tensor` or\n `SparseTensor`, specifying labels for training or eval. For serving, set\n `labels` to `None`.\n default_inputs: a dict of string to `Tensor` or `SparseTensor`, specifying\n the input placeholders (if any) that this input_fn expects to be fed.\n Typically, this is used by a serving input_fn, which expects to be fed\n serialized `tf.Example` protos.\n \"\"\"\n\n\ndef build_parsing_serving_input_fn(feature_spec, default_batch_size=None):\n \"\"\"Build an input_fn appropriate for serving, expecting fed tf.Examples.\n\n Creates an input_fn that expects a serialized tf.Example fed into a string\n placeholder. The function parses the tf.Example according to the provided\n feature_spec, and returns all parsed Tensors as features. 
This input_fn is\n for use at serving time, so the labels return value is always None.\n\n Args:\n feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.\n default_batch_size: the number of query examples expected per batch.\n Leave unset for variable batch size (recommended).\n\n Returns:\n An input_fn suitable for use in serving.\n \"\"\"\n def input_fn():\n \"\"\"An input_fn that expects a serialized tf.Example.\"\"\"\n serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,\n shape=[default_batch_size],\n name='input_example_tensor')\n inputs = {'examples': serialized_tf_example}\n features = parsing_ops.parse_example(serialized_tf_example, feature_spec)\n labels = None # these are not known in serving!\n return InputFnOps(features, labels, inputs)\n return input_fn\n\n\ndef build_default_serving_input_fn(features, default_batch_size=None):\n \"\"\"Build an input_fn appropriate for serving, expecting feature Tensors.\n\n Creates an input_fn that expects all features to be fed directly.\n This input_fn is for use at serving time, so the labels return value is always\n None.\n\n Args:\n features: a dict of string to `Tensor`.\n default_batch_size: the number of query examples expected per batch.\n Leave unset for variable batch size (recommended).\n\n Returns:\n An input_fn suitable for use in serving.\n \"\"\"\n def input_fn():\n \"\"\"an input_fn that expects all features to be fed directly.\"\"\"\n features_placeholders = {}\n for name, t in features.items():\n shape_list = t.get_shape().as_list()\n shape_list[0] = default_batch_size\n shape = tensor_shape.TensorShape(shape_list)\n\n features_placeholders[name] = array_ops.placeholder(dtype=t.dtype,\n shape=shape,\n name=t.name)\n labels = None # these are not known in serving!\n return InputFnOps(features_placeholders, labels, features_placeholders)\n return input_fn\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for debugger functionalities in tf.Session.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.util import event_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug import debug_data\nfrom tensorflow.python.debug import debug_utils\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\n\n\nclass SessionDebugTestBase(test_util.TensorFlowTestCase):\n \"\"\"Base class for unit tests of tfdbg running with tf.Session.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n if test.is_gpu_available():\n cls._expected_partition_graph_count = 2\n cls._expected_num_devices = 2\n cls._main_device = \"/job:localhost/replica:0/task:0/gpu:0\"\n else:\n cls._expected_partition_graph_count = 1\n cls._expected_num_devices = 1\n cls._main_device = \"/job:localhost/replica:0/task:0/cpu:0\"\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def setUp(self):\n self._dump_root = tempfile.mkdtemp()\n\n def tearDown(self):\n ops.reset_default_graph()\n\n # Tear down temporary dump directory.\n if os.path.isdir(self._dump_root):\n shutil.rmtree(self._dump_root)\n\n def _debug_urls(self, run_number=None):\n raise NotImplementedError(\n \"_debug_urls() method is not implemented in the base test class.\")\n\n def _debug_dump_dir(self, run_number=None):\n raise NotImplementedError(\n \"_debug_dump_dir() method is not implemented in the base test class.\")\n\n def _generate_dump_from_simple_addition_graph(self):\n with session.Session() as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n v_init_val = np.array([[2.0], [-1.0]])\n\n # Use node names with overlapping namespace (i.e., parent directory) to\n # test concurrent, non-racing directory creation.\n u_name = \"u\"\n v_name = \"v\"\n w_name = \"w\"\n\n u_init = constant_op.constant(u_init_val, shape=[2, 2])\n u = variables.Variable(u_init, name=u_name)\n v_init = 
constant_op.constant(v_init_val, shape=[2, 1])\n v = variables.Variable(v_init, name=v_name)\n\n w = math_ops.matmul(u, v, name=w_name)\n\n u.initializer.run()\n v.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = \"file://%s\" % self._dump_root\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % u_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % v_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n\n # Invoke Session.run().\n sess.run(w, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n simple_add_results = collections.namedtuple(\"SimpleAddResults\", [\n \"u_init_val\", \"v_init_val\", \"u\", \"v\", \"w\", \"u_name\", \"v_name\", \"w_name\",\n \"dump\"\n ])\n return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,\n w_name, dump)\n\n def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):\n results = self._generate_dump_from_simple_addition_graph()\n self.assertTrue(results.dump.loaded_partition_graphs())\n\n # Since global_step is not explicitly specified, it should take its default\n # value: -1.\n self.assertEqual(-1, results.dump.core_metadata.global_step)\n self.assertGreaterEqual(results.dump.core_metadata.session_run_count, 0)\n self.assertGreaterEqual(results.dump.core_metadata.executor_step_count, 0)\n self.assertEqual([], results.dump.core_metadata.input_names)\n self.assertEqual([results.w.name], results.dump.core_metadata.output_names)\n self.assertEqual([], results.dump.core_metadata.target_nodes)\n\n # Verify the dumped tensor values for u and v.\n self.assertEqual(2, results.dump.size)\n\n self.assertAllClose([results.u_init_val],\n results.dump.get_tensors(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\"))\n self.assertAllClose([results.v_init_val],\n results.dump.get_tensors(\"%s/read\" % results.v_name, 0,\n \"DebugIdentity\"))\n\n self.assertGreaterEqual(\n results.dump.get_rel_timestamps(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreaterEqual(\n results.dump.get_rel_timestamps(\"%s/read\" % results.v_name, 0,\n \"DebugIdentity\")[0], 0)\n\n self.assertGreater(\n results.dump.get_dump_sizes_bytes(\"%s/read\" % results.u_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreater(\n results.dump.get_dump_sizes_bytes(\"%s/read\" % results.v_name, 0,\n \"DebugIdentity\")[0], 0)\n\n def testGetOpTypeWorks(self):\n results = self._generate_dump_from_simple_addition_graph()\n\n self.assertEqual(results.u.op.type,\n results.dump.node_op_type(results.u_name))\n self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))\n self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))\n\n with self.assertRaisesRegexp(\n ValueError, \"Node 'foo_bar' does not exist in partition graphs.\"):\n results.dump.node_op_type(\"foo_bar\")\n\n def testDumpStringTensorsWorks(self):\n with session.Session() as sess:\n str1_init_val = np.array(b\"abc\")\n str2_init_val = np.array(b\"def\")\n\n str1_init = constant_op.constant(str1_init_val)\n str2_init = constant_op.constant(str2_init_val)\n\n str1_name = \"str1\"\n str2_name = \"str2\"\n str1 = variables.Variable(str1_init, 
name=str1_name)\n str2 = variables.Variable(str2_init, name=str2_name)\n # Concatenate str1 and str2\n str_concat = math_ops.add(str1, str2, name=\"str_concat\")\n\n str1.initializer.run()\n str2.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % str1_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % str2_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(str_concat, options=run_options, run_metadata=run_metadata)\n\n # String ops are located on CPU.\n self.assertEqual(1, len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n self.assertIn(str1_name, dump.nodes())\n self.assertIn(str2_name, dump.nodes())\n\n self.assertEqual(2, dump.size)\n\n self.assertEqual([str1_init_val],\n dump.get_tensors(\"%s/read\" % str1_name, 0,\n \"DebugIdentity\"))\n self.assertEqual([str2_init_val],\n dump.get_tensors(\"%s/read\" % str2_name, 0,\n \"DebugIdentity\"))\n\n self.assertGreaterEqual(\n dump.get_rel_timestamps(\"%s/read\" % str1_name, 0, \"DebugIdentity\")[0],\n 0)\n self.assertGreaterEqual(\n dump.get_rel_timestamps(\"%s/read\" % str2_name, 0, \"DebugIdentity\")[0],\n 0)\n\n self.assertGreater(\n dump.get_dump_sizes_bytes(\"%s/read\" % str1_name, 0,\n \"DebugIdentity\")[0], 0)\n self.assertGreater(\n dump.get_dump_sizes_bytes(\"%s/read\" % str2_name, 0,\n \"DebugIdentity\")[0], 0)\n\n def testDumpUninitializedVariable(self):\n op_namespace = \"testDumpUninitializedVariable\"\n with session.Session() as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n s_init_val = b\"str1\"\n\n u_name = \"%s/u\" % op_namespace\n s_name = \"%s/s\" % op_namespace\n\n u_init = constant_op.constant(u_init_val, shape=[2, 2])\n u = variables.Variable(u_init, name=u_name)\n s_init = constant_op.constant(s_init_val)\n s = variables.Variable(s_init, name=s_name)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s\" % u_name, 0, debug_urls=debug_urls)\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s\" % s_name, 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n\n # Initialize u and s.\n sess.run(variables.global_variables_initializer(),\n options=run_options,\n run_metadata=run_metadata)\n\n # Verify the dump file for the uninitialized value of u.\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n self.assertEqual(2, dump.size)\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n # Verify that the variable is properly initialized by the run() call.\n u_vals = dump.get_tensors(u_name, 0, \"DebugIdentity\")\n s_vals = dump.get_tensors(s_name, 0, \"DebugIdentity\")\n self.assertEqual(1, len(u_vals))\n self.assertIsNone(u_vals[0])\n self.assertEqual(1, len(s_vals))\n self.assertIsNone(s_vals[0])\n\n # Call run() again, to check that u is initialized properly.\n self.assertAllClose(u_init_val, sess.run(u))\n self.assertEqual(s_init_val, sess.run(s))\n\n def testDebugWhileLoopGeneratesMultipleDumps(self):\n with session.Session() as sess:\n num_iter = 10\n\n # \"u\" is the 
Variable being updated in the loop.\n u_name = \"testDumpToFileWhileLoop/u\"\n u_namespace = u_name.split(\"/\")[0]\n\n u_init_val = np.array(11.0)\n u_init = constant_op.constant(u_init_val)\n u = variables.Variable(u_init, name=u_name)\n\n # \"v\" is the increment.\n v_name = \"testDumpToFileWhileLoop/v\"\n v_namespace = v_name.split(\"/\")[0]\n\n v_init_val = np.array(2.0)\n v_init = constant_op.constant(v_init_val)\n v = variables.Variable(v_init, name=v_name)\n\n u.initializer.run()\n v.initializer.run()\n\n i = constant_op.constant(0, name=\"testDumpToFileWhileLoop/i\")\n\n def cond(i):\n return math_ops.less(i, num_iter)\n\n def body(i):\n new_u = state_ops.assign_add(u, v)\n new_i = math_ops.add(i, 1)\n op = control_flow_ops.group(new_u)\n new_i = control_flow_ops.with_dependencies([op], new_i)\n return [new_i]\n\n loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)\n\n # Create RunOptions for debug-watching tensors\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_urls = self._debug_urls()\n\n # Add debug tensor watch for u.\n debug_utils.add_debug_tensor_watch(\n run_options, u_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for v.\n debug_utils.add_debug_tensor_watch(\n run_options, \"%s/read\" % v_name, 0, debug_urls=debug_urls)\n # Add debug tensor watch for while/Identity.\n debug_utils.add_debug_tensor_watch(\n run_options, \"while/Identity\", 0, debug_urls=debug_urls)\n # Add debug tensor watch for while/Add/y.\n debug_utils.add_debug_tensor_watch(\n run_options, \"while/Add/y\", 0, debug_urls=debug_urls)\n\n run_metadata = config_pb2.RunMetadata()\n r = sess.run(loop, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n self.assertEqual(num_iter, r)\n\n u_val_final = sess.run(u)\n self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)\n\n # Verify dump files\n self.assertTrue(os.path.isdir(self._dump_root))\n\n self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))\n self.assertTrue(\n os.path.isdir(os.path.join(self._dump_root, v_namespace, \"v\")))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,\n # and 10 iterations of while/Add/y.\n self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)\n\n # Verify tensor values.\n self.assertAllClose([u_init_val],\n dump.get_tensors(u_name, 0, \"DebugIdentity\"))\n self.assertAllClose([v_init_val],\n dump.get_tensors(\"%s/read\" % v_name, 0,\n \"DebugIdentity\"))\n\n while_id_tensors = dump.get_tensors(\"while/Identity\", 0, \"DebugIdentity\")\n self.assertEqual(10, len(while_id_tensors))\n for k in xrange(len(while_id_tensors)):\n self.assertAllClose(np.array(k), while_id_tensors[k])\n\n # Verify ascending timestamps from the while loops.\n while_id_rel_timestamps = dump.get_rel_timestamps(\"while/Identity\", 0,\n \"DebugIdentity\")\n while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes(\"while/Identity\", 0,\n \"DebugIdentity\")\n self.assertEqual(10, len(while_id_rel_timestamps))\n prev_rel_time = 0\n prev_dump_size_bytes = while_id_dump_sizes_bytes[0]\n for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,\n while_id_dump_sizes_bytes):\n self.assertGreaterEqual(rel_time, prev_rel_time)\n self.assertEqual(dump_size_bytes, prev_dump_size_bytes)\n prev_rel_time = rel_time\n prev_dump_size_bytes = 
dump_size_bytes\n\n # Test querying debug watch keys from node name.\n watch_keys = dump.debug_watch_keys(\"while/Identity\")\n self.assertEqual([\"while/Identity:0:DebugIdentity\"], watch_keys)\n\n # Test querying debug datum instances from debug watch key.\n self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))\n self.assertEqual([], dump.watch_key_to_data(\"foo\"))\n\n def testFindNodesWithBadTensorValues(self):\n with session.Session() as sess:\n u_name = \"testFindNodesWithBadTensorValues/u\"\n v_name = \"testFindNodesWithBadTensorValues/v\"\n w_name = \"testFindNodesWithBadTensorValues/w\"\n x_name = \"testFindNodesWithBadTensorValues/x\"\n y_name = \"testFindNodesWithBadTensorValues/y\"\n z_name = \"testFindNodesWithBadTensorValues/z\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v_init = constant_op.constant([2.0, 1.0])\n v = variables.Variable(v_init, name=v_name)\n\n # Expected output: [0.0, 3.0]\n w = math_ops.subtract(u, v, name=w_name)\n\n # Expected output: [inf, 1.3333]\n x = math_ops.div(u, w, name=x_name)\n\n # Expected output: [nan, 4.0]\n y = math_ops.multiply(w, x, name=y_name)\n\n z = math_ops.multiply(y, y, name=z_name)\n\n u.initializer.run()\n v.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(z, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n def has_bad_value(_, tensor):\n return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))\n\n # Find all \"offending tensors\".\n bad_data = dump.find(has_bad_value)\n\n # Verify that the nodes with bad values are caught through running find\n # on the debug dump.\n self.assertEqual(3, len(bad_data))\n self.assertEqual(x_name, bad_data[0].node_name)\n self.assertEqual(y_name, bad_data[1].node_name)\n self.assertEqual(z_name, bad_data[2].node_name)\n\n # Test first_n kwarg of find(): Find the first offending tensor.\n first_bad_datum = dump.find(has_bad_value, first_n=1)\n\n self.assertEqual(1, len(first_bad_datum))\n self.assertEqual(x_name, first_bad_datum[0].node_name)\n\n def _session_run_for_graph_structure_lookup(self):\n with session.Session() as sess:\n u_name = \"testDumpGraphStructureLookup/u\"\n v_name = \"testDumpGraphStructureLookup/v\"\n w_name = \"testDumpGraphStructureLookup/w\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v = math_ops.add(u, u, name=v_name)\n w = math_ops.add(v, v, name=w_name)\n\n u.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(w, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n return u_name, v_name, w_name, dump\n\n def testGraphStructureLookupGivesDevicesAndNodesInfo(self):\n u_name, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n # Test 
num_devices().\n self.assertEqual(self._expected_num_devices, len(dump.devices()))\n\n # Test node_device().\n self.assertEqual(self._main_device, dump.node_device(u_name))\n\n with self.assertRaisesRegexp(ValueError,\n \"does not exist in partition graphs\"):\n dump.node_device(u_name + \"foo\")\n\n # Test node_exists().\n self.assertTrue(dump.node_exists(u_name))\n self.assertTrue(dump.node_exists(u_name + \"/read\"))\n self.assertFalse(dump.node_exists(u_name + \"/read\" + \"/foo\"))\n\n def testGraphStructureLookupGivesNodesAndAttributes(self):\n u_name, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n u_read_name = u_name + \"/read\"\n\n # Test node name list lookup of the DebugDumpDir object.\n node_names = dump.nodes()\n self.assertTrue(u_name in node_names)\n self.assertTrue(u_read_name in node_names)\n\n # Test querying node attributes.\n u_attr = dump.node_attributes(u_name)\n self.assertEqual(dtypes.float32, u_attr[\"dtype\"].type)\n self.assertEqual(1, len(u_attr[\"shape\"].shape.dim))\n self.assertEqual(2, u_attr[\"shape\"].shape.dim[0].size)\n\n with self.assertRaisesRegexp(ValueError, \"No node named \\\"foo\\\" exists\"):\n dump.node_attributes(\"foo\")\n\n def testGraphStructureLookupGivesDebugWatchKeys(self):\n u_name, v_name, w_name, dump = (\n self._session_run_for_graph_structure_lookup())\n\n # Test querying the debug watch keys with node names.\n self.assertEqual([\"%s:0:DebugIdentity\" % u_name],\n dump.debug_watch_keys(u_name))\n self.assertEqual([\"%s:0:DebugIdentity\" % v_name],\n dump.debug_watch_keys(v_name))\n self.assertEqual([\"%s:0:DebugIdentity\" % w_name],\n dump.debug_watch_keys(w_name))\n self.assertEqual([], dump.debug_watch_keys(\"foo\"))\n\n # Test querying debug datum instances from debug watch.\n u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])\n self.assertEqual(1, len(u_data))\n self.assertEqual(u_name, u_data[0].node_name)\n self.assertEqual(0, u_data[0].output_slot)\n self.assertEqual(\"DebugIdentity\", u_data[0].debug_op)\n self.assertGreaterEqual(u_data[0].timestamp, 0)\n\n self.assertEqual([], dump.watch_key_to_data(\"foo\"))\n\n def testGraphStructureLookupGivesNodeInputsAndRecipients(self):\n u_name, v_name, w_name, dump = (\n self._session_run_for_graph_structure_lookup())\n\n u_read_name = u_name + \"/read\"\n\n # Test the inputs lookup of the DebugDumpDir object.\n self.assertEqual([], dump.node_inputs(u_name))\n self.assertEqual([u_name], dump.node_inputs(u_read_name))\n self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))\n self.assertEqual([v_name] * 2, dump.node_inputs(w_name))\n\n self.assertEqual([], dump.node_inputs(u_name, is_control=True))\n self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))\n self.assertEqual([], dump.node_inputs(v_name, is_control=True))\n self.assertEqual([], dump.node_inputs(w_name, is_control=True))\n\n # Test the outputs recipient lookup of the DebugDumpDir object.\n self.assertTrue(u_read_name in dump.node_recipients(u_name))\n self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))\n self.assertEqual(2, dump.node_recipients(v_name).count(w_name))\n\n self.assertEqual([], dump.node_recipients(u_name, is_control=True))\n self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))\n self.assertEqual([], dump.node_recipients(v_name, is_control=True))\n self.assertEqual([], dump.node_recipients(w_name, is_control=True))\n\n # Test errors raised on invalid node names.\n with self.assertRaisesRegexp(ValueError,\n \"does not 
exist in partition graphs\"):\n dump.node_inputs(u_name + \"foo\")\n\n with self.assertRaisesRegexp(ValueError,\n \"does not exist in partition graphs\"):\n dump.node_recipients(u_name + \"foo\")\n\n # Test transitive_inputs().\n self.assertEqual([], dump.transitive_inputs(u_name))\n self.assertEqual([u_name], dump.transitive_inputs(u_read_name))\n self.assertEqual(\n set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))\n self.assertEqual(\n set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))\n\n with self.assertRaisesRegexp(ValueError,\n \"does not exist in partition graphs\"):\n dump.transitive_inputs(u_name + \"foo\")\n\n def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):\n _, _, _, dump = self._session_run_for_graph_structure_lookup()\n\n # Now load the dump again, without the partition graphs, so we can check\n # errors are not raised because the partition graphs are loaded from the\n # dump directory.\n dump = debug_data.DebugDumpDir(self._dump_root, validate=False)\n self.assertTrue(dump.loaded_partition_graphs())\n\n def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):\n with session.Session() as sess:\n u_name = \"testDumpCausalityCheck/u\"\n v_name = \"testDumpCausalityCheck/v\"\n w_name = \"testDumpCausalityCheck/w\"\n\n u_init = constant_op.constant([2.0, 4.0])\n u = variables.Variable(u_init, name=u_name)\n v = math_ops.add(u, u, name=v_name)\n w = math_ops.add(v, v, name=w_name)\n\n u.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(w, options=run_options, run_metadata=run_metadata)\n\n self.assertEqual(self._expected_partition_graph_count,\n len(run_metadata.partition_graphs))\n\n # First, loading the original dump without supplying the\n # partition_graphs should not cause a LookupError, validation occurs\n # only with partition_graphs loaded.\n debug_data.DebugDumpDir(self._dump_root)\n\n # Now, loading the original dump with partition graphs supplied should\n # succeed. The validation should pass quietly.\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Get the dump file names and compute their timestamps.\n self.assertEqual(\n 1, len(dump.get_tensor_file_paths(u_name, 0, \"DebugIdentity\")))\n u_file_path = dump.get_tensor_file_paths(u_name, 0, \"DebugIdentity\")[0]\n\n self.assertEqual(\n 1, len(dump.get_tensor_file_paths(v_name, 0, \"DebugIdentity\")))\n v_file_path = dump.get_tensor_file_paths(v_name, 0, \"DebugIdentity\")[0]\n\n u_timestamp = int(u_file_path[u_file_path.rindex(\"_\") + 1:])\n v_timestamp = int(v_file_path[v_file_path.rindex(\"_\") + 1:])\n\n # Swap the time stamps\n new_u_file_path = u_file_path[:u_file_path.rindex(\n \"_\")] + \"_%d\" % v_timestamp\n new_v_file_path = v_file_path[:v_file_path.rindex(\n \"_\")] + \"_%d\" % u_timestamp\n\n os.rename(u_file_path, new_u_file_path)\n os.rename(v_file_path, new_v_file_path)\n\n # Load the dump directory again. 
Now a ValueError is expected to be\n # raised due to the timestamp swap.\n with self.assertRaisesRegexp(ValueError, \"Causality violated\"):\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Loading the dump directory with kwarg \"validate\" set explicitly to\n # False should get rid of the error.\n dump = debug_data.DebugDumpDir(\n self._dump_root,\n partition_graphs=run_metadata.partition_graphs,\n validate=False)\n\n def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):\n with session.Session() as sess:\n x_name = \"oneOfTwoSlots/x\"\n u_name = \"oneOfTwoSlots/u\"\n v_name = \"oneOfTwoSlots/v\"\n w_name = \"oneOfTwoSlots/w\"\n y_name = \"oneOfTwoSlots/y\"\n\n x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)\n sess.run(x.initializer)\n\n unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)\n\n v = math_ops.add(unique_x, unique_x, name=v_name)\n w = math_ops.add(indices, indices, name=w_name)\n y = math_ops.add(w, w, name=y_name)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n # Watch only the first output slot of u, even though it has two output\n # slots.\n debug_utils.add_debug_tensor_watch(\n run_options, u_name, 0, debug_urls=self._debug_urls())\n debug_utils.add_debug_tensor_watch(\n run_options, w_name, 0, debug_urls=self._debug_urls())\n debug_utils.add_debug_tensor_watch(\n run_options, y_name, 0, debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run([v, y], options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root,\n partition_graphs=run_metadata.partition_graphs,\n validate=True)\n\n self.assertAllClose([1, 3, 7],\n dump.get_tensors(u_name, 0, \"DebugIdentity\")[0])\n\n def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):\n \"\"\"Test watching output slots not attached to any outgoing edges.\"\"\"\n\n with session.Session() as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n u = constant_op.constant(u_init_val, shape=[2, 2], name=\"u\")\n\n # Create a control edge from a node with an output: From u to z.\n # Node u will get executed only because of the control edge. The output\n # tensor u:0 is not attached to any outgoing edge in the graph. 
This test\n # checks that the debugger can watch such a tensor.\n with ops.control_dependencies([u]):\n z = control_flow_ops.no_op(name=\"z\")\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(z, options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Assert that the DebugIdentity watch on u works properly.\n self.assertEqual(1, len(dump.dumped_tensor_data))\n datum = dump.dumped_tensor_data[0]\n self.assertEqual(\"u\", datum.node_name)\n self.assertEqual(0, datum.output_slot)\n self.assertEqual(\"DebugIdentity\", datum.debug_op)\n self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())\n\n def testWatchingVariableUpdateOpsSeesUpdatedValues(self):\n \"\"\"Watch output slots on Variable-updating ops, with no emitted edges.\"\"\"\n\n with session.Session() as sess:\n u_init = constant_op.constant(10.0)\n u = variables.Variable(u_init, name=\"gdo/u\")\n v_init = constant_op.constant(20.0)\n v = variables.Variable(v_init, name=\"gdo/v\")\n\n w = math_ops.multiply(u, v, name=\"gdo/w\")\n # gdo stands for GradientDescentOptimizer.\n\n train_op = gradient_descent.GradientDescentOptimizer(\n learning_rate=0.1).minimize(\n w, name=\"gdo/train\")\n\n u.initializer.run()\n v.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(train_op, options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n update_u_data = dump.watch_key_to_data(\n \"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity\")\n self.assertEqual(1, len(update_u_data))\n\n # Gradient descent on u: w = u * v, so dw / du = v.\n # Updated value of u should be:\n # 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0\n self.assertAllClose(8.0, update_u_data[0].get_tensor())\n\n update_v_data = dump.watch_key_to_data(\n \"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity\")\n self.assertEqual(1, len(update_v_data))\n\n # Gradient descent on u: w = u * v, so dw / dv = u.\n # Updated value of u should be:\n # 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0\n self.assertAllClose(19.0, update_v_data[0].get_tensor())\n\n # Verify that the Variables u and v are updated properly.\n self.assertAllClose(8.0, sess.run(u))\n self.assertAllClose(19.0, sess.run(v))\n\n def testAllowsWatchingUnconnectedOutputTensor(self):\n \"\"\"Watch an output slot not emitting any edges.\n\n (Not even control edges from the node.)\n \"\"\"\n\n with session.Session() as sess:\n x_init = constant_op.constant([2, 2, 3, 5, 5])\n x = variables.Variable(x_init, name=\"unconnected/x\")\n\n # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the\n # graph. 
Let the debugger watch the unused slot 1.\n unique_x, _ = array_ops.unique(x, name=\"unconnected/unique_x\")\n y = math_ops.add(unique_x, [0, 1, 2], name=\"unconnected/y\")\n\n x.initializer.run()\n\n # Verify that only slot 0 of unique_x has recipients, while slot 1 of the\n # same node does not have recipients.\n unique_x_slot_0_recipients = []\n unique_x_slot_1_recipients = []\n for op in sess.graph.get_operations():\n for inp in op.inputs:\n if inp.name == \"unconnected/unique_x:0\":\n unique_x_slot_0_recipients.append(op.name)\n elif inp.name == \"unconnected/unique_x:1\":\n unique_x_slot_1_recipients.append(op.name)\n\n self.assertEqual([\"unconnected/y\"], unique_x_slot_0_recipients)\n self.assertEqual([], unique_x_slot_1_recipients)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n run_metadata = config_pb2.RunMetadata()\n result = sess.run(y, options=run_options, run_metadata=run_metadata)\n self.assertAllClose([2, 4, 7], result)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Assert that the connected slot (slot 0) is dumped properly.\n unique_x_slot_0_dumps = dump.watch_key_to_data(\n \"unconnected/unique_x:0:DebugIdentity\")\n self.assertEqual(1, len(unique_x_slot_0_dumps))\n self.assertEqual(\"unconnected/unique_x\",\n unique_x_slot_0_dumps[0].node_name)\n self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)\n self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())\n\n # Assert that the unconnected slot (slot 1) is dumped properly.\n unique_x_slot_1_dumps = dump.watch_key_to_data(\n \"unconnected/unique_x:1:DebugIdentity\")\n self.assertEqual(1, len(unique_x_slot_1_dumps))\n self.assertEqual(\"unconnected/unique_x\",\n unique_x_slot_1_dumps[0].node_name)\n self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)\n self.assertAllClose([0, 0, 1, 2, 2],\n unique_x_slot_1_dumps[0].get_tensor())\n\n def testSuccessiveDebuggingRunsIncreasesCounters(self):\n \"\"\"Test repeated Session.run() calls with debugger increments counters.\"\"\"\n\n with session.Session() as sess:\n ph = array_ops.placeholder(dtypes.float32, name=\"successive/ph\")\n x = array_ops.transpose(ph, name=\"mismatch/x\")\n y = array_ops.squeeze(ph, name=\"mismatch/y\")\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=self._debug_urls(), global_step=1)\n\n sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)\n dump1 = debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(1, dump1.core_metadata.global_step)\n self.assertGreaterEqual(dump1.core_metadata.session_run_count, 0)\n self.assertEqual(0, dump1.core_metadata.executor_step_count)\n self.assertEqual([ph.name], dump1.core_metadata.input_names)\n self.assertEqual([x.name], dump1.core_metadata.output_names)\n self.assertEqual([], dump1.core_metadata.target_nodes)\n shutil.rmtree(self._dump_root)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=self._debug_urls(), global_step=2)\n\n # Calling run() with the same feed, same output and same debug watch\n # options should increment both session_run_count and\n # executor_step_count.\n sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)\n dump2 = 
debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(2, dump2.core_metadata.global_step)\n self.assertEqual(dump1.core_metadata.session_run_count + 1,\n dump2.core_metadata.session_run_count)\n self.assertEqual(dump1.core_metadata.executor_step_count + 1,\n dump2.core_metadata.executor_step_count)\n self.assertEqual([ph.name], dump2.core_metadata.input_names)\n self.assertEqual([x.name], dump2.core_metadata.output_names)\n self.assertEqual([], dump2.core_metadata.target_nodes)\n shutil.rmtree(self._dump_root)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)\n\n # Calling run() with a different output should increment\n # session_run_count, but not executor_step_count.\n sess.run(y, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)\n dump3 = debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(3, dump3.core_metadata.global_step)\n self.assertEqual(dump2.core_metadata.session_run_count + 1,\n dump3.core_metadata.session_run_count)\n self.assertEqual(0, dump3.core_metadata.executor_step_count)\n self.assertEqual([ph.name], dump3.core_metadata.input_names)\n self.assertEqual([y.name], dump3.core_metadata.output_names)\n self.assertEqual([], dump3.core_metadata.target_nodes)\n\n def testDebuggingDuringOpError(self):\n \"\"\"Test the debug tensor dumping when error occurs in graph runtime.\"\"\"\n\n with session.Session() as sess:\n ph = array_ops.placeholder(dtypes.float32, name=\"mismatch/ph\")\n x = array_ops.transpose(ph, name=\"mismatch/x\")\n m = constant_op.constant(\n np.array(\n [[1.0, 2.0]], dtype=np.float32), name=\"mismatch/m\")\n y = math_ops.matmul(m, x, name=\"mismatch/y\")\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=self._debug_urls())\n\n with self.assertRaises(errors.OpError):\n sess.run(y,\n options=run_options,\n feed_dict={ph: np.array([[-3.0], [0.0]])})\n\n dump = debug_data.DebugDumpDir(self._dump_root)\n\n self.assertGreaterEqual(dump.core_metadata.session_run_count, 0)\n self.assertGreaterEqual(dump.core_metadata.executor_step_count, 0)\n self.assertEqual([ph.name], dump.core_metadata.input_names)\n self.assertEqual([y.name], dump.core_metadata.output_names)\n self.assertEqual([], dump.core_metadata.target_nodes)\n\n # Despite the fact that the run() call errored out and partition_graphs\n # are not available via run_metadata, the partition graphs should still\n # have been loaded from the dump directory.\n self.assertTrue(dump.loaded_partition_graphs())\n\n m_dumps = dump.watch_key_to_data(\"mismatch/m:0:DebugIdentity\")\n self.assertEqual(1, len(m_dumps))\n self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())\n\n x_dumps = dump.watch_key_to_data(\"mismatch/x:0:DebugIdentity\")\n self.assertEqual(1, len(x_dumps))\n self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())\n\n def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):\n with session.Session() as sess:\n a = variables.Variable(\n [\n np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,\n -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan\n ],\n dtype=np.float32,\n name=\"numeric_summary/a\")\n b = variables.Variable(\n [0.0] * 18, dtype=np.float32, name=\"numeric_summary/b\")\n c = math_ops.add(a, b, name=\"numeric_summary/c\")\n\n 
sess.run(variables.global_variables_initializer())\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=self._debug_urls())\n\n sess.run(c, options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n self.assertTrue(dump.loaded_partition_graphs())\n\n self.assertAllClose([[\n 1.0, 18.0, 2.0, 2.0, 3.0, 2.0, 5.0, 4.0, -3.0, 7.0, 0.85714286,\n 8.97959184\n ]], dump.get_tensors(\"numeric_summary/a/read\", 0, \"DebugNumericSummary\"))\n\n def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):\n with session.Session() as sess:\n a = variables.Variable(\n [42], dtype=np.float32, name=\"numeric_summary_uninit/a\")\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=self._debug_urls())\n\n sess.run(a.initializer, options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n self.assertTrue(dump.loaded_partition_graphs())\n\n # DebugNumericSummary output should reflect the uninitialized state of\n # the watched tensor.\n numeric_summary = dump.get_tensors(\"numeric_summary_uninit/a\", 0,\n \"DebugNumericSummary\")[0]\n self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n numeric_summary[0:8])\n self.assertTrue(np.isinf(numeric_summary[8]))\n self.assertGreater(numeric_summary[8], 0.0)\n self.assertTrue(np.isinf(numeric_summary[9]))\n self.assertLess(numeric_summary[9], 0.0)\n self.assertTrue(np.isnan(numeric_summary[10]))\n self.assertTrue(np.isnan(numeric_summary[11]))\n\n def testDebugQueueOpsDoesNotoErrorOut(self):\n with session.Session() as sess:\n q = data_flow_ops.FIFOQueue(3, \"float\", name=\"fifo_queue\")\n q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name=\"enqueue_many\")\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_urls=self._debug_urls())\n\n sess.run(q_init, options=run_options, run_metadata=run_metadata)\n\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n self.assertTrue(dump.loaded_partition_graphs())\n\n self.assertIsNone(dump.get_tensors(\"fifo_queue\", 0, \"DebugIdentity\")[0])\n self.assertAllClose(\n [101.0, 202.0, 303.0],\n dump.get_tensors(\"enqueue_many/component_0\", 0, \"DebugIdentity\")[0])\n\n def testLookUpNodePythonTracebackWorks(self):\n with session.Session() as sess:\n u_init = constant_op.constant(10.0)\n u = variables.Variable(u_init, name=\"traceback/u\")\n v_init = constant_op.constant(20.0)\n v = variables.Variable(v_init, name=\"traceback/v\")\n\n w = math_ops.multiply(u, v, name=\"traceback/w\")\n\n sess.run(variables.global_variables_initializer())\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=self._debug_urls())\n\n sess.run(w, options=run_options, run_metadata=run_metadata)\n dump = debug_data.DebugDumpDir(\n self._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Prior to setting the 
Python graph, attempts to do traceback lookup\n # should lead to exceptions.\n with self.assertRaisesRegexp(\n LookupError, \"Python graph is not available for traceback lookup\"):\n dump.node_traceback(\"traceback/w\")\n\n dump.set_python_graph(sess.graph)\n\n # After setting the Python graph, attempts to look up nonexistent nodes\n # should lead to exceptions.\n with self.assertRaisesRegexp(KeyError,\n r\"Cannot find node \\\"foo\\\" in Python graph\"):\n dump.node_traceback(\"foo\")\n\n # Lookup should work with node name input.\n traceback = dump.node_traceback(\"traceback/w\")\n self.assertIsInstance(traceback, list)\n self.assertGreater(len(traceback), 0)\n for trace in traceback:\n self.assertIsInstance(trace, tuple)\n\n # Lookup should also work with tensor name input.\n traceback = dump.node_traceback(\"traceback/w:0\")\n self.assertIsInstance(traceback, list)\n self.assertGreater(len(traceback), 0)\n for trace in traceback:\n self.assertIsInstance(trace, tuple)\n\n\nclass DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):\n \"\"\"Test for debugging concurrent Session.run() calls.\"\"\"\n\n def _get_concurrent_debug_urls(self):\n \"\"\"Abstract method to generate debug URLs for concurrent debugged runs.\"\"\"\n raise NotImplementedError(\n \"_get_concurrent_debug_urls is not implemented in the base test class\")\n\n def testDebugConcurrentVariableUpdates(self):\n if test.is_gpu_available():\n self.skipTest(\"No testing concurrent runs on a single GPU.\")\n\n with session.Session() as sess:\n v = variables.Variable(30.0, name=\"v\")\n constants = []\n for i in xrange(self._num_concurrent_runs):\n constants.append(constant_op.constant(1.0, name=\"c%d\" % i))\n incs = [\n state_ops.assign_add(\n v, c, use_locking=True, name=(\"inc%d\" % i))\n for (i, c) in enumerate(constants)\n ]\n sess.run(v.initializer)\n\n concurrent_debug_urls = self._get_concurrent_debug_urls()\n\n def inc_job(index):\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options, sess.graph, debug_urls=concurrent_debug_urls[index])\n for _ in xrange(100):\n sess.run(incs[index], options=run_options)\n\n inc_threads = []\n for index in xrange(self._num_concurrent_runs):\n inc_thread = threading.Thread(target=functools.partial(inc_job, index))\n inc_thread.start()\n inc_threads.append(inc_thread)\n for inc_thread in inc_threads:\n inc_thread.join()\n\n self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,\n sess.run(v))\n\n all_session_run_counts = []\n for index in xrange(self._num_concurrent_runs):\n dump = debug_data.DebugDumpDir(self._dump_roots[index])\n self.assertTrue(dump.loaded_partition_graphs())\n\n v_data = dump.get_tensors(\"v\", 0, \"DebugIdentity\")\n self.assertEqual(100, len(v_data))\n\n # Examine all the core metadata files\n core_metadata_files = glob.glob(\n os.path.join(self._dump_roots[index], \"_tfdbg_core*\"))\n\n timestamps = []\n session_run_counts = []\n executor_step_counts = []\n for core_metadata_file in core_metadata_files:\n with open(core_metadata_file, \"rb\") as f:\n event = event_pb2.Event()\n event.ParseFromString(f.read())\n core_metadata = (\n debug_data.extract_core_metadata_from_event_proto(event))\n timestamps.append(event.wall_time)\n session_run_counts.append(core_metadata.session_run_count)\n executor_step_counts.append(core_metadata.executor_step_count)\n\n all_session_run_counts.extend(session_run_counts)\n\n # Assert that executor_step_count increases by one at a time.\n executor_step_counts = 
zip(timestamps, executor_step_counts)\n executor_step_counts = sorted(executor_step_counts, key=lambda x: x[0])\n for i in xrange(len(executor_step_counts) - 1):\n self.assertEquals(executor_step_counts[i][1] + 1,\n executor_step_counts[i + 1][1])\n\n # Assert that session_run_count increase monotonically.\n session_run_counts = zip(timestamps, session_run_counts)\n session_run_counts = sorted(session_run_counts, key=lambda x: x[0])\n for i in xrange(len(session_run_counts) - 1):\n self.assertGreater(session_run_counts[i + 1][1],\n session_run_counts[i][1])\n\n # Assert that the session_run_counts from the concurrent run() calls are\n # all unique.\n self.assertEqual(len(all_session_run_counts),\n len(set(all_session_run_counts)))\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for KMeans.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport sys\nimport time\n\n# TODO: #6568 Remove this hack that makes dlopen() not crash.\nif hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):\n import ctypes\n sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)\n\nimport numpy as np\nfrom sklearn.cluster import KMeans as SklearnKMeans\n\nfrom tensorflow.contrib import factorization\nfrom tensorflow.contrib.learn.python import learn\nfrom tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import benchmark\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import input as input_lib\n\nFLAGS = flags.FLAGS\n\n\ndef normalize(x):\n return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n\n\ndef cosine_similarity(x, y):\n return np.dot(normalize(x), np.transpose(normalize(y)))\n\n\ndef make_random_centers(num_centers, num_dims, center_norm=500):\n return np.round(\n np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)\n\n\ndef make_random_points(centers, num_points, max_offset=20):\n num_centers, num_dims = centers.shape\n assignments = np.random.choice(num_centers, num_points)\n offsets = np.round(\n np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)\n return (centers[assignments] + offsets, assignments,\n np.add.reduce(offsets * offsets, 1))\n\n\nclass KMeansTestBase(test.TestCase):\n\n def input_fn(self, batch_size=None, points=None, randomize=None,\n num_epochs=None):\n \"\"\"Returns an input_fn that randomly selects batches from given points.\"\"\"\n batch_size = batch_size or self.batch_size\n points = points if points is not None else self.points\n num_points = points.shape[0]\n if randomize is None:\n randomize = (self.use_mini_batch and\n self.mini_batch_steps_per_iteration <= 1)\n def _fn():\n x = constant_op.constant(points)\n if batch_size == num_points:\n return input_lib.limit_epochs(x, num_epochs=num_epochs), None\n if randomize:\n indices = random_ops.random_uniform(\n constant_op.constant([batch_size]),\n minval=0, maxval=num_points-1,\n dtype=dtypes.int32,\n seed=10)\n else:\n # We need to 
cycle through the indices sequentially. We create a queue\n # to maintain the list of indices.\n q = data_flow_ops.FIFOQueue(self.num_points, dtypes.int32, ())\n # Conditionally initialize the Queue.\n def _init_q():\n with ops.control_dependencies([q.enqueue_many(\n math_ops.range(self.num_points))]):\n return control_flow_ops.no_op()\n init_q = control_flow_ops.cond(q.size() <= 0,\n _init_q,\n control_flow_ops.no_op)\n with ops.control_dependencies([init_q]):\n offsets = q.dequeue_many(self.batch_size)\n with ops.control_dependencies([q.enqueue_many(offsets)]):\n indices = array_ops.identity(offsets)\n batch = array_ops.gather(x, indices)\n return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)\n return _fn\n\n @staticmethod\n def config(tf_random_seed):\n return run_config.RunConfig(tf_random_seed=tf_random_seed)\n\n @property\n def batch_size(self):\n return self.num_points\n\n @property\n def use_mini_batch(self):\n return False\n\n @property\n def mini_batch_steps_per_iteration(self):\n return 1\n\n\nclass KMeansTest(KMeansTestBase):\n\n def setUp(self):\n np.random.seed(3)\n self.num_centers = 5\n self.num_dims = 2\n self.num_points = 1000\n self.true_centers = make_random_centers(self.num_centers, self.num_dims)\n self.points, _, self.scores = make_random_points(self.true_centers,\n self.num_points)\n self.true_score = np.add.reduce(self.scores)\n\n def _kmeans(self, relative_tolerance=None):\n return kmeans_lib.KMeansClustering(\n self.num_centers,\n initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,\n distance_metric=factorization.SQUARED_EUCLIDEAN_DISTANCE,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n random_seed=24,\n relative_tolerance=relative_tolerance)\n\n def test_clusters(self):\n kmeans = self._kmeans()\n kmeans.fit(input_fn=self.input_fn(), steps=1)\n clusters = kmeans.clusters()\n self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])\n\n def test_fit(self):\n kmeans = self._kmeans()\n kmeans.fit(input_fn=self.input_fn(), steps=1)\n score1 = kmeans.score(\n input_fn=self.input_fn(batch_size=self.num_points), steps=1)\n steps = 10 * self.num_points // self.batch_size\n kmeans.fit(input_fn=self.input_fn(), steps=steps)\n score2 = kmeans.score(\n input_fn=self.input_fn(batch_size=self.num_points), steps=1)\n self.assertTrue(score1 > score2)\n self.assertNear(self.true_score, score2, self.true_score * 0.05)\n\n def test_monitor(self):\n if self.use_mini_batch:\n # We don't test for use_mini_batch case since the loss value can be noisy.\n return\n kmeans = kmeans_lib.KMeansClustering(\n self.num_centers,\n initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,\n distance_metric=factorization.SQUARED_EUCLIDEAN_DISTANCE,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n config=learn.RunConfig(tf_random_seed=14),\n random_seed=12,\n relative_tolerance=1e-4)\n\n kmeans.fit(\n input_fn=self.input_fn(),\n # Force it to train until the relative tolerance monitor stops it.\n steps=None)\n score = kmeans.score(\n input_fn=self.input_fn(batch_size=self.num_points), steps=1)\n self.assertNear(self.true_score, score, self.true_score * 0.01)\n\n def test_infer(self):\n kmeans = self._kmeans()\n # Make a call to fit to initialize the cluster centers.\n max_steps = 1\n kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)\n clusters = kmeans.clusters()\n\n # Make a small test set\n num_points = 10\n points, true_assignments, 
true_offsets = make_random_points(clusters,\n num_points)\n # Test predict\n assignments = list(kmeans.predict_cluster_idx(input_fn=self.input_fn(\n batch_size=num_points, points=points, num_epochs=1)))\n self.assertAllEqual(assignments, true_assignments)\n\n # Test score\n score = kmeans.score(\n input_fn=lambda: (constant_op.constant(points), None), steps=1)\n self.assertNear(score, np.sum(true_offsets), 0.01 * score)\n\n # Test transform\n transform = kmeans.transform(\n input_fn=lambda: (constant_op.constant(points), None))\n true_transform = np.maximum(\n 0,\n np.sum(np.square(points), axis=1, keepdims=True) - 2 * np.dot(\n points, np.transpose(clusters)) +\n np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))\n self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)\n\n def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):\n points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)\n\n with self.assertRaisesOpError('less'):\n kmeans = learn.KMeansClustering(\n num_clusters=3,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n initial_clusters=factorization.RANDOM_INIT)\n kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),\n steps=10)\n\n def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(\n self):\n points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)\n\n with self.assertRaisesOpError(AssertionError):\n kmeans = learn.KMeansClustering(\n num_clusters=3,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT)\n kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),\n steps=10)\n\n\nclass MiniBatchKMeansTest(KMeansTest):\n\n @property\n def batch_size(self):\n return 50\n\n @property\n def use_mini_batch(self):\n return True\n\n\nclass FullBatchAsyncKMeansTest(KMeansTest):\n\n @property\n def batch_size(self):\n return 50\n\n @property\n def use_mini_batch(self):\n return True\n\n @property\n def mini_batch_steps_per_iteration(self):\n return self.num_points // self.batch_size\n\n\nclass KMeansCosineDistanceTest(KMeansTestBase):\n\n def setUp(self):\n self.points = np.array(\n [[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],\n [0.1, 3], [0.2, 4]],\n dtype=np.float32)\n self.num_points = self.points.shape[0]\n self.true_centers = np.array(\n [\n normalize(\n np.mean(\n normalize(self.points)[0:4, :], axis=0, keepdims=True))[0],\n normalize(\n np.mean(\n normalize(self.points)[4:, :], axis=0, keepdims=True))[0]\n ],\n dtype=np.float32)\n self.true_assignments = np.array([0] * 4 + [1] * 4)\n self.true_score = len(self.points) - np.tensordot(\n normalize(self.points), self.true_centers[self.true_assignments])\n\n self.num_centers = 2\n self.kmeans = kmeans_lib.KMeansClustering(\n self.num_centers,\n initial_clusters=factorization.RANDOM_INIT,\n distance_metric=factorization.COSINE_DISTANCE,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n config=self.config(3))\n\n def test_fit(self):\n max_steps = 10 * self.num_points // self.batch_size\n self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)\n centers = normalize(self.kmeans.clusters())\n centers = centers[centers[:, 0].argsort()]\n true_centers = self.true_centers[self.true_centers[:, 0].argsort()]\n self.assertAllClose(centers, true_centers, atol=0.04)\n\n def 
test_transform(self):\n self.kmeans.fit(input_fn=self.input_fn(), steps=10)\n centers = normalize(self.kmeans.clusters())\n true_transform = 1 - cosine_similarity(self.points, centers)\n transform = self.kmeans.transform(input_fn=self.input_fn(\n batch_size=self.num_points))\n self.assertAllClose(transform, true_transform, atol=1e-3)\n\n def test_predict(self):\n max_steps = 10 * self.num_points // self.batch_size\n self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)\n centers = normalize(self.kmeans.clusters())\n\n assignments = list(self.kmeans.predict_cluster_idx(\n input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))\n self.assertAllClose(\n centers[assignments],\n self.true_centers[self.true_assignments],\n atol=1e-2)\n\n centers = centers[centers[:, 0].argsort()]\n true_centers = self.true_centers[self.true_centers[:, 0].argsort()]\n self.assertAllClose(centers, true_centers, atol=0.04)\n score = self.kmeans.score(input_fn=self.input_fn(\n batch_size=self.num_points), steps=1)\n self.assertAllClose(score, self.true_score, atol=1e-2)\n\n def test_predict_kmeans_plus_plus(self):\n # Most points are concetrated near one center. KMeans++ is likely to find\n # the less populated centers.\n points = np.array(\n [[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],\n [-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],\n [-3., -3.]],\n dtype=np.float32)\n true_centers = np.array(\n [\n normalize(\n np.mean(\n normalize(points)[0:2, :], axis=0, keepdims=True))[0],\n normalize(\n np.mean(\n normalize(points)[2:4, :], axis=0, keepdims=True))[0],\n normalize(np.mean(\n normalize(points)[4:, :], axis=0, keepdims=True))[0]\n ],\n dtype=np.float32)\n true_assignments = [0] * 2 + [1] * 2 + [2] * 8\n true_score = len(points) - np.tensordot(\n normalize(points), true_centers[true_assignments])\n\n kmeans = kmeans_lib.KMeansClustering(\n 3,\n initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,\n distance_metric=factorization.COSINE_DISTANCE,\n use_mini_batch=self.use_mini_batch,\n mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,\n config=self.config(3))\n kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)\n\n centers = normalize(kmeans.clusters())\n self.assertAllClose(\n sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)\n\n def _input_fn():\n return (\n input_lib.limit_epochs(constant_op.constant(points), num_epochs=1),\n None)\n assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))\n self.assertAllClose(\n centers[assignments], true_centers[true_assignments], atol=1e-2)\n\n score = kmeans.score(\n input_fn=lambda: (constant_op.constant(points), None), steps=1)\n self.assertAllClose(score, true_score, atol=1e-2)\n\n\nclass MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):\n\n @property\n def batch_size(self):\n return 2\n\n @property\n def use_mini_batch(self):\n return True\n\n\nclass FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):\n\n @property\n def batch_size(self):\n return 2\n\n @property\n def use_mini_batch(self):\n return True\n\n @property\n def mini_batch_steps_per_iteration(self):\n return self.num_points // self.batch_size\n\n\nclass KMeansBenchmark(benchmark.Benchmark):\n \"\"\"Base class for benchmarks.\"\"\"\n\n def SetUp(self,\n dimension=50,\n num_clusters=50,\n points_per_cluster=10000,\n center_norm=500,\n cluster_width=20):\n np.random.seed(123456)\n self.num_clusters = num_clusters\n self.num_points = num_clusters * points_per_cluster\n 
self.centers = make_random_centers(\n self.num_clusters, dimension, center_norm=center_norm)\n self.points, _, scores = make_random_points(\n self.centers, self.num_points, max_offset=cluster_width)\n self.score = float(np.sum(scores))\n\n def _report(self, num_iters, start, end, scores):\n print(scores)\n self.report_benchmark(\n iters=num_iters,\n wall_time=(end - start) / num_iters,\n extras={'true_sum_squared_distances': self.score,\n 'fit_scores': scores})\n\n def _fit(self, num_iters=10):\n pass\n\n def benchmark_01_2dim_5center_500point(self):\n self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)\n self._fit()\n\n def benchmark_02_20dim_20center_10kpoint(self):\n self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)\n self._fit()\n\n def benchmark_03_100dim_50center_50kpoint(self):\n self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)\n self._fit()\n\n def benchmark_03_100dim_50center_50kpoint_unseparated(self):\n self.SetUp(\n dimension=100,\n num_clusters=50,\n points_per_cluster=1000,\n cluster_width=250)\n self._fit()\n\n def benchmark_04_100dim_500center_500kpoint(self):\n self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)\n self._fit(num_iters=4)\n\n def benchmark_05_100dim_500center_500kpoint_unseparated(self):\n self.SetUp(\n dimension=100,\n num_clusters=500,\n points_per_cluster=1000,\n cluster_width=250)\n self._fit(num_iters=4)\n\n\nclass TensorflowKMeansBenchmark(KMeansBenchmark):\n\n def _fit(self, num_iters=10):\n scores = []\n start = time.time()\n for i in range(num_iters):\n print('Starting tensorflow KMeans: %d' % i)\n tf_kmeans = kmeans_lib.KMeansClustering(\n self.num_clusters,\n initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,\n kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),\n random_seed=i * 42,\n config=run_config.RunConfig(tf_random_seed=3))\n tf_kmeans.fit(input_fn=lambda: (constant_op.constant(self.points), None),\n steps=50,\n relative_tolerance=1e-6)\n _ = tf_kmeans.clusters()\n scores.append(\n tf_kmeans.score(\n input_fn=lambda: (constant_op.constant(self.points), None),\n steps=1))\n self._report(num_iters, start, time.time(), scores)\n\n\nclass SklearnKMeansBenchmark(KMeansBenchmark):\n\n def _fit(self, num_iters=10):\n scores = []\n start = time.time()\n for i in range(num_iters):\n print('Starting sklearn KMeans: %d' % i)\n sklearn_kmeans = SklearnKMeans(\n n_clusters=self.num_clusters,\n init='k-means++',\n max_iter=50,\n n_init=1,\n tol=1e-4,\n random_state=i * 42)\n sklearn_kmeans.fit(self.points)\n scores.append(sklearn_kmeans.inertia_)\n self._report(num_iters, start, time.time(), scores)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Mixture distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import categorical\nfrom tensorflow.contrib.distributions.python.ops import distribution\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\n\n\nclass Mixture(distribution.Distribution):\n \"\"\"Mixture distribution.\n\n The `Mixture` object implements batched mixture distributions.\n The mixture model is defined by a `Categorical` distribution (the mixture)\n and a python list of `Distribution` objects.\n\n Methods supported include `log_prob`, `prob`, `mean`, `sample`, and\n `entropy_lower_bound`.\n \"\"\"\n\n def __init__(self,\n cat,\n components,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Mixture\"):\n \"\"\"Initialize a Mixture distribution.\n\n A `Mixture` is defined by a `Categorical` (`cat`, representing the\n mixture probabilities) and a list of `Distribution` objects\n all having matching dtype, batch shape, event shape, and continuity\n properties (the components).\n\n The `num_classes` of `cat` must be possible to infer at graph construction\n time and match `len(components)`.\n\n Args:\n cat: A `Categorical` distribution instance, representing the probabilities\n of `distributions`.\n components: A list or tuple of `Distribution` instances.\n Each instance must have the same type, be defined on the same domain,\n and have matching `event_shape` and `batch_shape`.\n validate_args: Python `bool`, default `False`. If `True`, raise a runtime\n error if batch or event ranks are inconsistent between cat and any of\n the distributions. This is only checked if the ranks cannot be\n determined statically at graph construction time.\n allow_nan_stats: Boolean, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. 
If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution (optional).\n\n Raises:\n TypeError: If cat is not a `Categorical`, or `components` is not\n a list or tuple, or the elements of `components` are not\n instances of `Distribution`, or do not have matching `dtype`.\n ValueError: If `components` is an empty list or tuple, or its\n elements do not have a statically known event rank.\n If `cat.num_classes` cannot be inferred at graph creation time,\n or the constant value of `cat.num_classes` is not equal to\n `len(components)`, or all `components` and `cat` do not have\n matching static batch shapes, or all components do not\n have matching static event shapes.\n \"\"\"\n parameters = locals()\n if not isinstance(cat, categorical.Categorical):\n raise TypeError(\"cat must be a Categorical distribution, but saw: %s\" %\n cat)\n if not components:\n raise ValueError(\"components must be a non-empty list or tuple\")\n if not isinstance(components, (list, tuple)):\n raise TypeError(\"components must be a list or tuple, but saw: %s\" %\n components)\n if not all(isinstance(c, distribution.Distribution) for c in components):\n raise TypeError(\n \"all entries in components must be Distribution instances\"\n \" but saw: %s\" % components)\n\n dtype = components[0].dtype\n if not all(d.dtype == dtype for d in components):\n raise TypeError(\"All components must have the same dtype, but saw \"\n \"dtypes: %s\" % [(d.name, d.dtype) for d in components])\n is_continuous = components[0].is_continuous\n if not all(d.is_continuous == is_continuous for d in components):\n raise TypeError(\n \"All components must either be continuous or not, but continuity \"\n \"values are: %s\" % [(d.name, d.is_continuous) for d in components])\n static_event_shape = components[0].event_shape\n static_batch_shape = cat.batch_shape\n for d in components:\n static_event_shape = static_event_shape.merge_with(d.event_shape)\n static_batch_shape = static_batch_shape.merge_with(d.batch_shape)\n if static_event_shape.ndims is None:\n raise ValueError(\n \"Expected to know rank(event_shape) from components, but \"\n \"none of the components provide a static number of ndims\")\n\n # Ensure that all batch and event ndims are consistent.\n with ops.name_scope(name, values=[cat.logits]) as ns:\n num_components = cat.event_size\n static_num_components = tensor_util.constant_value(num_components)\n if static_num_components is None:\n raise ValueError(\n \"Could not infer number of classes from cat and unable \"\n \"to compare this value to the number of components passed in.\")\n # Possibly convert from numpy 0-D array.\n static_num_components = int(static_num_components)\n if static_num_components != len(components):\n raise ValueError(\"cat.num_classes != len(components): %d vs. 
%d\" %\n (static_num_components, len(components)))\n\n cat_batch_shape = cat.batch_shape_tensor()\n cat_batch_rank = array_ops.size(cat_batch_shape)\n if validate_args:\n batch_shapes = [d.batch_shape_tensor() for d in components]\n batch_ranks = [array_ops.size(bs) for bs in batch_shapes]\n check_message = (\"components[%d] batch shape must match cat \"\n \"batch shape\")\n self._assertions = [\n check_ops.assert_equal(\n cat_batch_rank, batch_ranks[di], message=check_message % di)\n for di in range(len(components))\n ]\n self._assertions += [\n check_ops.assert_equal(\n cat_batch_shape, batch_shapes[di], message=check_message % di)\n for di in range(len(components))\n ]\n else:\n self._assertions = []\n\n self._cat = cat\n self._components = list(components)\n self._num_components = static_num_components\n self._static_event_shape = static_event_shape\n self._static_batch_shape = static_batch_shape\n\n # We let the Mixture distribution access _graph_parents since its arguably\n # more like a baseclass.\n graph_parents = self._cat._graph_parents # pylint: disable=protected-access\n for c in self._components:\n graph_parents += c._graph_parents # pylint: disable=protected-access\n\n super(Mixture, self).__init__(\n dtype=dtype,\n reparameterization_type=distribution.NOT_REPARAMETERIZED,\n is_continuous=is_continuous,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=graph_parents,\n name=ns)\n\n @property\n def cat(self):\n return self._cat\n\n @property\n def components(self):\n return self._components\n\n @property\n def num_components(self):\n return self._num_components\n\n def _batch_shape_tensor(self):\n return self._cat.batch_shape_tensor()\n\n def _batch_shape(self):\n return self._static_batch_shape\n\n def _event_shape_tensor(self):\n return self._components[0].event_shape_tensor()\n\n def _event_shape(self):\n return self._static_event_shape\n\n def _mean(self):\n with ops.control_dependencies(self._assertions):\n distribution_means = [d.mean() for d in self.components]\n cat_probs = self._cat_probs(log_probs=False)\n # This was checked to not be None at construction time.\n static_event_rank = self.event_shape.ndims\n # Expand the rank of x up to static_event_rank times so that\n # broadcasting works correctly.\n def expand(x):\n expanded_x = x\n for _ in range(static_event_rank):\n expanded_x = array_ops.expand_dims(expanded_x, -1)\n return expanded_x\n cat_probs = [expand(c_p) for c_p in cat_probs]\n partial_means = [\n c_p * m for (c_p, m) in zip(cat_probs, distribution_means)\n ]\n # These should all be the same shape by virtue of matching\n # batch_shape and event_shape.\n return math_ops.add_n(partial_means)\n\n def _log_prob(self, x):\n with ops.control_dependencies(self._assertions):\n x = ops.convert_to_tensor(x, name=\"x\")\n distribution_log_probs = [d.log_prob(x) for d in self.components]\n cat_log_probs = self._cat_probs(log_probs=True)\n final_log_probs = [\n cat_lp + d_lp\n for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)\n ]\n concat_log_probs = array_ops.stack(final_log_probs, 0)\n log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])\n return log_sum_exp\n\n def _prob(self, x):\n return math_ops.exp(self._log_prob(x))\n\n def _sample_n(self, n, seed=None):\n with ops.control_dependencies(self._assertions):\n n = ops.convert_to_tensor(n, name=\"n\")\n static_n = tensor_util.constant_value(n)\n n = int(static_n) if static_n is not None else n\n cat_samples = self.cat.sample(n, 
seed=seed)\n\n static_samples_shape = cat_samples.get_shape()\n if static_samples_shape.is_fully_defined():\n samples_shape = static_samples_shape.as_list()\n samples_size = static_samples_shape.num_elements()\n else:\n samples_shape = array_ops.shape(cat_samples)\n samples_size = array_ops.size(cat_samples)\n static_batch_shape = self.batch_shape\n if static_batch_shape.is_fully_defined():\n batch_shape = static_batch_shape.as_list()\n batch_size = static_batch_shape.num_elements()\n else:\n batch_shape = self.batch_shape_tensor()\n batch_size = array_ops.reduce_prod(batch_shape)\n static_event_shape = self.event_shape\n if static_event_shape.is_fully_defined():\n event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)\n else:\n event_shape = self.event_shape_tensor()\n\n # Get indices into the raw cat sampling tensor. We will\n # need these to stitch sample values back out after sampling\n # within the component partitions.\n samples_raw_indices = array_ops.reshape(\n math_ops.range(0, samples_size), samples_shape)\n\n # Partition the raw indices so that we can use\n # dynamic_stitch later to reconstruct the samples from the\n # known partitions.\n partitioned_samples_indices = data_flow_ops.dynamic_partition(\n data=samples_raw_indices,\n partitions=cat_samples,\n num_partitions=self.num_components)\n\n # Copy the batch indices n times, as we will need to know\n # these to pull out the appropriate rows within the\n # component partitions.\n batch_raw_indices = array_ops.reshape(\n array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)\n\n # Explanation of the dynamic partitioning below:\n # batch indices are i.e., [0, 1, 0, 1, 0, 1]\n # Suppose partitions are:\n # [1 1 0 0 1 1]\n # After partitioning, batch indices are cut as:\n # [batch_indices[x] for x in 2, 3]\n # [batch_indices[x] for x in 0, 1, 4, 5]\n # i.e.\n # [1 1] and [0 0 0 0]\n # Now we sample n=2 from part 0 and n=4 from part 1.\n # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),\n # and for part 1 we want samples from batch entries 0, 0, 0, 0\n # (samples 0, 1, 2, 3).\n partitioned_batch_indices = data_flow_ops.dynamic_partition(\n data=batch_raw_indices,\n partitions=cat_samples,\n num_partitions=self.num_components)\n samples_class = [None for _ in range(self.num_components)]\n\n for c in range(self.num_components):\n n_class = array_ops.size(partitioned_samples_indices[c])\n seed = distribution_util.gen_new_seed(seed, \"mixture\")\n samples_class_c = self.components[c].sample(n_class, seed=seed)\n\n # Pull out the correct batch entries from each index.\n # To do this, we may have to flatten the batch shape.\n\n # For sample s, batch element b of component c, we get the\n # partitioned batch indices from\n # partitioned_batch_indices[c]; and shift each element by\n # the sample index. The final lookup can be thought of as\n # a matrix gather along locations (s, b) in\n # samples_class_c where the n_class rows correspond to\n # samples within this component and the batch_size columns\n # correspond to batch elements within the component.\n #\n # Thus the lookup index is\n # lookup[c, i] = batch_size * s[i] + b[c, i]\n # for i = 0 ... 
n_class[c] - 1.\n lookup_partitioned_batch_indices = (\n batch_size * math_ops.range(n_class) +\n partitioned_batch_indices[c])\n samples_class_c = array_ops.reshape(\n samples_class_c,\n array_ops.concat([[n_class * batch_size], event_shape], 0))\n samples_class_c = array_ops.gather(\n samples_class_c, lookup_partitioned_batch_indices,\n name=\"samples_class_c_gather\")\n samples_class[c] = samples_class_c\n\n # Stitch back together the samples across the components.\n lhs_flat_ret = data_flow_ops.dynamic_stitch(\n indices=partitioned_samples_indices, data=samples_class)\n # Reshape back to proper sample, batch, and event shape.\n ret = array_ops.reshape(lhs_flat_ret,\n array_ops.concat([samples_shape,\n self.event_shape_tensor()], 0))\n ret.set_shape(\n tensor_shape.TensorShape(static_samples_shape).concatenate(\n self.event_shape))\n return ret\n\n def entropy_lower_bound(self, name=\"entropy_lower_bound\"):\n r\"\"\"A lower bound on the entropy of this mixture model.\n\n The bound below is not always very tight, and its usefulness depends\n on the mixture probabilities and the components in use.\n\n A lower bound is useful for ELBO when the `Mixture` is the variational\n distribution:\n\n \\\\(\n \\log p(x) >= ELBO = \\int q(z) \\log p(x, z) dz + H[q]\n \\\\)\n\n where \\\\( p \\\\) is the prior distribution, \\\\( q \\\\) is the variational,\n and \\\\( H[q] \\\\) is the entropy of \\\\( q \\\\). If there is a lower bound\n \\\\( G[q] \\\\) such that \\\\( H[q] \\geq G[q] \\\\) then it can be used in\n place of \\\\( H[q] \\\\).\n\n For a mixture of distributions \\\\( q(Z) = \\sum_i c_i q_i(Z) \\\\) with\n \\\\( \\sum_i c_i = 1 \\\\), by the concavity of \\\\( f(x) = -x \\log x \\\\), a\n simple lower bound is:\n\n \\\\(\n \\begin{align}\n H[q] & = - \\int q(z) \\log q(z) dz \\\\\\\n & = - \\int (\\sum_i c_i q_i(z)) \\log(\\sum_i c_i q_i(z)) dz \\\\\\\n & \\geq - \\sum_i c_i \\int q_i(z) \\log q_i(z) dz \\\\\\\n & = \\sum_i c_i H[q_i]\n \\end{align}\n \\\\)\n\n This is the term we calculate below for \\\\( G[q] \\\\).\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A lower bound on the Mixture's entropy.\n \"\"\"\n with self._name_scope(name, values=[self.cat.logits]):\n with ops.control_dependencies(self._assertions):\n distribution_entropies = [d.entropy() for d in self.components]\n cat_probs = self._cat_probs(log_probs=False)\n partial_entropies = [\n c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)\n ]\n # These are all the same shape by virtue of matching batch_shape\n return math_ops.add_n(partial_entropies)\n\n def _cat_probs(self, log_probs):\n \"\"\"Get a list of num_components batchwise probabilities.\"\"\"\n which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax\n cat_probs = which_softmax(self.cat.logits)\n cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)\n return cat_probs\n"
] |
[
[
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.parsing_ops.parse_example",
"tensorflow.python.framework.tensor_shape.TensorShape"
],
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.math_ops.add",
"tensorflow.core.util.event_pb2.Event",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.array_ops.unique",
"tensorflow.python.debug.debug_utils.add_debug_tensor_watch",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.debug.debug_data.DebugDumpDir",
"numpy.isnan",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.math_ops.div",
"tensorflow.python.debug.debug_data.extract_core_metadata_from_event_proto",
"tensorflow.python.client.session.Session",
"numpy.array",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.ops.array_ops.unique_with_counts",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.debug.debug_utils.watch_graph",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.isinf",
"tensorflow.python.framework.constant_op.constant"
],
[
"sklearn.cluster.KMeans",
"tensorflow.python.training.input.limit_epochs",
"tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig",
"numpy.random.randn",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.array_ops.identity",
"numpy.square",
"numpy.add.reduce",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.learn.python.learn.RunConfig",
"tensorflow.python.framework.ops.control_dependencies",
"numpy.random.choice",
"numpy.random.rand",
"numpy.transpose",
"numpy.array",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"numpy.sum",
"tensorflow.python.ops.math_ops.range",
"numpy.random.seed",
"tensorflow.contrib.learn.python.learn.KMeansClustering",
"tensorflow.contrib.learn.python.learn.estimators.kmeans.KMeansClustering",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.data_flow_ops.dynamic_stitch",
"tensorflow.python.ops.math_ops.reduce_logsumexp",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.contrib.distributions.python.ops.distribution_util.gen_new_seed",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.reduce_prod",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.data_flow_ops.dynamic_partition",
"tensorflow.python.ops.array_ops.expand_dims"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.0"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |