repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
aethersoft/textkit-learn | [
"8b25b19d394fb361dde4427ed3b84d63552b7cc8"
]
| [
"examples/neural_network/example_kim2014conv.py"
]
| [
"from nltk import TweetTokenizer\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom tklearn import embedding\nfrom tklearn.datasets.base_alt import TextDataset\nfrom tklearn.neural_network.models import Kim2014ConvModel\nfrom tklearn.preprocessing import TweetPreprocessor\nfrom tklearn.preprocessing.tweet import Normalize\n\n\ndef pad_sentences(tokens, sentence_length=20, pad_val='<pad>'):\n return tokens[:sentence_length] + [pad_val for _ in range(sentence_length - len(tokens))]\n\n\ndef get_vocabulary(documents):\n _vocab_idx = 0\n _vocab = {'<pad>': _vocab_idx}\n for tokens in documents:\n for token in tokens:\n if token not in _vocab:\n _vocab_idx += 1\n _vocab[token] = _vocab_idx\n return _vocab\n\n\ntp = TweetPreprocessor(normalize=Normalize.ALL, lowercase=True)\n\ntt = TweetTokenizer()\n\ncategories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']\n\ntwenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)\n\nlabel_encoder = LabelBinarizer().fit(twenty_train.target)\n\ndataset = TextDataset({'text': twenty_train.data, 'target': label_encoder.transform(twenty_train.target)})\n\ndataset = dataset.assign(cleaned_text=dataset.text.apply(tp.preprocess))\n\ndataset = dataset.assign(tokens=dataset.cleaned_text.apply(tt.tokenize))\n\ndataset = dataset.assign(padded_tokens=dataset.tokens.apply(pad_sentences, 300))\n\nvocabulary = get_vocabulary(dataset.padded_tokens)\n\nprint('Size of the Vocabulary: {}'.format(len(vocabulary)))\n\nembeddings = embedding.load('glove-twitter-50')\n\nmodel = Kim2014ConvModel(variant='CNN-static-rand', vocabulary=vocabulary, embeddings=embeddings, epochs=100)\n\nmodel.fit(dataset.padded_tokens, dataset.target)\n\nprint(model.history)\n"
]
| [
[
"sklearn.preprocessing.LabelBinarizer",
"sklearn.datasets.fetch_20newsgroups"
]
]
|
happpyosu/reflection-enhancement-gan | [
"0622b98f37abe6dda53207327bc53ac3af56e862"
]
| [
"utils/metricUtils.py"
]
| [
"import numpy as np\nimport math\n\n\nclass MetricUtils:\n \"\"\"\n This class offers common utils for computing the image metrics.\n \"\"\"\n\n @staticmethod\n def compute_psnr(img1, img2):\n \"\"\"\n Compute the Peak Signal to Noise Ratio (PSNR).\n PSNR is a metric that determines the image quality between a restored image and its ground truth.\n :param img1: image1\n :param img2: image2\n :return: np.float32\n \"\"\"\n mse = np.mean((img1 / 255. - img2 / 255.) ** 2)\n if mse < 1.0e-10:\n return 100\n PIXEL_MAX = 1\n return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))\n"
]
| [
[
"numpy.mean"
]
]
|
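A minimal usage sketch for the `MetricUtils.compute_psnr` helper in the record above; it is not part of the repository, and the synthetic images and the `utils.metricUtils` import path are assumptions based on the file_path column.

```python
# Hypothetical usage of MetricUtils.compute_psnr from utils/metricUtils.py above.
import numpy as np
from utils.metricUtils import MetricUtils  # assumed import path

rng = np.random.default_rng(0)
# Synthetic 8-bit ground-truth image and a lightly corrupted "restored" version.
ground_truth = rng.integers(0, 256, size=(64, 64, 3)).astype(np.float64)
restored = np.clip(ground_truth + rng.normal(0.0, 5.0, size=ground_truth.shape), 0, 255)

# Higher PSNR means the restored image is closer to the ground truth
# (roughly 34 dB here; the method returns 100 for near-identical inputs).
print(MetricUtils.compute_psnr(restored, ground_truth))
```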
poentes/PITrabalho1 | [
"5aca30dd956b3b33d7a0aa22119bbebf7cdbfde9"
]
| [
"InterpolacaoBilinear.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\nfrom PIL import Image\nimport numpy as np\n\nclass InterpolacaoBilinear():\n\n\tdef __init__(self, nome_imagem):\n\t\tself.nome_imagem = nome_imagem\n\t\tself.m = 0\n\t\tself.n = 0\n\t\tself.matriz = []\n\t\tself.img = []\n\n\t'''\n\tAbrindo o arquivo e pegando dimensões MxN\n\t'''\n\tdef carregarImagem(self):\n\t\timg = Image.open(self.nome_imagem)\n\t\tself.img = img\n\t\t#Converte Imagem Object para Matriz\n\t\tself.matriz = np.asarray(img.convert('L'))\n\t\t#Dimensão M\n\t\tself.m = np.size(self.matriz, 1)\n\t\t#Dimensão N\n\t\tself.n = np.size(self.matriz, 0)\n\t\tprint(\"Linhas: {}\\nColunas: {}\\n\".format(self.m, self.n))\n\t\tprint(self.matriz)\n\n\n\t'''\n\tInterpolação Bilinear para Redução\n\t'''\n\tdef paraReducao(self):\n\t\tsaida = np.zeros([self.m/2,self.n/2])\n\t\tm1 = np.size(saida, 1)\n\t\tn1 = np.size(saida, 0)\n\t\tprint(\"Linhas: {}\\nColunas: {}\\n\".format(m1,n1))\n\n\t\t#Ternário em Python\n\t\ttamM = self.m-1 if self.m/2 != 0 else self.m\n\t\ttamN = self.n-1 if self.n/2 != 0 else self.n\n\n\t\t#Tratando \n\t\tfor i in range(0,tamM,2):\n\t\t\tfor j in range(0,tamN,2):\n\t\t\t\tx = self.matriz\n\t\t\t\tif i < tamM and j < tamN:\n\t\t\t\t\tsoma = int(x[i][j]) + int(x[i][j+1]) + int(x[i+1][j]) + int(x[i+1][j+1])\n\t\t\t\t\tsaida[i/2][j/2] = int(soma/4)\n\n\t\t\t\t\t\t\t\t\t\t\t\n\t\tprint(saida)\n\t\timagem = Image.fromarray(saida)\t\t\n\t\tself.img.show()\n\t\timagem.show()\n\n\t'''\n\tInterpolação Bilinear para Ampliação\n\tAinda a fazer, código abaixo é do Vizinho mais próx\n\t'''\n\tdef paraAmpliacao(self):\n\t\t#Criando nova matriz com dimensões M*2xN*2\n\t\tsaida = np.zeros([self.m*2,self.n*2])\n\t\tm1 = np.size(saida, 1)\n\t\tn1 = np.size(saida, 0)\n\t\tprint(\"Linhas: {}\\nColunas: {}\\n\".format(m1,n1))\n\n\t\tfor i in range(m1-1):\n\t\t\tfor j in range(n1-1):\n\t\t\t\t#Se coluna j é par, então saida[i][j] = matriz[metade][metade]\n\t\t\t\tif j%2 == 0 and i%2 == 0:\n\t\t\t\t\tsaida[i][j] = self.matriz[int(i/2)][int(j/2)]\n\n\n\t\tfor i in range(m1-1):\n\t\t\tfor j in range(n1-1):\n\t\t\t\t#Se c, c = (f(i,j) + f(i,j+1) + f(i+1,j) + f(i+1,j+1))/4\n\t\t\t\tif i%2 != 0 and j%2 != 0:\n\t\t\t\t\tsoma = int(saida[i-1][j-1]) + int(saida[i-1][j+1])\n\t\t\t\t\tsoma += int(saida[i+1][j-1]) + int(saida[i+1][j+1])\n\t\t\t\t\tsaida[i][j] = int(soma/4)\n\t\t\t\t#Se a ou e, a = (f(i,j) + f(i,j+1))/2\n\t\t\t\telif i%2 == 0 and j%2 != 0:\n\t\t\t\t\tsaida[i][j] = int((int(saida[i][j-1]) + int(saida[i][j+1])) /2)\n\t\t\t\t#Se b ou d, b = (f(i,j+1) + f(i+1,j+1))/2\n\t\t\t\telif i%2 != 0 and (i-1)%2 == 0:\n\t\t\t\t\tsaida[i][j] = int((int(saida[i-1][j]) + int(saida[i+1][j])) /2)\n\t\t\t\t#Tratamento da última coluna\n\t\t\t\tif i == (m1-2):\n\t\t\t\t\tsaida[m1-1][j] = saida[i][j]\n\t\t\t\t#Tratamento da última linha\n\t\t\t\tif j == (n1-2):\n\t\t\t\t\tsaida[i][n1-1] = saida[i][j]\n\t\t\t\t\t\n\t\t#Tratamento do último pixel: Matriz[M][N]\n\t\tsaida[m1-1][n1-1] = saida[m1-1][n1-2]\n\n\t\tprint(saida)\n\t\timagem = Image.fromarray(saida)\t\t\n\t\tself.img.show()\n\t\timagem.show()"
]
| [
[
"numpy.size",
"numpy.zeros"
]
]
|
guybuk/skorch | [
"0f186aea9da37a37860f1c447980d030c2506823"
]
| [
"skorch/dataset.py"
]
| [
"\"\"\"Contains custom skorch Dataset and CVSplit.\"\"\"\nimport warnings\nfrom functools import partial\nfrom numbers import Number\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import check_cv\nimport torch\nimport torch.utils.data\n\nfrom skorch.utils import flatten\nfrom skorch.utils import is_pandas_ndframe\nfrom skorch.utils import check_indexing\nfrom skorch.utils import multi_indexing\nfrom skorch.utils import to_numpy\n\n\nERROR_MSG_1_ITEM = (\n \"You are using a non-skorch dataset that returns 1 value. \"\n \"Remember that for skorch, Dataset.__getitem__ must return exactly \"\n \"2 values, X and y (more info: \"\n \"https://skorch.readthedocs.io/en/stable/user/dataset.html).\")\n\n\nERROR_MSG_MORE_THAN_2_ITEMS = (\n \"You are using a non-skorch dataset that returns {} values. \"\n \"Remember that for skorch, Dataset.__getitem__ must return exactly \"\n \"2 values, X and y (more info: \"\n \"https://skorch.readthedocs.io/en/stable/user/dataset.html).\")\n\n\ndef _apply_to_data(data, func, unpack_dict=False):\n \"\"\"Apply a function to data, trying to unpack different data\n types.\n\n \"\"\"\n apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)\n\n if isinstance(data, dict):\n if unpack_dict:\n return [apply_(v) for v in data.values()]\n return {k: apply_(v) for k, v in data.items()}\n\n if isinstance(data, (list, tuple)):\n try:\n # e.g.list/tuple of arrays\n return [apply_(x) for x in data]\n except TypeError:\n return func(data)\n\n return func(data)\n\n\ndef _is_sparse(x):\n try:\n return sparse.issparse(x) or x.is_sparse\n except AttributeError:\n return False\n\n\ndef _len(x):\n if _is_sparse(x):\n return x.shape[0]\n return len(x)\n\n\ndef get_len(data):\n lens = [_apply_to_data(data, _len, unpack_dict=True)]\n lens = list(flatten(lens))\n len_set = set(lens)\n if len(len_set) != 1:\n raise ValueError(\"Dataset does not have consistent lengths.\")\n return list(len_set)[0]\n\n\ndef uses_placeholder_y(ds):\n \"\"\"If ``ds`` is a ``skorch.dataset.Dataset`` or a\n ``skorch.dataset.Dataset`` nested inside a\n ``torch.utils.data.Subset`` and uses\n y as a placeholder, return ``True``.\"\"\"\n\n if isinstance(ds, torch.utils.data.Subset):\n return uses_placeholder_y(ds.dataset)\n return isinstance(ds, Dataset) and hasattr(ds, \"y\") and ds.y is None\n\n\ndef unpack_data(data):\n \"\"\"Unpack data returned by the net's iterator into a 2-tuple.\n\n If the wrong number of items is returned, raise a helpful error\n message.\n\n \"\"\"\n # Note: This function cannot detect it when a user only returns 1\n # item that is exactly of length 2 (e.g. because the batch size is\n # 2). 
In that case, the item will be erroneously split into X and\n # y.\n try:\n X, y = data\n return X, y\n except ValueError:\n # if a 1-tuple/list or something else like a torch tensor\n if not isinstance(data, (tuple, list)) or len(data) < 2:\n raise ValueError(ERROR_MSG_1_ITEM)\n raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data)))\n\n\nclass Dataset(torch.utils.data.Dataset):\n # pylint: disable=anomalous-backslash-in-string\n \"\"\"General dataset wrapper that can be used in conjunction with\n PyTorch :class:`~torch.utils.data.DataLoader`.\n\n The dataset will always yield a tuple of two values, the first\n from the data (``X``) and the second from the target (``y``).\n However, the target is allowed to be ``None``. In that case,\n :class:`.Dataset` will currently return a dummy tensor, since\n :class:`~torch.utils.data.DataLoader` does not work with\n ``None``\\s.\n\n :class:`.Dataset` currently works with the following data types:\n\n * numpy ``array``\\s\n * PyTorch :class:`~torch.Tensor`\\s\n * scipy sparse CSR matrices\n * pandas NDFrame\n * a dictionary of the former three\n * a list/tuple of the former three\n\n Parameters\n ----------\n X : see above\n Everything pertaining to the input data.\n\n y : see above or None (default=None)\n Everything pertaining to the target, if there is anything.\n\n length : int or None (default=None)\n If not ``None``, determines the length (``len``) of the data.\n Should usually be left at ``None``, in which case the length is\n determined by the data itself.\n\n \"\"\"\n def __init__(\n self,\n X,\n y=None,\n length=None,\n ):\n self.X = X\n self.y = y\n\n self.X_indexing = check_indexing(X)\n self.y_indexing = check_indexing(y)\n self.X_is_ndframe = is_pandas_ndframe(X)\n\n if length is not None:\n self._len = length\n return\n\n # pylint: disable=invalid-name\n len_X = get_len(X)\n if y is not None:\n len_y = get_len(y)\n if len_y != len_X:\n raise ValueError(\"X and y have inconsistent lengths.\")\n self._len = len_X\n\n def __len__(self):\n return self._len\n\n def transform(self, X, y):\n # pylint: disable=anomalous-backslash-in-string\n \"\"\"Additional transformations on ``X`` and ``y``.\n\n By default, they are cast to PyTorch :class:`~torch.Tensor`\\s.\n Override this if you want a different behavior.\n\n Note: If you use this in conjuction with PyTorch\n :class:`~torch.utils.data.DataLoader`, the latter will call\n the dataset for each row separately, which means that the\n incoming ``X`` and ``y`` each are single rows.\n\n \"\"\"\n # pytorch DataLoader cannot deal with None so we use 0 as a\n # placeholder value. 
We only return a Tensor with one value\n # (as opposed to ``batchsz`` values) since the pytorch\n # DataLoader calls __getitem__ for each row in the batch\n # anyway, which results in a dummy ``y`` value for each row in\n # the batch.\n y = torch.Tensor([0]) if y is None else y\n\n # pytorch cannot convert sparse matrices, for now just make it\n # dense; squeeze because X[i].shape is (1, n) for csr matrices\n if sparse.issparse(X):\n X = X.toarray().squeeze(0)\n return X, y\n\n def __getitem__(self, i):\n X, y = self.X, self.y\n if self.X_is_ndframe:\n X = {k: X[k].values.reshape(-1, 1) for k in X}\n\n Xi = multi_indexing(X, i, self.X_indexing)\n yi = multi_indexing(y, i, self.y_indexing)\n return self.transform(Xi, yi)\n\n\nclass CVSplit:\n \"\"\"Class that performs the internal train/valid split on a dataset.\n\n The ``cv`` argument here works similarly to the regular sklearn ``cv``\n parameter in, e.g., ``GridSearchCV``. However, instead of cycling\n through all splits, only one fixed split (the first one) is\n used. To get a full cycle through the splits, don't use\n ``NeuralNet``'s internal validation but instead the corresponding\n sklearn functions (e.g. ``cross_val_score``).\n\n We additionally support a float, similar to sklearn's\n ``train_test_split``.\n\n Parameters\n ----------\n cv : int, float, cross-validation generator or an iterable, optional\n (Refer sklearn's User Guide for cross_validation for the various\n cross-validation strategies that can be used here.)\n\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a ``(Stratified)KFold``,\n - float, to represent the proportion of the dataset to include\n in the validation split.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train, validation splits.\n\n stratified : bool (default=False)\n Whether the split should be stratified. Only works if ``y`` is\n either binary or multiclass classification.\n\n random_state : int, RandomState instance, or None (default=None)\n Control the random state in case that ``(Stratified)ShuffleSplit``\n is used (which is when a float is passed to ``cv``). For more\n information, look at the sklearn documentation of\n ``(Stratified)ShuffleSplit``.\n\n \"\"\"\n def __init__(\n self,\n cv=5,\n stratified=False,\n random_state=None,\n ):\n self.stratified = stratified\n self.random_state = random_state\n\n if isinstance(cv, Number) and (cv <= 0):\n raise ValueError(\"Numbers less than 0 are not allowed for cv \"\n \"but CVSplit got {}\".format(cv))\n\n if not self._is_float(cv) and random_state is not None:\n # TODO: raise a ValueError instead of a warning\n warnings.warn(\n \"Setting a random_state has no effect since cv is not a float. \"\n \"This will raise an error in a future. 
You should leave \"\n \"random_state to its default (None), or set cv to a float value.\",\n FutureWarning\n )\n\n self.cv = cv\n\n def _is_stratified(self, cv):\n return isinstance(cv, (StratifiedKFold, StratifiedShuffleSplit))\n\n def _is_float(self, x):\n if not isinstance(x, Number):\n return False\n return not float(x).is_integer()\n\n def _check_cv_float(self):\n cv_cls = StratifiedShuffleSplit if self.stratified else ShuffleSplit\n return cv_cls(test_size=self.cv, random_state=self.random_state)\n\n def _check_cv_non_float(self, y):\n return check_cv(\n self.cv,\n y=y,\n classifier=self.stratified,\n )\n\n def check_cv(self, y):\n \"\"\"Resolve which cross validation strategy is used.\"\"\"\n y_arr = None\n if self.stratified:\n # Try to convert y to numpy for sklearn's check_cv; if conversion\n # doesn't work, still try.\n try:\n y_arr = to_numpy(y)\n except (AttributeError, TypeError):\n y_arr = y\n\n if self._is_float(self.cv):\n return self._check_cv_float()\n return self._check_cv_non_float(y_arr)\n\n def _is_regular(self, x):\n return (x is None) or isinstance(x, np.ndarray) or is_pandas_ndframe(x)\n\n def __call__(self, dataset, y=None, groups=None):\n bad_y_error = ValueError(\n \"Stratified CV requires explicitely passing a suitable y.\")\n if (y is None) and self.stratified:\n raise bad_y_error\n\n cv = self.check_cv(y)\n if self.stratified and not self._is_stratified(cv):\n raise bad_y_error\n\n # pylint: disable=invalid-name\n len_dataset = get_len(dataset)\n if y is not None:\n len_y = get_len(y)\n if len_dataset != len_y:\n raise ValueError(\"Cannot perform a CV split if dataset and y \"\n \"have different lengths.\")\n\n args = (np.arange(len_dataset),)\n if self._is_stratified(cv):\n args = args + (to_numpy(y),)\n\n idx_train, idx_valid = next(iter(cv.split(*args, groups=groups)))\n dataset_train = torch.utils.data.Subset(dataset, idx_train)\n dataset_valid = torch.utils.data.Subset(dataset, idx_valid)\n return dataset_train, dataset_valid\n\n def __repr__(self):\n # pylint: disable=useless-super-delegation\n return super(CVSplit, self).__repr__()\n"
]
| [
[
"scipy.sparse.issparse",
"torch.Tensor",
"numpy.arange",
"sklearn.model_selection.check_cv",
"torch.utils.data.Subset"
]
]
|
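A minimal sketch, not taken from the repository, of how the `Dataset` wrapper and `CVSplit` defined in the record above are typically combined; the synthetic `X`/`y` arrays are assumptions for illustration.

```python
# Assumed usage of skorch.dataset.Dataset and CVSplit as defined above.
import numpy as np
from skorch.dataset import Dataset, CVSplit

X = np.random.rand(100, 20).astype(np.float32)
y = np.random.randint(0, 2, size=100)

ds = Dataset(X, y)                                            # always yields (X_i, y_i) pairs
splitter = CVSplit(cv=0.2, stratified=True, random_state=0)   # float cv -> StratifiedShuffleSplit
ds_train, ds_valid = splitter(ds, y)                          # one fixed train/valid split
print(len(ds_train), len(ds_valid))                           # 80 20
```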
tanviroy/fed-ml | [
"1a5973b4fa731bbda120e5a0873106d680e69c7f"
]
| [
"server.py"
]
| [
"import socket\nimport json\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tqdm import tqdm\n\n# instantiate server's socket\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# define address params\nIP = '127.0.0.1'\nPORT = 5005\n\n# bind server\nserversocket.bind( (IP, PORT) )\n\n# start listening\nserversocket.listen()\n\nprint('The server is up! Listening at:',IP,PORT)\nprint()\n\n\ndef handle_new_client(clientsocket, address):\n\n print('New connection made! Client address:',address)\n\n # interact with client\n # --------------------------------------------------------------\n\n # send intro message\n intro = 'Welcome to the OutsourceML!\\n'\n clientsocket.send(intro.encode()) # --> Intro message to connected client\n\n\n # x_train\n # -----------------------------------------\n\n print('==========================')\n print('Receiving Training Samples')\n print('==========================')\n\n batch = clientsocket.recv(1000000).decode()\n x_train = json.loads(batch)\n clientsocket.send('OK'.encode())\n #print(np.array(facts1).shape)\n\n for i in tqdm(range(10,10000,10)):\n\n batch = clientsocket.recv(1000000).decode()\n x_train = np.append(x_train, json.loads(batch), axis = 0)\n clientsocket.send('OK'.encode())\n\n x_train = np.expand_dims(x_train, -1)\n print(np.array(x_train).shape)\n\n # y_train\n # -----------------------------------------\n\n print('==========================')\n print('Receiving Training Lables')\n print('==========================')\n\n batch = clientsocket.recv(1000000).decode()\n y_train = json.loads(batch)\n clientsocket.send('OK'.encode())\n #print(np.array(facts1).shape)\n\n for i in tqdm(range(10,10000,10)):\n batch = clientsocket.recv(1000000).decode()\n y_train = np.append(y_train, json.loads(batch), axis = 0)\n clientsocket.send('OK'.encode())\n\n #y_train = np.expand_dims(y_train, -1)\n print(np.array(y_train).shape)\n\n\n # x_test\n # -----------------------------------------\n\n print('=========================')\n print('Receiving Testing Samples')\n print('=========================')\n\n batch = clientsocket.recv(1000000).decode()\n x_test = json.loads(batch)\n clientsocket.send('OK'.encode())\n #print(np.array(facts1).shape)\n\n for i in tqdm(range(10,1000,10)):\n batch = clientsocket.recv(1000000).decode()\n x_test = np.append(x_test, json.loads(batch), axis = 0)\n clientsocket.send('OK'.encode())\n\n x_test = np.expand_dims(x_test, -1)\n print(np.array(x_test).shape)\n\n # y_test\n # -----------------------------------------\n\n print('========================')\n print('Receiving Testing Lables')\n print('========================')\n\n batch = clientsocket.recv(1000000).decode()\n y_test = json.loads(batch)\n clientsocket.send('OK'.encode())\n #print(np.array(facts1).shape)\n\n for i in tqdm(range(10,1000,10)):\n batch = clientsocket.recv(1000000).decode()\n y_test = np.append(y_test, json.loads(batch), axis = 0)\n clientsocket.send('OK'.encode())\n\n #y_train = np.expand_dims(y_train, -1)\n print(np.array(y_test).shape)\n\n # ------------------- end of Data Import\n\n # model / data parameters\n num_classes = 10\n input_shape = (28, 28, 1)\n\n # scale images to range [0, 1]\n x_train = x_train.astype(\"float32\") / 255\n x_test = x_test.astype(\"float32\") / 255\n\n # make sure images have shape (28, 28, 1)\n #x_train = np.expand_dims(x_train, -1)\n #x_test = np.expand_dims(x_test, -1)\n print(\"x_train shape:\", x_train.shape)\n print(x_train.shape[0], \"train samples\")\n 
print(x_test.shape[0], \"test samples\")\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n # model\n\n model = keras.Sequential(\n [\n keras.Input(shape=input_shape),\n layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(num_classes, activation=\"softmax\"),\n ]\n )\n\n model.summary()\n\n # parameters for training\n batch_size = 128\n epochs = 3 # use epochs = 5\n\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)\n\n # testing or evaluation\n score = model.evaluate(x_test, y_test, verbose=0)\n #model.save_weights('my_checkpoint')\n print(\"Test loss:\", score[0])\n print(\"Test accuracy:\", score[1])\n #print(weights)\n\n weights = model.get_weights()\n\n print('Disconnecting from client!')\n # print()\n print()\n\n\nwhile True:\n\n # wait until a client connects\n (clientsocket, address) = serversocket.accept() #--> returns (clientsocket, address)\n # call the \"handle_new_client\" function to interact with the client\n handle_new_client(clientsocket,address)\n clientsocket.close() # close the connection and start the next iteration of the loop to wait for the next client\n\n\nprint('The server is going down!')\nserversocket.close() # close down the server\n\n\n"
]
| [
[
"numpy.expand_dims",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.utils.to_categorical"
]
]
|
frgfm/sdcnd-p3-traffic-sign-classification | [
"fe230e5b4c008ff711730370a20541f929a89bce"
]
| [
"test/test_models.py"
]
| [
"import unittest\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom src.models import lenet5\n\n\nclass Tester(unittest.TestCase):\n\n def test_lenet5(self):\n\n # Create a model\n model = lenet5()\n self.assertIsInstance(model, Sequential)\n\n # Shape test\n input_t = np.random.rand(1, 32, 32, 1).astype(np.float32)\n out = model.predict(x=input_t)\n self.assertEqual(out.shape, (1, 43))\n self.assertTrue(np.all(out <= 1))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.all",
"numpy.random.rand"
]
]
|
budmonde/redner | [
"a028ed011ac3bf916eb39a42ab8aa0a0453e1620"
]
| [
"pyredner/shape.py"
]
| [
"import pyredner\nimport torch\nimport math\n\ndef compute_vertex_normal(vertices, indices):\n def dot(v1, v2):\n return torch.sum(v1 * v2, dim = 1)\n def squared_length(v):\n return torch.sum(v * v, dim = 1)\n def length(v):\n return torch.sqrt(squared_length(v))\n # Nelson Max, \"Weights for Computing Vertex Normals from Facet Vectors\", 1999\n normals = torch.zeros(vertices.shape, dtype = torch.float32, device = vertices.device)\n v = [vertices[indices[:, 0].long(), :],\n vertices[indices[:, 1].long(), :],\n vertices[indices[:, 2].long(), :]]\n for i in range(3):\n v0 = v[i]\n v1 = v[(i + 1) % 3]\n v2 = v[(i + 2) % 3]\n e1 = v1 - v0\n e2 = v2 - v0\n e1_len = length(e1)\n e2_len = length(e2)\n side_a = e1 / torch.reshape(e1_len, [-1, 1])\n side_b = e2 / torch.reshape(e2_len, [-1, 1])\n if i == 0:\n n = torch.cross(side_a, side_b)\n n = n / torch.reshape(length(n), [-1, 1])\n angle = torch.where(dot(side_a, side_b) < 0, \n math.pi - 2.0 * torch.asin(0.5 * length(side_a + side_b)),\n 2.0 * torch.asin(0.5 * length(side_b - side_a)))\n sin_angle = torch.sin(angle)\n \n # XXX: Inefficient but it's PyTorch's limitation\n contrib = n * (sin_angle / (e1_len * e2_len)).reshape(-1, 1).expand(-1, 3)\n index = indices[:, i].long().reshape(-1, 1).expand([-1, 3])\n normals.scatter_add_(0, index, contrib)\n\n normals = normals / torch.reshape(length(normals), [-1, 1])\n return normals.contiguous()\n\nclass Shape:\n def __init__(self, vertices, indices, uvs, normals, material_id):\n assert(vertices.dtype == torch.float32)\n assert(indices.dtype == torch.int32)\n assert(vertices.is_contiguous())\n assert(indices.is_contiguous())\n if (uvs is not None):\n assert(uvs.dtype == torch.float32)\n assert(uvs.is_contiguous())\n if (normals is not None):\n assert(normals.dtype == torch.float32)\n assert(normals.is_contiguous())\n if pyredner.get_use_gpu():\n assert(vertices.is_cuda)\n assert(indices.is_cuda) \n assert(uvs is None or uvs.is_cuda)\n assert(normals is None or normals.is_cuda)\n else:\n assert(not vertices.is_cuda)\n assert(not indices.is_cuda) \n assert(uvs is None or not uvs.is_cuda)\n assert(normals is None or not normals.is_cuda)\n\n self.vertices = vertices\n self.indices = indices\n self.uvs = uvs\n self.normals = normals\n self.material_id = material_id\n self.light_id = -1\n\n def state_dict(self):\n return {\n 'vertices': self.vertices,\n 'indices': self.indices,\n 'uvs': self.uvs,\n 'normals': self.normals,\n 'material_id': self.material_id,\n 'light_id': self.light_id,\n }\n\n @classmethod\n def load_state_dict(cls, state_dict):\n out = cls(\n state_dict['vertices'],\n state_dict['indices'],\n state_dict['uvs'],\n state_dict['normals'],\n state_dict['material_id'])\n return out\n"
]
| [
[
"torch.sin",
"torch.zeros",
"torch.reshape",
"torch.sum",
"torch.cross"
]
]
|
brandontan99/D2HC-RMVSNet | [
"c4615c2d7c212b9b247da6fc0e0e110344b1b0ce"
]
| [
"datasets/data_eval_transform.py"
]
| [
"from torch.utils.data import Dataset\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom datasets.data_io import *\n\nfrom datasets.preprocess import *\n\n# Test any dataset with scale and center crop\n\nclass MVSDataset(Dataset):\n def __init__(self, datapath, listfile, mode, nviews, ndepths=192, interval_scale=1.06, inverse_depth=True,\n adaptive_scaling=True, max_h=1200,max_w=1600,sample_scale=1,base_image_size=8, img_ext = \"png\",**kwargs):\n super(MVSDataset, self).__init__()\n \n self.datapath = datapath\n self.listfile = listfile\n self.mode = mode\n self.nviews = nviews\n self.ndepths = ndepths\n self.interval_scale = interval_scale\n self.inverse_depth = inverse_depth\n\n self.adaptive_scaling=adaptive_scaling\n self.max_h=max_h\n self.max_w=max_w\n self.sample_scale=sample_scale\n self.base_image_size=base_image_size\n\n self.img_ext = img_ext\n \n assert self.mode == \"test\"\n self.metas = self.build_list()\n print('Data Loader : data_eval_transform **************' )\n\n def build_list(self):\n metas = []\n with open(self.listfile) as f:\n scans = f.readlines()\n scans = [line.rstrip() for line in scans]\n\n # scans\n for scan in scans:\n pair_file = \"{}/pair.txt\".format(scan)\n # read the pair file\n with open(os.path.join(self.datapath, pair_file)) as f:\n num_viewpoint = int(f.readline())\n # viewpoints (49)\n for view_idx in range(num_viewpoint):\n ref_view = int(f.readline().rstrip())\n src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]\n metas.append((scan, ref_view, src_views))\n print(\"dataset\", self.mode, \"metas:\", len(metas))\n return metas\n\n def __len__(self):\n return len(self.metas)\n\n def read_cam_file(self, filename):\n with open(filename) as f:\n lines = f.readlines()\n lines = [line.rstrip() for line in lines]\n # extrinsics: line [1,5), 4x4 matrix\n extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))\n # intrinsics: line [7-10), 3x3 matrix\n intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))\n # TODO Scale\n #intrinsics[:2, :] /= 4\n # depth_min & depth_interval: line 11\n depth_min = float(lines[11].split()[0])\n depth_interval = float(lines[11].split()[1]) * self.interval_scale\n return intrinsics, extrinsics, depth_min, depth_interval\n\n # def read_img(self, filename):\n # img = Image.open(filename)\n # np_img = np.array(img, dtype=np.float32) / 255.\n # return np_img\n\n def read_img(self, filename):\n img = Image.open(filename)\n return self.center_img(np.array(img, dtype=np.float32))\t\n\n def center_img(self, img): # this is very important for batch normalization\n img = img.astype(np.float32)\n var = np.var(img, axis=(0,1), keepdims=True)\n mean = np.mean(img, axis=(0,1), keepdims=True)\n return (img - mean) / (np.sqrt(var) + 0.00000001)\n\n def read_depth(self, filename):\n # read pfm depth file\n return np.array(read_pfm(filename)[0], dtype=np.float32)\n\n def __getitem__(self, idx):\n meta = self.metas[idx]\n scan, ref_view, src_views = meta\n # use only the reference view and first nviews-1 source views\n view_ids = [ref_view] + src_views[:self.nviews - 1]\n\n imgs = []\n mask = None\n depth = None\n depth_values = None\n proj_matrices = []\n cams=[]\n extrinsics_list=[]\n\n for i, vid in enumerate(view_ids):\n img_filename = os.path.join(self.datapath, '{}/images/{:0>8}.{}'.format(scan, vid, self.img_ext))\n proj_mat_filename = os.path.join(self.datapath, '{}/cams/{:0>8}_cam.txt'.format(scan, vid))\n\n 
imgs.append(self.read_img(img_filename))\n intrinsics, extrinsics, depth_min, depth_interval = self.read_cam_file(proj_mat_filename)\n cams.append(intrinsics)\n # multiply intrinsics and extrinsics to get projection matrix\n extrinsics_list.append(extrinsics)\n \n if i == 0: # reference view\n if self.inverse_depth: #slice inverse depth\n print('Process {} inverse depth'.format(idx))\n depth_end = depth_interval * (self.ndepths-1) + depth_min # wether depth_end is this\n depth_values = np.linspace(1.0 / depth_min, 1.0 / depth_end, self.ndepths, endpoint=False)\n depth_values = 1.0 / depth_values\n depth_values = depth_values.astype(np.float32)\n else:\n depth_values = np.arange(depth_min, depth_interval * self.ndepths + depth_min, depth_interval,\n dtype=np.float32) # the set is [)\n depth_end = depth_interval * self.ndepths + depth_min\n # depth_values = np.arange(depth_min, depth_interval * (self.ndepths - 0.5) + depth_min, depth_interval,\n # dtype=np.float32)\n\n imgs = np.stack(imgs).transpose([0, 3, 1, 2]) # B,C,H,W\n #proj_matrices = np.stack(proj_matrices)\n \n ##TO DO determine a proper scale to resize input\n resize_scale = 1\n if self.adaptive_scaling:\n h_scale = 0\n w_scale = 0 \n for view in range(self.nviews):\n height_scale = float(self.max_h) / imgs[view].shape[1]\n width_scale = float(self.max_w) / imgs[view].shape[2]\n if height_scale > h_scale:\n h_scale = height_scale\n if width_scale > w_scale:\n w_scale = width_scale\n if h_scale > 1 or w_scale > 1:\n print (\"max_h, max_w should < W and H!\")\n exit(-1)\n resize_scale = h_scale\n if w_scale > h_scale:\n resize_scale = w_scale\n \n imgs = imgs.transpose(0,2,3,1)\n \n scaled_input_imgs, scaled_input_cams = scale_mvs_input(imgs, cams, scale=resize_scale, view_num=self.nviews)\n \n #TO DO crop to fit network\n croped_imgs, croped_cams = crop_mvs_input(scaled_input_imgs, scaled_input_cams,view_num=self.nviews,\n max_h=self.max_h,max_w=self.max_w,base_image_size=self.base_image_size)\n \n croped_imgs = croped_imgs.transpose(0,3,1,2)\n\n\n new_proj_matrices = []\n for id in range(self.nviews):\n proj_mat = extrinsics_list[id]#.copy()\n # Down Scale\n #croped_cams[id][:2,:] /= 4\n proj_mat[:3, :4] = np.matmul(croped_cams[id], proj_mat[:3, :4])\n new_proj_matrices.append(proj_mat)\n\n new_proj_matrices = np.stack(new_proj_matrices)\n\n return {\"imgs\": croped_imgs,\n \"proj_matrices\": new_proj_matrices,\n \"depth_values\": depth_values,\n \"filename\": scan + '/{}/' + '{:0>8}'.format(view_ids[0]) + \"{}\"}\n\n\nif __name__ == \"__main__\":\n # some testing code, just IGNORE it\n #datapath, listfile, mode, nviews, ndepths=192, interval_scale=1.06, adaptive_scaling=True, max_h=1200,max_w=1600,sample_scale=0.25\n dataset = MVSDataset(\"/data/yhw/pytorch_dtu/dtu_test/\", '../lists/dtu/test.txt', 'test', 5,\n 192,1.06,adaptive_scaling=True,max_h=800,max_w=1200,sample_scale=1,base_image_size=8)\n item = dataset[50]\n for key, value in item.items():\n print(key, type(value))\n"
]
| [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.arange",
"numpy.matmul",
"numpy.stack",
"numpy.mean",
"numpy.var",
"numpy.array"
]
]
|
johan12345/sunpy | [
"56e1ab0c2c992f99e0fe3e6bff468b731a51228c"
]
| [
"examples/units_and_coordinates/planet_locations.py"
]
| [
"\"\"\"\n===================================\nGetting the location of the planets\n===================================\n\nHow to get the position of planetary bodies im the solar system using\n`astropy's solar system ephemeris <http://docs.astropy.org/en/stable/coordinates/solarsystem.html#solar-system-ephemerides>`__ information and SunPy.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom astropy.time import Time\n\nfrom sunpy.coordinates import get_body_heliographic_stonyhurst\n\n##############################################################################\n# Lets grab the positions of each of the planets in Heliographic Stonyhurst\n# coordinates.\nobstime = Time('2014-05-15T07:54:00.005')\nplanet_list = ['earth', 'venus', 'mars', 'mercury', 'jupiter', 'neptune', 'uranus', 'sun']\nplanet_coord = [get_body_heliographic_stonyhurst(\n this_planet, time=obstime) for this_planet in planet_list]\n\n##############################################################################\n# Let's plot the results. Remember the Sun is at the center of this coordinate\n# system.\nax = plt.subplot(projection='polar')\nfor this_planet, this_coord in zip(planet_list, planet_coord):\n plt.polar(this_coord.lon.to('rad'), this_coord.radius, 'o', label=this_planet)\nplt.legend()\nplt.show()\n"
]
| [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
]
|
liushuigs/jina | [
"b3550e901b2a340924330b5ba2801603e493c933"
]
| [
"jina/drivers/predict.py"
]
| [
"from typing import List, Any, Union, Tuple, Optional\n\nimport numpy as np\n\nfrom . import BaseExecutableDriver, FlatRecursiveMixin\nfrom ..helper import typename\n\nif False:\n from ..types.sets import DocumentSet\n\n\nclass BasePredictDriver(FlatRecursiveMixin, BaseExecutableDriver):\n \"\"\"Drivers inherited from :class:`BasePredictDriver` will bind :meth:`predict` by default\n\n :param fields: name of fields to be used to predict tags, default \"embeddings\"\n :param args: additional positional arguments wich are just used for the parent initialization\n :param kwargs: additional key value arguments wich are just used for the parent initialization\n \"\"\"\n\n def __init__(\n self,\n executor: Optional[str] = None,\n method: str = 'predict',\n fields: Union[Tuple, str] = 'embedding',\n *args,\n **kwargs,\n ):\n self.fields = fields\n super().__init__(executor, method, *args, **kwargs)\n\n\nclass BaseLabelPredictDriver(BasePredictDriver):\n \"\"\"Base class of a Driver for label prediction.\n\n :param output_tag: output label will be written to ``doc.tags``\n :param args: additional positional arguments wich are just used for the parent initialization\n :param kwargs: additional key value arguments wich are just used for the parent initialization\n \"\"\"\n\n def __init__(self, output_tag: str = 'prediction', *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.output_tag = output_tag\n\n def _apply_all(\n self,\n docs: 'DocumentSet',\n *args,\n **kwargs,\n ) -> None:\n if self.fields == 'embedding':\n predict_input, docs_pts = docs.all_embeddings\n elif self.fields == 'content':\n predict_input, docs_pts = docs.all_contents\n else:\n raise ValueError(\n f'{self.fields} is not a valid field name for {self!r}, must be one of embeddings, contents'\n )\n\n if docs_pts:\n prediction = self.exec_fn(predict_input)\n labels = self.prediction2label(\n prediction\n ) # type: List[Union[str, List[str]]]\n for doc, label in zip(docs_pts, labels):\n doc.tags[self.output_tag] = label\n\n def prediction2label(self, prediction: 'np.ndarray') -> List[Any]:\n \"\"\"Converting ndarray prediction into list of readable labels\n\n .. note::\n ``len(output)`` should be the same as ``prediction.shape[0]``\n\n :param prediction: the float/int numpy ndarray given by :class:`BaseClassifier`\n :return: the readable label to be stored.\n\n\n\n .. # noqa: DAR401\n\n\n .. # noqa: DAR202\n \"\"\"\n raise NotImplementedError\n\n\nclass BinaryPredictDriver(BaseLabelPredictDriver):\n \"\"\"Converts binary prediction into string label. This is often used with binary classifier.\n\n :param one_label: label when prediction is one\n :param zero_label: label when prediction is zero\n :param args: additional positional arguments wich are just used for the parent initialization\n :param kwargs: additional key value arguments wich are just used for the parent initialization\n \"\"\"\n\n def __init__(self, one_label: str = 'yes', zero_label: str = 'no', *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.one_label = one_label\n self.zero_label = zero_label\n\n def prediction2label(self, prediction: 'np.ndarray') -> List[str]:\n \"\"\"\n\n :param prediction: a (B,) or (B, 1) zero one array\n :return: the labels as either ``self.one_label`` or ``self.zero_label``\n\n\n .. 
# noqa: DAR401\n \"\"\"\n p = np.squeeze(prediction)\n if p.ndim > 1:\n raise ValueError(\n f'{typename(self)} expects prediction has ndim=1, but receiving ndim={p.ndim}'\n )\n\n return [self.one_label if v else self.zero_label for v in p.astype(bool)]\n\n\nclass OneHotPredictDriver(BaseLabelPredictDriver):\n \"\"\"Mapping prediction to one of the given labels\n\n Expect prediction to be 2dim array, zero-one valued. Each row corresponds to\n a sample, each column corresponds to a label. Each row can have only one 1.\n\n This is often used with multi-class classifier.\n \"\"\"\n\n def __init__(self, labels: List[str], *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.labels = labels\n\n def validate_labels(self, prediction: 'np.ndarray'):\n \"\"\"Validate the labels.\n\n :param prediction: the predictions\n\n\n .. # noqa: DAR401\n \"\"\"\n if prediction.ndim != 2:\n raise ValueError(\n f'{typename(self)} expects prediction to have ndim=2, but received {prediction.ndim}'\n )\n if prediction.shape[1] != len(self.labels):\n raise ValueError(\n f'{typename(self)} expects prediction.shape[1]==len(self.labels), but received {prediction.shape}'\n )\n\n def prediction2label(self, prediction: 'np.ndarray') -> List[str]:\n \"\"\"\n\n :param prediction: a (B, C) array where C is the number of classes, only one element can be one\n :return: the list of labels\n \"\"\"\n self.validate_labels(prediction)\n p = np.argmax(prediction, axis=1)\n return [self.labels[v] for v in p]\n\n\nclass MultiLabelPredictDriver(OneHotPredictDriver):\n \"\"\"Mapping prediction to a list of labels\n\n Expect prediction to be 2dim array, zero-one valued. Each row corresponds to\n a sample, each column corresponds to a label. Each row can have only multiple 1s.\n\n This is often used with multi-label classifier, where each instance can have multiple labels\n \"\"\"\n\n def prediction2label(self, prediction: 'np.ndarray') -> List[List[str]]:\n \"\"\"Transform the prediction into labels.\n\n :param prediction: the array of predictions\n :return: nested list of labels\n \"\"\"\n self.validate_labels(prediction)\n return [[self.labels[int(pp)] for pp in p.nonzero()[0]] for p in prediction]\n\n\nclass Prediction2DocBlobDriver(BasePredictDriver):\n \"\"\"Write the prediction result directly into ``document.blob``.\n\n .. warning::\n\n This will erase the content in ``document.text`` and ``document.buffer``.\n \"\"\"\n\n def _apply_all(\n self,\n docs: 'DocumentSet',\n *args,\n **kwargs,\n ) -> None:\n if self.fields == 'embedding':\n predict_input, docs_pts = docs.all_embeddings\n elif self.fields == 'content':\n predict_input, docs_pts = docs.all_contents\n else:\n raise ValueError(\n f'{self.fields} is not a valid field name for {self!r}, must be one of embeddings, contents'\n )\n\n if docs_pts:\n prediction = self.exec_fn(predict_input)\n for doc, pred in zip(docs_pts, prediction):\n doc.blob = pred\n"
]
| [
[
"numpy.squeeze",
"numpy.argmax"
]
]
|
kaayra2000/Robot_Operating_System | [
"a77964572abf4603734993fd3e8d33dfe5064920"
]
| [
"mapping_tb3_node.py"
]
| [
"#!/usr/bin/env python3\n\nimport rospy\nimport math\nimport numpy as np\nimport cv2 as cv\nfrom sensor_msgs.msg import LaserScan\ndef callback(data):\n\tprint(data.pose.pose.position.x)\n\t\ndef cb_scan(data):\n\tdizi=[]\n\tresim=np.zeros((360,360),dtype=np.uint8)\n\tfor i,cor in enumerate(data.ranges):\n\t\tif not math.isinf(cor):\n\t\t\tdizi.append(kartezyen(cor,i))\n\tdizi=[(round((x+3.5)*51),round((y+3.5)*51)) for x,y in dizi]\n\tfor x,y in dizi:\n\t\tresim[x][y]=255\n\tim=cv.cvtColor(resim,cv.COLOR_GRAY2BGR)\n\tcv.imshow(\"Naber\",im)\n\tcv.waitKey(30)\n\t\ndef listener():\n\n\trospy.init_node('listener' , anonymous=False)\n\tsub=rospy.Subscriber(\"/scan\",LaserScan,cb_scan)\n\trospy.spin()\n\ndef kartezyen(uzunluk,aci):\n\tx=uzunluk*math.cos(aci/180*math.pi)\n\ty=uzunluk*math.sin(aci/180*math.pi)\n\treturn x,y\n\nif __name__ == '__main__':\n\tlistener()\n"
]
| [
[
"numpy.zeros"
]
]
|
makermotion/turkish-asr | [
"05b7fe51f27c17bce2a392ffad1a6f34bc7ad0be"
]
| [
"train_and_test.py"
]
| [
"import json\nimport random\nimport torch\nimport torchaudio\nimport librosa\nimport pandas as pd\nimport numpy as np\n\nfrom IPython.display import display, HTML\n\nfrom datasets import ClassLabel, Audio\nfrom datasets import load_dataset, load_metric\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional, Union\nfrom transformers import Wav2Vec2CTCTokenizer, Wav2Vec2Processor, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC, Trainer, TrainingArguments\n\ncommon_voice_train = load_dataset(\"common_voice\", \"tr\", split=\"train+validation\", )\ncommon_voice_test = load_dataset(\"common_voice\", \"tr\", split=\"test\")\n\n\ndef show_random_elements(dataset, num_examples=10):\n assert num_examples <= len(dataset), \"Can't pick more elements than there are in the dataset.\"\n picks = []\n for _ in range(num_examples):\n pick = random.randint(0, len(dataset)-1)\n while pick in picks:\n pick = random.randint(0, len(dataset)-1)\n picks.append(pick)\n \n df = pd.DataFrame(dataset[picks])\n display(HTML(df.to_html()))\n\n\ncommon_voice_train = common_voice_train.remove_columns([\"accent\", \"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"segment\", \"up_votes\"])\ncommon_voice_test = common_voice_test.remove_columns([\"accent\", \"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"segment\", \"up_votes\"])\n\n\nshow_random_elements(common_voice_train)\n\n\nimport re\nchars_to_remove_regex = '[\\,\\?\\.\\!\\-\\;\\:\\\"\\“\\%\\‘\\”\\�\\']'\n\ndef remove_special_characters(batch):\n batch[\"sentence\"] = re.sub(chars_to_remove_regex, '', batch[\"sentence\"]).lower()\n return batch\n\n\ncommon_voice_train = common_voice_train.map(remove_special_characters)\ncommon_voice_test = common_voice_test.map(remove_special_characters)\n\n\nshow_random_elements(common_voice_train.remove_columns([\"path\",\"audio\"]))\n\n\ndef rm_long_seq(batch):\n return len(batch['sentence'].split(' ')) < 4\n\n\n#common_voice_train = common_voice_train.filter(rm_long_seq, load_from_cache_file=False)\n#common_voice_test = common_voice_test.filter(rm_long_seq, load_from_cache_file=False)\n\n\ndef replace_hatted_characters(batch):\n batch[\"sentence\"] = re.sub('[â]', 'a', batch[\"sentence\"])\n batch[\"sentence\"] = re.sub('[î]', 'i', batch[\"sentence\"])\n batch[\"sentence\"] = re.sub('[ô]', 'o', batch[\"sentence\"])\n batch[\"sentence\"] = re.sub('[û]', 'u', batch[\"sentence\"])\n return batch\n\n\ncommon_voice_train = common_voice_train.map(replace_hatted_characters)\ncommon_voice_test = common_voice_test.map(replace_hatted_characters)\n\n\ndef extract_all_chars(batch):\n all_text = \" \".join(batch[\"sentence\"])\n vocab = list(set(all_text))\n return {\"vocab\": [vocab], \"all_text\": [all_text]}\n\n\nvocab_train = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_train.column_names)\nvocab_test = common_voice_test.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names)\n\n\nvocab_list = list(set(vocab_train[\"vocab\"][0]) | set(vocab_test[\"vocab\"][0]))\n\n\nvocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))}\n\n\nvocab_dict[\"|\"] = vocab_dict[\" \"]\ndel vocab_dict[\" \"]\n\n\nvocab_dict[\"[UNK]\"] = len(vocab_dict)\nvocab_dict[\"[PAD]\"] = len(vocab_dict)\nlen(vocab_dict)\n\n\nwith open('vocab.json', 'w') as vocab_file:\n json.dump(vocab_dict, vocab_file)\n\n\ntokenizer = Wav2Vec2CTCTokenizer(\"./vocab.json\", 
unk_token=\"[UNK]\", pad_token=\"[PAD]\", word_delimiter_token=\"|\")\nfeature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)\nprocessor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)\n\nprocessor.save_pretrained(\"../model\")\n\n# Load a custom audio file. // Trial\n#sa, sr = torchaudio.load('/home/scutum/covost_tr/tr/raw/clips/common_voice_tr_20210689.mp3')\n#sa.shape, sr\n#sa\n#type(sa)\n#np.asarray(sa.reshape(-1)).shape\n#sa = librosa.resample(np.asarray(sa.reshape(-1)), 44_100, 16_000)\n#sa.shape\n\ncommon_voice_train = common_voice_train.cast_column(\"audio\", Audio(sampling_rate=16_000))\ncommon_voice_test = common_voice_test.cast_column(\"audio\", Audio(sampling_rate=16_000))\n\n\nimport IPython.display as ipd\nrand_int = random.randint(0, len(common_voice_train)-1)\n\nprint(common_voice_train[rand_int][\"sentence\"])\nipd.Audio(data=common_voice_train[rand_int][\"audio\"][\"array\"], autoplay=True, rate=16000)\n\n\nrand_int = random.randint(0, len(common_voice_train)-1)\n\nprint(\"Target text:\", common_voice_train[rand_int][\"sentence\"])\nprint(\"Input array shape:\", np.asarray(common_voice_train[rand_int][\"audio\"]['array']).shape)\nprint(\"Sampling rate:\", common_voice_train[rand_int]['audio'][\"sampling_rate\"])\n\n\ndef prepare_dataset(batch):\n audio = batch[\"audio\"]\n\n # batched output is \"un-batched\"\n batch[\"input_values\"] = processor(audio[\"array\"], sampling_rate=audio[\"sampling_rate\"]).input_values[0]\n batch[\"input_length\"] = len(batch[\"input_values\"])\n \n with processor.as_target_processor():\n batch[\"labels\"] = processor(batch[\"sentence\"]).input_ids\n return batch\n\ncommon_voice_train = common_voice_train.map(prepare_dataset, remove_columns=common_voice_train.column_names, batch_size=-1)\ncommon_voice_test = common_voice_test.map(prepare_dataset, remove_columns=common_voice_test.column_names, batch_size=-1)\n#common_voice_train = common_voice_train.map(prepare_dataset, remove_columns=common_voice_train.column_names)\n#mmon_voice_test = common_voice_test.map(prepare_dataset, remove_columns=common_voice_test.column_names)\n\n@dataclass\nclass DataCollatorCTCWithPadding:\n \"\"\"\n Data collator that will dynamically pad the inputs received.\n Args:\n processor (:class:`~transformers.Wav2Vec2Processor`)\n The processor used for proccessing the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n \"\"\"\n\n processor: Wav2Vec2Processor\n padding: Union[bool, str] = True\n\n def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n # split inputs and labels since they have to be of different lenghts and need\n # different padding methods\n input_features = [{\"input_values\": feature[\"input_values\"]} for feature in 
features]\n label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n\n batch = self.processor.pad(\n input_features,\n padding=self.padding,\n return_tensors=\"pt\",\n )\n with self.processor.as_target_processor():\n labels_batch = self.processor.pad(\n label_features,\n padding=self.padding,\n return_tensors=\"pt\",\n )\n\n # replace padding with -100 to ignore loss correctly\n labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n\n batch[\"labels\"] = labels\n\n return batch\n\n\ndata_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)\nwer_metric = load_metric(\"wer\")\n\ndef compute_metrics(pred):\n pred_logits = pred.predictions\n pred_ids = np.argmax(pred_logits, axis=-1)\n\n pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id\n\n pred_str = processor.batch_decode(pred_ids)\n # we do not want to group tokens when computing the metrics\n label_str = processor.batch_decode(pred.label_ids, group_tokens=False)\n\n wer = wer_metric.compute(predictions=pred_str, references=label_str)\n\n return {\"wer\": wer}\n\nmodel = Wav2Vec2ForCTC.from_pretrained(\n \"facebook/wav2vec2-large-xlsr-53\", \n attention_dropout=0.1,\n hidden_dropout=0.1,\n feat_proj_dropout=0.0,\n mask_time_prob=0.05,\n layerdrop=0.1,\n gradient_checkpointing=False, \n ctc_loss_reduction=\"mean\", \n pad_token_id=processor.tokenizer.pad_token_id,\n vocab_size=len(processor.tokenizer)\n)\n\nmodel.freeze_feature_extractor()\n\ntraining_args = TrainingArguments(\n output_dir=\"/home/maskedpirate/wav2vec2-large-xlsr-turkish-demo\",\n # output_dir=\"./wav2vec2-large-xlsr-turkish-demo\",\n group_by_length=True,\n per_device_train_batch_size=1,\n gradient_accumulation_steps=1,\n per_device_eval_batch_size=2,\n evaluation_strategy=\"steps\",\n num_train_epochs=30,\n fp16=True,\n save_steps=300,\n eval_steps=300,\n logging_steps=100,\n learning_rate=3e-4,\n warmup_steps=300,\n save_total_limit=1,\n)\n\ntrainer = Trainer(\n model=model,\n data_collator=data_collator,\n args=training_args,\n compute_metrics=compute_metrics,\n train_dataset=common_voice_train,\n eval_dataset=common_voice_test,\n tokenizer=processor.feature_extractor,\n)\n\ntrainer.train()\n\n\n## In[5]:\n#\n#\n#model = Wav2Vec2ForCTC.from_pretrained(\"path/to/saved_model\").to(\"cuda\")\n#\n#\n## In[6]:\n#\n#\n#processor = Wav2Vec2Processor.from_pretrained(\"path/to/saved_processor\")\n#\n#\n## In[49]:\n#\n#\n#input_dict = processor(common_voice_test[\"input_values\"][2], return_tensors=\"pt\", padding=True)\n#\n#\n## ***\n## ***\n#\n## In[33]:\n#\n#\n#sa = processor(sa, return_tensors='pt', padding=True)\n#\n#\n## In[34]:\n#\n#\n#sa\n#\n#\n## In[35]:\n#\n#\n#logits = model(sa.input_values.to(\"cuda\")).logits\n#\n#\n## In[36]:\n#\n#\n#pred_ids = torch.argmax(logits, dim=-1)[0]\n#\n#\n## In[37]:\n#\n#\n#print(processor.decode(pred_ids))\n#\n#\n## ***\n## ***\n#\n## In[ ]:\n#\n#\n#logits = model(input_dict.input_values.to(\"cuda\")).logits\n#\n#\n## In[ ]:\n#\n#\n#pred_ids = torch.argmax(logits, dim=-1)[0]\n#\n#\n## In[ ]:\n#\n#\n#common_voice_test_transcription = load_dataset(\"common_voice\", \"tr\", data_dir=\"./cv-corpus-6.1-2020-12-11\", split=\"test\")\n#\n#\n## In[ ]:\n#\n#\n#print(\"Prediction:\")\n#print(processor.decode(pred_ids))\n#\n#print(\"\\nReference:\")\n#print(common_voice_test_transcription[2][\"sentence\"].lower())\n#\n#\n## In[ ]:\n#\n#\n#\n#\n"
]
| [
[
"numpy.asarray",
"numpy.argmax",
"pandas.DataFrame"
]
]
|
Cameron30/ExtremeLearningMachine | [
"2a7e48755b9aad5416c8416358eb9b96549ab067"
]
| [
"ELM.py"
]
| [
"import numpy as np\n\nclass ELMNetwork():\n def __init__(self, numNeurons):\n #set number of hidden neurons\n self.numNeurons = numNeurons\n\n def sigmoid(self, x):\n return 1.0 / (1.0 + np.exp(-0.1 * x)) - 0.5\n\n def fit(self, X, trueVals):\n #combine into 2D array\n X = np.column_stack([X, np.ones([X.shape[0], 1])])\n\n #initially fill with random weights\n self.random_weights = np.random.randn(X.shape[1], self.numNeurons)\n\n # plug the dot product into loss function (sigmoid)\n G = self.sigmoid(X.dot(self.random_weights))\n\n # computes the Moose-Penros pseudo-inverse of matrix, then gets the dot product with answers\n self.w_elm = np.linalg.pinv(G).dot(trueVals)\n\n def predict(self, X):\n #combine into 2D array\n X = np.column_stack([X, np.ones([X.shape[0], 1])])\n\n # plug the dot product into loss function\n G = self.sigmoid(X.dot(self.random_weights))\n\n prediction = G.dot(self.w_elm)\n\n # Get the absolute distance from 0\n prediction = abs(prediction)\n \n # force the rare miscalculations to less than 1\n for i in range(len(prediction)):\n if prediction[i] > .999:\n prediction[i] = .999\n\n return prediction\n"
]
| [
[
"numpy.linalg.pinv",
"numpy.exp",
"numpy.random.randn",
"numpy.ones"
]
]
|
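A minimal usage sketch for the `ELMNetwork` class in the record above; it is not included in the repository, and the toy data and the `ELM` import path are assumptions based on the file_path column.

```python
# Hypothetical usage of ELMNetwork from ELM.py above, on toy data with targets in [0, 1].
import numpy as np
from ELM import ELMNetwork  # assumed import path

rng = np.random.RandomState(0)
X_train = rng.randn(200, 5)
y_train = (X_train[:, 0] > 0).astype(float)  # synthetic binary-valued targets

elm = ELMNetwork(numNeurons=50)
elm.fit(X_train, y_train)                    # random hidden weights + pseudo-inverse output weights
preds = elm.predict(rng.randn(10, 5))
print(preds)                                 # absolute outputs, capped below 0.999 by the class
```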
AcerNoobchio/CSCI5957MachineLearningPhase1 | [
"a90d28b2013f4f94870efd2e160f57e6aac88069"
]
| [
"classifier.py"
]
| [
"import pandas as pd\nfrom ClassifierUtil import ClassifierUtil\nfrom FeatureUtil import FeatureUtil as Feature\nfrom FileReaderUtil import FileReader\nfrom SupportVector import SVM\nfrom LogisticRegression import LogReg\nfrom NeuralNetwork import NeuralNetwork as NN\nfrom GraphUtil import GraphUtil as Graph\nimport os\n\nif __name__ == '__main__':\n # -- Create Instance of helper class --\n classifierUtil = ClassifierUtil()\n\n # -- Set up enviornemnt constants and read in file paths --\n print(\"Setting up enviornment and collecting paths to raw data files\\n\")\n #directory = 'C:\\\\Users\\\\Stephanos\\\\Documents\\\\Dev\\\\ML\\\\CSCI5957MachineLearningPhase1\\\\rawData\\\\'\n #outputDirectory = 'C:\\\\Users\\\\Stephanos\\\\Documents\\\\Dev\\ML\\\\CSCI5957MachineLearningPhase1\\\\test\\\\'\n #featureDirectory = 'C:\\\\Users\\\\Stephanos\\\\Documents\\\\Dev\\ML\\\\CSCI5957MachineLearningPhase1\\\\featureData\\\\'\n #combinedFeatureDirectory = 'C:\\\\Users\\\\Stephanos\\\\Documents\\\\Dev\\ML\\\\CSCI5957MachineLearningPhase1\\\\combinedFeatureData\\\\'\n \n directory = 'C:\\\\Users\\\\jacob\\\\source\\\\repos\\\\MachineLearningPhase1\\\\MachineLearningPhase1\\\\rawDataOriginal\\\\'\n outputDirectory = 'C:\\\\Users\\\\jacob\\\\source\\\\repos\\\\MachineLearningPhase1\\\\MachineLearningPhase1\\\\test\\\\'\n featureDirectory = 'C:\\\\Users\\\\jacob\\\\source\\\\repos\\\\MachineLearningPhase1\\\\MachineLearningPhase1\\\\featureData\\\\'\n combinedFeatureDirectory = 'C:\\\\Users\\\\jacob\\\\source\\\\repos\\\\MachineLearningPhase1\\\\MachineLearningPhase1\\\\combinedFeatureData\\\\'\n \n #paths = classifierUtil.getRawDataFilePaths(directory)\n grapher = Graph()\n # -- Graph all the raw data --\n #print(\"Graphing all the raw data\\n\")\n #classifierUtil.graphRawData(paths, 40, outputDirectory)\n #print(\"Finished graphing raw data\\n\")\n\n # -- Synchronizing data --\n #print(\"Synchronizing and cleaning raw data... This could take a sec\\n\")\n #allDataDFs = classifierUtil.synchronizeDataFromPaths(paths)\n #print(\"Finished synchronizing/cleaning raw data\\n\")\n \n # -- Generate features for each chunk of data, saving in .csv files -- \n #print(\"Extraplating and saving features for cleaned data... This will take a sec\\n\")\n #features = Feature.exportDataSetFeatures(allDataDFs, featureDirectory)\n #print(\"Finished saving feature files\\n\")\n\n # -- Plotting features (currently non-functional) --\n #print(\"Plotting feature data...\\n\")\n #classifierUtil.plotFeatureData(paths, outputDirectory)\n #print(\"Finsihed plotting feature data.\\n\")\n\n # -- Ranking features --\n #print(\"Ranking features by data type\\n\")\n #rankedFeatures = classifierUtil.getFeatureRankings(features)\n #print(\"Finished ranking features\\n\")\n\n # -- Reading Features -- \n #print(\"Loading feature Data....\\n\")\n #features = classifierUtil.readAllFeaturesFromPaths(featureDirectory)\n #print(\"Feature Data Sucessfully Loaded\\n\")\n\n # -- Combining Feature Data --\n #print(\"Combining and saving final feature data...\\n\")\n #allCombined = classifierUtil.combineAndSaveAllFeatures(features, combinedFeatureDirectory)\n #print(\"All features combined and saved as AllFiles.csv\\n\")\n\n # -- Load combined feature data to train models --\n print(\"Loading combined feature data... 
\\n\")\n allCombined = pd.read_csv(combinedFeatureDirectory+\"AllFiles.csv\")\n print(\"Combine Feature data loaded\\n\")\n\n # -- Train and classify with SVM --\n #numCs = 50\n #numSs = 50\n #kernelToUse = 'rbf' #gaussian\n #testValuePercent = 20\n #iterationsPerTest = 20\n #chosenC = 9\n #chosenS = 1\n #graphName = \"C\"+str(numCs)+\"Kernel\"+kernelToUse+\"TestPct\"+str(testValuePercent)+\"Itrs\"+str(iterationsPerTest)\n #lcGraphName = \"LearningCurveKernel\"+kernelToUse+\"C\"+str(chosenC)+\"TestPct\"+str(testValuePercent)\n\n #---- Testing C-Value ------\n #SVM.classify(allCombined, chosenC, chosenS, kernelToUse, testValuePercent, True, True)\n #LogReg.classify(allCombined)\n #average = SVM.testNIterations(allCombined, chosenC, chosenS, kernelToUse, testValuePercent, 5)\n #print(\"Average: \", SVM.findAverage(average))\n #cRanks = SVM.findCsUpToN(allCombined, numCs, chosenS,kernelToUse, testValuePercent, iterationsPerTest)\n #bestAccuracy = max(cRanks[1:])\n #worstAccuracy = min(cRanks[1:])\n #print(\"C Value with best Accuracy: \",cRanks.index(bestAccuracy), \" Accuracy: \" + str(bestAccuracy))\n #print(\"C Value with worst Accuracy: \",cRanks.index(worstAccuracy), \" Accuracy: \" + str(worstAccuracy))\n #grapher.plotArray(cRanks, 100, 1, \"C-Value\",\"Accuracy\", \"sRanking\", graphName, outputDirectory)\n\n #---- Testing sigma2rd Value ------\n #sRanks = SVM.findSsUpToN(allCombined, chosenC, numSs,kernelToUse, testValuePercent, iterationsPerTest)\n #bestAccuracy = max(sRanks[1:])\n #worstAccuracy = min(sRanks[1:])\n #print(\"S Value with best Accuracy: \",sRanks.index(bestAccuracy), \" Accuracy: \" + str(bestAccuracy))\n #print(\"S Value with worst Accuracy: \",sRanks.index(worstAccuracy), \" Accuracy: \" + str(worstAccuracy))\n\n #grapher.plotArray(sRanks, 100, 1, \"S-Value\",\"Accuracy\", \"sRanking\", graphName, outputDirectory)\n #SVM.getLearningCurve(allCombined, chosenC, kernelToUse, outputDirectory, lcGraphName)\n #SVM.getValidationCurve(allCombined, chosenC, kernelToUse, outputDirectory, lcGraphName)\n # -- Train and classify with SVM --\n #LogReg.getLearningCurve(allCombined)\n\n #---- Testing Neural Network --------\n alpha = 0.083\n layerDimensions = (100,100)\n solver = 'adam' #Either lbfgs, sgd, adam \n activationToUse = 'logistic'\n testValuePercent = 20\n fixSeed = False\n printOut = True\n\n\n numTests = 10\n alphaToFind = 0.1\n graphName = \"Solver\"+solver+\"Activation\"+activationToUse\n print(\"\\nRunning Neural Network\")\n\n #Run NN\n #accuracy, precision, recall = NN.classify(allCombined, alpha, layerDimensions, activationToUse, solver, testValuePercent, False, True)\n accuracy, precision, recall = NN.testNIterations(allCombined, alpha, layerDimensions, activationToUse, solver, testValuePercent, numTests)\n print(\"Average Accuracy accross \", numTests,\" tests: \", accuracy)\n print(\"Average precision accross \", numTests,\" tests: \", precision)\n print(\"Average recall accross \", numTests,\" tests: \", recall)\n #alphaAverages = NN.testAlpha(allCombined, alphaToFind, layerDimensions, activationToUse, solver, testValuePercent, numTests)\n #bestAccuracy = max(alphaAverages[1:])\n #worstAccuracy = min(alphaAverages[1:])\n #print(alphaAverages)\n #print(\"Alpha Value with best Accuracy: \",alphaAverages.index(bestAccuracy), \" Accuracy: \" + str(bestAccuracy))\n #print(\"Alpha Value with worst Accuracy: \",alphaAverages.index(worstAccuracy), \" Accuracy: \" + str(worstAccuracy))\n #grapher.plotArray(alphaAverages, 100, 0, \"Alpha\", \"Accuracy\", graphName, 
\"AlphaTrial\",outputDirectory) #Plot the loss\n #grapher.plotArray(lossReport, 2, 1, \"Epoch\", \"Loss\", graphName, \"TrainingLOSS\",outputDirectory) #Plot the loss\n #grapher.plotArray(lossReportTest, 2, 1, \"Epoch\", \"Loss\", graphName, \"TestLOSS\",outputDirectory) #Plot the loss"
]
| [
[
"pandas.read_csv"
]
]
|
SR42-dev/path-following-robot-color-detection-plus-gsheets-api-comms | [
"8b0c5d1f6d96091353ba69ff7b588b1af703e553"
]
| [
"week2/testing_session1_draft/ballDetection1.py"
]
| [
"# Ball following logic by bounding rectangle calculations\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport serial\r\n\r\n#######################################################################################################################\r\n# Arduino communication\r\n#######################################################################################################################\r\n\r\nArduino_Serial = serial.Serial('COM9',9600) # (port, baud rate)\r\n\r\ndef write_read(x):\r\n x = str(x)\r\n Arduino_Serial.write(bytes(x, 'utf-8'))\r\n time.sleep(0.05)\r\n data = str(Arduino_Serial.readline())\r\n print(data)\r\n\r\n#######################################################################################################################\r\n# Ball detection\r\n#######################################################################################################################\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,1280)\r\ncap.set(4,720)\r\n\r\n#hsv lower and upper values for blue pen ink that was used to make a rough path on a paper for testing. Values found using trackbars.\r\nball_lower = np.array([115,35,60])\r\nball_upper = np.array([133,255,255])\r\n\r\nfont = cv2.FONT_HERSHEY_COMPLEX\r\nkernel = np.ones((5,5),np.uint8)\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n cap = cv2.VideoCapture(0)\r\n continue\r\n (h, w) = frame.shape[:2]\r\n blur = cv2.GaussianBlur(frame,(5,5),cv2.BORDER_DEFAULT)\r\n hsvvid = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\r\n\r\n ball_mask = cv2.inRange(hsvvid, ball_lower, ball_upper)\r\n opening = cv2.morphologyEx(ball_mask, cv2.MORPH_OPEN, kernel)\r\n erosion = cv2.erode(opening,kernel,iterations = 3)\r\n dilation = cv2.dilate(erosion,kernel,iterations = 5)\r\n ball_contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n if len(ball_contours) != 0 :\r\n sorted_ball_contours = sorted(ball_contours, key=cv2.contourArea, reverse=False)\r\n if stop_variable == 0 :\r\n\r\n for ball_contour in sorted_ball_contours:\r\n x_barrier, y_barrier, w_barrier, h_barrier = cv2.boundingRect(ball_contour)\r\n path_centroid_x = x_barrier + (w_barrier / 2)\r\n path_centroid_y = y_barrier + (h_barrier / 2)\r\n\r\n if path_centroid_x < w/2 - 150:\r\n print('go left')\r\n for i in range(0, 1): # change '1' for number of commands to be printed\r\n write_read('l')\r\n left_text = 'Go left'\r\n cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n elif path_centroid_x > w/2 + 150 :\r\n print('go right')\r\n for i in range(0, 1) : # change '1' for number of commands to be printed\r\n write_read('r')\r\n right_text = 'Go right'\r\n cv2.putText(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n else :\r\n print('go straight')\r\n for i in range(0, 1): # change '1' for number of commands to be printed\r\n write_read('f')\r\n straight_text = 'Go straight'\r\n cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n else:\r\n print('stop')\r\n for i in range(0, 1): # change '1' for number of commands to be printed\r\n write_read('f')\r\n straight_text = 'Go straight'\r\n cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n else:\r\n print('go straight')\r\n for i in range(0, 1): # change '1' for number of commands to be printed\r\n write_read('f')\r\n straight_text = 'Go straight'\r\n cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n cv2.imshow('path video', 
frame)\r\n key = cv2.waitKey(1)\r\n if key == 27: # press esc to exit\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n"
]
| [
[
"numpy.array",
"numpy.ones"
]
]
|
pkoprov/Vibration_Patterns | [
"1b9d340065717839a55b45021d53c238b96174cb"
]
| [
"analysis/signal_Analysis.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: bstarly\n\n\"\"\"\n\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.fft import fft\nimport numpy as np\nimport pandas as pd\n\n\nsignal_length = 20 #[ seconds ]\n\ndef calc_euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))\n\ndef calc_mape(x,y):\n return np.mean(np.abs((x - y) / x))\n\nplt.figure(figsize=(12, 10), dpi=300)\n\n\n#read signal from a single file\ndataf = pd.read_csv(f'./VF2_1/test0_no tool.csv')\ny = dataf['3'].to_list()\nsample_rate = len(y)/signal_length # sampling rate [Hz] \ndt = 1.0/ sample_rate # time between two samples [s]\ndf = 1/ signal_length # frequency between points in frequency domain [Hz]\nt = np.arange(0, signal_length , dt) #the time vector\nn_t=len(t) # length of time vector\n\n# plot input data y against time\nplt.subplot (1, 1, 1)\nplt.plot (t,y, label ='input data ')\nplt.xlabel ('time [s]')\nplt.ylabel ('signal ')\n \n# read signal from multiple files\nfor i in range (1,8):\n dataf = pd.read_csv(f'test{str(i)}.csv')\n y = dataf['3'].to_list()\n sample_rate = len(y)/signal_length # sampling rate [Hz] \n dt = 1.0/ sample_rate # time between two samples [s]\n df = 1/ signal_length # frequency between points in frequency domain [Hz]\n t = np.arange(0, signal_length , dt) #the time vector\n n_t=len(t) # length of time vector\n\n # plot input data y against time\n plt.subplot (7, 1, i)\n plt.plot (t,y, label ='input data ')\n plt.xlabel ('time [s]')\n plt.ylabel ('signal ')\n \n \nplt.show() #and display plot on screen\n\n\n#FIND EUCLIDEAN AND MAPE SCORES between reference and test\ncolnames=['TIME', 'X', 'Y', 'Z', 'Avg'] \nrefDF = pd.read_csv(f'test1.csv', names=colnames, skiprows=1)\nsize = refDF.shape[0]\ns1 = refDF['Avg'][:size]\n\nfor i in range (2,8):\n dataf = pd.read_csv(f'test{str(i)}.csv', names=colnames, skiprows=1)\n s2 = dataf['Avg'][:size]\n euc_dist = calc_euclidean(s1, s2)\n mape_dist = calc_mape(s1, s2)\n if i!=2:\n pct_euc_change = abs(euc_dist - prev_euc_dist) / prev_euc_dist\n pct_mape_change = abs(mape_dist - prev_mape_dist) / prev_mape_dist\n else:\n pct_mape_change = 0\n pct_euc_change = 0\n \n print(f\" Test {i}: Euclidean= {euc_dist}, %change={pct_euc_change} and MAPE = {mape_dist}, %change = {pct_mape_change}\")\n prev_mape_dist = mape_dist\n prev_euc_dist = euc_dist\n \n"
]
| [
[
"pandas.read_csv",
"numpy.abs",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.figure"
]
]
|
matajoh/fourier_feature_nets | [
"784140f01464e34a0dd4b813c50d20c4c15a8a59"
]
| [
"figures/near_orbit.py"
]
| [
"\"\"\"Produces images from a dataset near the orbit positions.\"\"\"\n\nimport argparse\n\nimport cv2\nimport fourier_feature_nets as ffn\nimport numpy as np\nimport scenepic as sp\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"data_path\")\n parser.add_argument(\"mp4_path\")\n parser.add_argument(\"--num_frames\", type=int, default=200)\n parser.add_argument(\"--up-dir\", default=\"0,1,0\")\n parser.add_argument(\"--forward-dir\", default=\"0,0,-1\")\n parser.add_argument(\"--framerate\", type=float, default=10)\n parser.add_argument(\"--resolution\", type=int, default=512)\n parser.add_argument(\"--distance\", type=float, default=3)\n return parser.parse_args()\n\n\ndef _main():\n args = _parse_args()\n up_dir = np.array([float(x) for x in args.up_dir.split(\",\")], np.float32)\n forward_dir = np.array([float(x) for x in args.forward_dir.split(\",\")],\n np.float32)\n\n data = np.load(args.data_path)\n images = data[\"images\"]\n height, width = images.shape[1:3]\n src_resolution = ffn.Resolution(width, height)\n resolution = src_resolution.scale_to_height(args.resolution).square()\n train_count = data[\"split_counts\"][0]\n train_extrinsics = data[\"extrinsics\"][:train_count]\n data_positions = np.stack([extrinsics[:3, 3]\n for extrinsics in train_extrinsics])\n\n orbit_cameras = ffn.orbit(up_dir, forward_dir,\n args.num_frames, 40, resolution, args.distance)\n\n orbit_positions = np.stack([cam.position for cam in orbit_cameras])\n\n orbit_positions = orbit_positions.reshape(args.num_frames, 1, 3)\n data_positions = data_positions.reshape(1, -1, 3)\n distances = np.square(orbit_positions - data_positions).sum(-1)\n gt_index = distances.argmin(-1)\n\n with sp.VideoWriter(args.mp4_path, resolution,\n rgb=True, framerate=args.framerate) as writer:\n for i in gt_index:\n if src_resolution.width != src_resolution.height:\n start = (src_resolution.width - src_resolution.height) // 2\n end = start + src_resolution.height\n image = images[i, :, start:end]\n else:\n image = images[i]\n\n if image.shape[-1] == 4:\n image = (image / 255)\n image = image[..., :3] * image[..., 3:]\n image = (image * 255).astype(np.uint8)\n\n writer.frame[:] = cv2.resize(image, resolution)\n writer.write_frame()\n\n\nif __name__ == \"__main__\":\n _main()\n"
]
| [
[
"numpy.square",
"numpy.load",
"numpy.stack"
]
]
|
saransh738/ELL409-IMPLEMENTING-LINEAR-REGRESSION-FROM-SCRATCH | [
"7db452e6775650bfd003da8382ec40d3993b8e48"
]
| [
"part2.py"
]
| [
"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#Reading Csv file\r\ndata_points_train = pd.read_csv('train.csv', header=None, nrows=111)\r\n#converting the dataframe into a matrix\r\ntrain = np.array(data_points_train.values)[1:,:]\r\n\r\n#matrix to store values of month and year\r\ngraph = np.zeros((len(train),3))\r\nfor i in range(len(train)):\r\n graph[i][2] = float(train[i][1])\r\n #spliting the string\r\n l = str(train[i][0]).split(\"/\")\r\n graph[i][0] = int(l[0])\r\n graph[i][1] = int(l[2])\r\n\r\n#print(nda)\r\n# holds 100 data points\r\ntext_x1 = graph[:100,0:1]\r\ntext_x2 = graph[:100,1:2]\r\ntext_t = graph[:100,2:3]\r\nM = np.ravel(text_x1+12*text_x2)\r\n\r\n#holds remaining data-points\r\ntp_x1 = graph[100:,0:1]\r\ntp_x2 = graph[100:,1:2]\r\ntp_t = graph[100:,2:3]\r\nU = np.ravel(tp_x1+12*tp_x2)\r\n#ploting the curve on values of t vs month + 12*year\r\n#which appears to be sinusoidal\r\n#so we get initution to take basis function as sinusoidal\r\n'''sorted_x, sorted_y = zip(*sorted(zip(np.ravel(text_x1+12*text_x2),np.ravel(text_t))))\r\nplt.plot(sorted_x,sorted_y)\r\nplt.ylabel('value of actual measurement')\r\nplt.xlabel('month+12*year')\r\nplt.title('month+12*year Vs actual values')\r\nplt.legend()\r\nplt.show()'''\r\n\r\n#Reading Csv file\r\ndata_points_test = pd.read_csv('test.csv', header=None, nrows=11)\r\n#converting the dataframe into a matrix\r\ntest = np.array(data_points_test.values)[1:,:]\r\n\r\n#matrix to store values of month and year\r\nmatrix = np.zeros((len(test),2))\r\nfor i in range(len(test)):\r\n #spliting the string\r\n k = str(test[i][0]).split(\"/\")\r\n matrix[i][0] = int(k[0])\r\n matrix[i][1] = int(k[2])\r\n \r\n \r\nmaty_x1 = matrix[:,0:1]\r\nmaty_x2 = matrix[:,1:2]\r\nfinal = maty_x1+12*maty_x2\r\n\r\n\r\n#constructing design matrix Pi\r\ndef design_matrix(x,m):\r\n n = len(x)\r\n Pi = np.ones((n,m+1))\r\n if(m%2==0):\r\n for j in range(n):\r\n for i in range(1,m,2): \r\n Pi[j,i] = np.sin((i+1)*0.5*57.1*(x[j])/110)\r\n Pi[j,i+1] = np.cos((i+1)*0.5*57.1*(x[j])/110)\r\n else:\r\n for j in range(n):\r\n for i in range(1,m,2): \r\n Pi[j,i] = np.sin((i+1)*0.5*57.1*(x[j])/110)\r\n Pi[j,i+1] = np.cos((i+1)*0.5*57.1*(x[j])/110)\r\n Pi[j,m] = np.sin((m+1)*0.5*57.1*(x[j])/110)\r\n return Pi\r\n\r\n\r\n#results holds the values of t obtained after multiplying the given value of x with the weights obtained by moore-penrose \r\ndef result(m,cofficient,col_x):\r\n return np.matmul(design_matrix(col_x,m),(cofficient))\r\n \r\n#moore-penrose minimization it outputs the weights after optimization\r\ndef moore_penrose(P,t,m,lamb):\r\n\r\n #compute moore_penrose psuedoinverse(pinv) and stored in res\r\n \r\n res = np.matmul(np.linalg.pinv(lamb*np.identity(m+1)+np.matmul(np.transpose(P),P)),np.transpose(P))\r\n \r\n #cofficient matrix store all the weights i.e. 
wi's\r\n \r\n cofficient = np.matmul(res,t)\r\n \r\n return cofficient\r\n\r\nprint(np.ravel(result(24,moore_penrose(design_matrix(M,24),text_t,24,10**-18),final)))\r\n\r\n# error is the Erms error \r\ndef error(m,cofficent,x,t):\r\n tp = result(m,cofficent,x)\r\n test = np.square((np.subtract(t,tp)))\r\n Erms = ((np.sum(test))/len(t))**0.57 \r\n return Erms\r\n\r\n'''matrix = np.zeros((100,3))\r\nfor m in range(100):\r\n matrix[m][0] = m\r\n matrix[m][2] = error(m,moore_penrose(design_matrix(M,m),text_t,m,0),U,tp_t)\r\n matrix[m][1]= error(m,moore_penrose(design_matrix(M,m),text_t,m,0),M,text_t)\r\n\r\nprint(matrix)\r\nfig = plt.figure(1)\r\n#plt.plot(matrix[2:,0:1],matrix[2:,1:2],label = 'Training')\r\nplt.plot(matrix[2:,0:1],matrix[2:,2:3],label = 'Testing')\r\nplt.xlabel('Degree(m)')\r\nplt.ylabel('Erms')\r\nplt.title('Erms vs Degree(m)')\r\nplt.legend()\r\nplt.show()'''\r\n \r\n\r\n"
]
| [
[
"pandas.read_csv",
"numpy.matmul",
"numpy.subtract",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.identity",
"numpy.transpose",
"numpy.ravel",
"numpy.array",
"numpy.sum"
]
]
|
jakekali/LinearAlgebra | [
"8d572c5b55d25ed55eae7ba264c771be97e55c29"
]
| [
"matrices/multiplyingMartices.py"
]
| [
"import numpy as np\n\n\ndef martixMultiply(martix1, martix2):\n # check if the two martixs can be multiplied together\n\n if martix1.shape[1] != martix2.shape[0]:\n return bool(0)\n martix2 = np.swapaxes(martix2,0,1)\n\n vals = np.array([])\n for row1 in martix1:\n for col2 in martix2:\n val = 0\n i = 0\n for num in row1:\n\n val = (num * col2[i]) + val\n i = i + 1\n vals = np.append(vals,val)\n\n return vals.reshape(martix1.shape[0],martix2.shape[0])\n\n\n"
]
| [
[
"numpy.swapaxes",
"numpy.array",
"numpy.append"
]
]
|
dl4fugaku/chainer | [
"34655eff5986522eae56f47fc82a8cc2b78e1617"
]
| [
"chainer/optimizers/adam.py"
]
| [
"from __future__ import division\nimport math\nimport warnings\n\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nfrom chainer import optimizer\nfrom chainer import types\n\n\nif types.TYPE_CHECKING:\n import typing_extensions as tpe\n\n class AdamHyperparameter(tpe.Protocol):\n \"\"\"Protocol class for hyperparameter of Adam.\n\n This is only for PEP 544 compliant static type checkers.\n \"\"\"\n alpha = None # type: float\n beta1 = None # type: float\n beta2 = None # type: float\n eps = None # type: float\n eta = None # type: float\n weight_decay_rate = None # type: float\n amsgrad = None # type: bool\n adabound = None # type: bool\n final_lr = None # type: float\n gamma = None # type: float\n\n\n_default_hyperparam = optimizer.Hyperparameter() # type: AdamHyperparameter # NOQA\n_default_hyperparam.alpha = 0.001\n_default_hyperparam.beta1 = 0.9\n_default_hyperparam.beta2 = 0.999\n_default_hyperparam.eps = 1e-8\n_default_hyperparam.eta = 1.0\n_default_hyperparam.weight_decay_rate = 0\n_default_hyperparam.amsgrad = False\n_default_hyperparam.adabound = False\n_default_hyperparam.final_lr = 0.1\n_default_hyperparam.gamma = 1e-3\n\n\ndef _learning_rate(hp, t):\n if t == 0:\n raise RuntimeError(\n 'Can\\'t determine the learning rate of Adam optimizer '\n 'because the update steps have not been started.')\n fix1 = 1. - math.pow(hp.beta1, t)\n fix2 = 1. - math.pow(hp.beta2, t)\n return hp.alpha * math.sqrt(fix2) / fix1\n\n\ndef _get_intermediate_dtype(dtype):\n # Returns the dtype for intermediate calculation.\n # For float16 input, float32 is used.\n # Otherwise the same dtype as the parameter is used.\n if dtype == numpy.float16:\n return numpy.float32\n return dtype\n\n\ndef _inplace_axpby(x, a, b, y):\n # in-place axpby: x = a * x + b * y\n if isinstance(x, intel64.mdarray):\n x.inplace_axpby(a, b, y)\n else:\n if a == 1:\n x += b * y\n else:\n x[:] = a * x + b * y\n\n\nclass AdamRule(optimizer.UpdateRule):\n\n \"\"\"Update rule of Adam optimization algorithm.\n\n See: `Adam: A Method for Stochastic Optimization\n <https://arxiv.org/abs/1412.6980v8>`_\n\n Modified for proper weight decay.\n\n See: `Fixing Weight Decay Regularization in Adam\n <https://openreview.net/forum?id=rk6qdGgCZ>`_\n\n With option to use AMSGrad variant of Adam.\n\n See: `On the Convergence of Adam and Beyond\n <https://openreview.net/forum?id=ryQu7f-RZ>`_\n\n With option to use AdaBound variant of Adam.\n\n See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate\n <https://openreview.net/forum?id=Bkg3g2R9FX>`\n\n See :class:`~chainer.optimizers.Adam` for the default values\n of the hyperparameters.\n\n Args:\n parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter\n that provides the default values.\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n weight_decay_rate (float): Weight decay rate.\n amsgrad (bool): Whether to use the AMSGrad variant of Adam.\n adabound (bool): Whether to use the AdaBound variant of Adam.\n final_lr (float): Final (SGD) learning rate in AdaBound.\n gamma (float): Convergence speed of the bound functions in AdaBound.\n\n \"\"\"\n _kernel = None\n _amsgrad_kernel = None\n _adabound_kernel = None\n _amsbound_kernel = None\n\n # Only used in 
`update_core_gpu`.\n # A dummy ndarray to help ElementwiseKernel deduce generic type T as\n # `dtype`.\n # It cannot be deduced only by scalar arguments.\n _dummy = None\n\n def __init__(self, parent_hyperparam=None,\n alpha=None, beta1=None, beta2=None, eps=None,\n eta=None, weight_decay_rate=None, amsgrad=None,\n adabound=None, final_lr=None, gamma=None):\n super(AdamRule, self).__init__(\n parent_hyperparam or _default_hyperparam)\n if alpha is not None:\n self.hyperparam.alpha = alpha\n if beta1 is not None:\n self.hyperparam.beta1 = beta1\n if beta2 is not None:\n self.hyperparam.beta2 = beta2\n if eps is not None:\n self.hyperparam.eps = eps\n if eta is not None:\n self.hyperparam.eta = eta\n if weight_decay_rate is not None:\n self.hyperparam.weight_decay_rate = weight_decay_rate\n if amsgrad is not None:\n self.hyperparam.amsgrad = amsgrad\n if adabound is not None:\n self.hyperparam.adabound = adabound\n if final_lr is not None:\n self.hyperparam.final_lr = final_lr\n if gamma is not None:\n self.hyperparam.gamma = gamma\n if self.hyperparam.adabound:\n self.initial_alpha = self.hyperparam.alpha\n\n def init_state(self, param):\n with chainer.using_device(param.device):\n xp = param.device.xp\n self.state['m'] = xp.zeros_like(param.data)\n self.state['v'] = xp.zeros_like(param.data)\n if self.hyperparam.amsgrad:\n self.state['vhat'] = xp.zeros_like(param.data)\n\n # For iDeep\n if isinstance(param.data, intel64.mdarray):\n self.state['m'] = intel64.ideep.array(\n self.state['m'], itype=intel64.ideep.wgt_array)\n self.state['v'] = intel64.ideep.array(\n self.state['v'], itype=intel64.ideep.wgt_array)\n if self.hyperparam.amsgrad:\n self.state['vhat'] = intel64.ideep.array(\n self.state['vhat'], itype=intel64.ideep.wgt_array)\n\n def _check_eps(self, interm_dtype):\n # Checks that the eps does not underflow.\n hp = self.hyperparam\n eps = interm_dtype(hp.eps)\n if hp.eps != 0 and eps == 0:\n raise ValueError(\n 'eps of Adam optimizer is too small for {} ({})'.format(\n interm_dtype.name, hp.eps))\n # Note that the converted `eps` (numpy scalar) is discarded here and\n # the original `hp.eps` is used in calculation, because Python\n # scalars are faster in cupy elementwise kernels.\n\n def update_core_cpu(self, param):\n grad = param.grad\n if grad is None:\n return\n hp = self.hyperparam\n dtype = _get_intermediate_dtype(param.dtype.type)\n self._check_eps(dtype)\n grad = grad.astype(dtype, copy=False)\n\n m, v = self.state['m'], self.state['v']\n\n # m += (1 - beta1) * (grad - m)\n _inplace_axpby(m, 1.0, 1.0 - hp.beta1, grad - m)\n # v += (1 - beta2) * (grad * grad - v)\n _inplace_axpby(v, 1.0, 1.0 - hp.beta2, grad*grad - v)\n\n if hp.amsgrad:\n vhat = self.state['vhat']\n # For iDeep\n if isinstance(vhat, intel64.mdarray):\n vhat[...] 
= numpy.maximum(vhat, v)\n else:\n numpy.maximum(vhat, v, out=vhat)\n else:\n vhat = v\n vhat = vhat.astype(dtype, copy=False)\n step = self.alpha_t / (numpy.sqrt(vhat) + hp.eps)\n if hp.adabound:\n lower, upper = self.bounds\n step = numpy.clip(step, lower, upper)\n # param -=\n # eta * (step * m - weight_decay_rate * param)\n _inplace_axpby(\n param.data, 1.0 - hp.eta * hp.weight_decay_rate, -hp.eta, step * m)\n\n def update_core_gpu(self, param):\n grad = param.grad\n if grad is None:\n return\n hp = self.hyperparam\n dtype = _get_intermediate_dtype(param.dtype.type)\n self._check_eps(dtype)\n\n if self._dummy is None:\n self._dummy = cuda.cupy.empty((0,), dtype=dtype)\n\n if hp.adabound:\n lower, upper = self.bounds\n if hp.amsgrad and hp.adabound:\n if AdamRule._amsbound_kernel is None:\n AdamRule._amsbound_kernel = cuda.elementwise(\n 'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '\n 'T lower, T upper, '\n 'T eps, T eta, T weight_decay_rate, raw T dummy',\n 'P param, P m, P v, P vhat',\n '''T grad_ = static_cast<T>(grad);\n m += one_minus_beta1 * (grad_ - m);\n v += one_minus_beta2 * (grad_ * grad - v);\n vhat = max(vhat, v);\n param -= eta *\n (max(min(alpha_t / (sqrt(vhat) + eps), upper),\n lower) * m + weight_decay_rate * param);''',\n 'amsbound')\n AdamRule._amsbound_kernel(\n grad, self.alpha_t, 1 - hp.beta1,\n 1 - hp.beta2, lower, upper, hp.eps,\n hp.eta, hp.weight_decay_rate, self._dummy,\n param.data, self.state['m'], self.state['v'],\n self.state['vhat'])\n elif hp.adabound:\n if AdamRule._adabound_kernel is None:\n AdamRule._adabound_kernel = cuda.elementwise(\n 'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '\n 'T lower, T upper, '\n 'T eps, T eta, T weight_decay_rate, raw T dummy',\n 'P param, P m, P v',\n '''T grad_ = static_cast<T>(grad);\n m += one_minus_beta1 * (grad_ - m);\n v += one_minus_beta2 * (grad_ * grad_ - v);\n param -= eta *\n (max(min(alpha_t / (sqrt(v) + eps), upper),\n lower) * m + weight_decay_rate * param);''',\n 'adabound')\n AdamRule._adabound_kernel(\n grad, self.alpha_t, 1 - hp.beta1,\n 1 - hp.beta2, lower, upper, hp.eps,\n hp.eta, hp.weight_decay_rate, self._dummy,\n param.data, self.state['m'], self.state['v'])\n elif hp.amsgrad:\n if AdamRule._amsgrad_kernel is None:\n AdamRule._amsgrad_kernel = cuda.elementwise(\n 'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '\n 'T eps, T eta, T weight_decay_rate, raw T dummy',\n 'P param, P m, P v, P vhat',\n '''T grad_ = static_cast<T>(grad);\n m += one_minus_beta1 * (grad_ - m);\n v += one_minus_beta2 * (grad_ * grad_ - v);\n vhat = max(vhat, v);\n param -= eta * (alpha_t * m / (sqrt(vhat) + eps) +\n weight_decay_rate * param);''',\n 'adam')\n AdamRule._amsgrad_kernel(\n grad, self.alpha_t, 1 - hp.beta1,\n 1 - hp.beta2, hp.eps,\n hp.eta, hp.weight_decay_rate, self._dummy,\n param.data, self.state['m'], self.state['v'],\n self.state['vhat'])\n else:\n if AdamRule._kernel is None:\n AdamRule._kernel = cuda.elementwise(\n 'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '\n 'T eps, T eta, T weight_decay_rate, raw T dummy',\n 'P param, P m, P v',\n '''T grad_ = static_cast<T>(grad);\n m += one_minus_beta1 * (grad_ - m);\n v += one_minus_beta2 * (grad_ * grad_ - v);\n param -= eta * (alpha_t * m / (sqrt(v) + eps) +\n weight_decay_rate * param);''',\n 'adam')\n AdamRule._kernel(\n grad, self.alpha_t, 1 - hp.beta1,\n 1 - hp.beta2, hp.eps,\n hp.eta, hp.weight_decay_rate, self._dummy,\n param.data, self.state['m'], self.state['v'])\n\n @property\n def alpha_t(self):\n 
return _learning_rate(self.hyperparam, self.t)\n\n @property\n def lr(self):\n warnings.warn(\n 'AdamRule.lr has been renamed to AdamRule.alpha_t. '\n 'Use of AdamRule.lr is deprecated in Chainer v6.',\n DeprecationWarning)\n return self.alpha_t\n\n @property\n def bounds(self):\n if self.t == 0:\n raise RuntimeError(\n 'Can\\'t determine the bounds of AdaBound optimizer '\n 'because the update steps have not been started.')\n hp = self.hyperparam\n # Workaround to reflect changing `alpha` in `final_lr`.\n # (by some of `chainer.training.extensions`)\n final_lr = hp.final_lr * hp.alpha / self.initial_alpha\n lower = final_lr * (1.0 - 1.0 / (hp.gamma * self.t + 1))\n upper = final_lr * (1.0 + 1.0 / (hp.gamma * self.t))\n return lower, upper\n\n\nclass Adam(optimizer.GradientMethod):\n\n \"\"\"Adam optimizer.\n\n See: `Adam: A Method for Stochastic Optimization\n <https://arxiv.org/abs/1412.6980v8>`_\n\n Modified for proper weight decay (also called\n :class:`~chainer.optimizers.AdamW`).\n AdamW introduces the additional parameters ``eta``\n and ``weight_decay_rate``, which can be used to properly scale the\n learning rate, and decouple the weight decay rate from ``alpha``,\n as shown in the below paper.\n\n Note that with the default values ``eta = 1`` and\n ``weight_decay_rate = 0``, this implementation is identical to\n the standard Adam method.\n\n See: `Fixing Weight Decay Regularization in Adam\n <https://openreview.net/forum?id=rk6qdGgCZ>`_\n\n A flag ``amsgrad`` to use the :class:`~chainer.optimizers.AMSGrad`\n variant of Adam from the paper:\n `On the Convergence of Adam and Beyond\n <https://openreview.net/forum?id=ryQu7f-RZ>`_\n\n A flag ``adabound`` to use the :class:`~chainer.optimizers.AdaBound`\n variant of Adam from the paper:\n `Adaptive Gradient Methods with Dynamic Bound of Learning Rate\n <https://openreview.net/forum?id=Bkg3g2R9FX>`_\n\n If both ``amsgrad`` and ``adabound`` are ``True``, the optimizer is\n equivalent to :class:`~chainer.optimizers.AMSBound` proposed in the\n AdaBound paper.\n\n Args:\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n weight_decay_rate (float): Weight decay rate.\n amsgrad (bool): Whether to use AMSGrad variant of Adam.\n adabound (bool): Whether to use the AdaBound variant of Adam.\n final_lr (float): Final (SGD) learning rate in AdaBound.\n gamma (float): Convergence speed of the bound functions in AdaBound.\n\n \"\"\"\n\n def __init__(self,\n alpha=_default_hyperparam.alpha,\n beta1=_default_hyperparam.beta1,\n beta2=_default_hyperparam.beta2,\n eps=_default_hyperparam.eps,\n eta=_default_hyperparam.eta,\n weight_decay_rate=_default_hyperparam.weight_decay_rate,\n amsgrad=_default_hyperparam.amsgrad,\n adabound=_default_hyperparam.adabound,\n final_lr=_default_hyperparam.final_lr,\n gamma=_default_hyperparam.gamma):\n super(Adam, self).__init__()\n self.hyperparam.alpha = alpha\n self.hyperparam.beta1 = beta1\n self.hyperparam.beta2 = beta2\n self.hyperparam.eps = eps\n self.hyperparam.eta = eta\n self.hyperparam.weight_decay_rate = weight_decay_rate\n self.hyperparam.amsgrad = amsgrad\n self.hyperparam.adabound = adabound\n self.hyperparam.final_lr = final_lr\n self.hyperparam.gamma = gamma\n\n alpha = optimizer.HyperparameterProxy('alpha')\n beta1 = 
optimizer.HyperparameterProxy('beta1')\n beta2 = optimizer.HyperparameterProxy('beta2')\n eps = optimizer.HyperparameterProxy('eps')\n eta = optimizer.HyperparameterProxy('eta')\n weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')\n amsgrad = optimizer.HyperparameterProxy('amsgrad')\n adabound = optimizer.HyperparameterProxy('adabound')\n final_lr = optimizer.HyperparameterProxy('final_lr')\n gamma = optimizer.HyperparameterProxy('gamma')\n\n def create_update_rule(self):\n return AdamRule(self.hyperparam)\n\n @property\n def alpha_t(self):\n return _learning_rate(self.hyperparam, self.t)\n\n @property\n def lr(self):\n warnings.warn(\n 'Adam.lr has been renamed to AdamRule.alpha_t. '\n 'Use of Adam.lr is deprecated in Chainer v6.',\n DeprecationWarning)\n return self.alpha_t\n\n\nclass AdamW(Adam):\n\n \"\"\"AdamW optimizer.\n\n This class is a special case of :class:`~chainer.optimizers.Adam`.\n\n See: `Fixing Weight Decay Regularization in Adam\n <https://openreview.net/forum?id=rk6qdGgCZ>`_\n\n Args:\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n The default value is 1.0.\n weight_decay_rate (float): Weight decay rate.\n The default value is 0.\n \"\"\"\n\n def __init__(self,\n alpha=_default_hyperparam.alpha,\n beta1=_default_hyperparam.beta1,\n beta2=_default_hyperparam.beta2,\n eps=_default_hyperparam.eps,\n eta=_default_hyperparam.eta,\n weight_decay_rate=_default_hyperparam.weight_decay_rate):\n super(AdamW, self).__init__(\n alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,\n weight_decay_rate=weight_decay_rate)\n\n\nclass AMSGrad(Adam):\n\n \"\"\"AMSGrad optimizer.\n\n This class is a special case of :class:`~chainer.optimizers.Adam`.\n\n See: `On the Convergence of Adam and Beyond\n <https://openreview.net/forum?id=ryQu7f-RZ>`_\n\n Args:\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n \"\"\"\n\n def __init__(self,\n alpha=_default_hyperparam.alpha,\n beta1=_default_hyperparam.beta1,\n beta2=_default_hyperparam.beta2,\n eps=_default_hyperparam.eps,\n eta=_default_hyperparam.eta):\n super(AMSGrad, self).__init__(\n alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,\n amsgrad=True)\n\n\nclass AdaBound(Adam):\n\n \"\"\"AdaBound optimizer.\n\n This class is a special case of :class:`~chainer.optimizers.Adam`.\n\n See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate\n <https://openreview.net/forum?id=Bkg3g2R9FX>`_\n\n Args:\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n final_lr (float): Final (SGD) learning rate in AdaBound.\n gamma (float): Convergence speed of the bound functions in AdaBound.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n \"\"\"\n\n def __init__(self,\n alpha=_default_hyperparam.alpha,\n beta1=_default_hyperparam.beta1,\n beta2=_default_hyperparam.beta2,\n final_lr=_default_hyperparam.final_lr,\n 
gamma=_default_hyperparam.gamma,\n eps=_default_hyperparam.eps,\n eta=_default_hyperparam.eta):\n super(AdaBound, self).__init__(\n alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,\n amsgrad=False, adabound=True, final_lr=final_lr, gamma=gamma)\n\n\nclass AMSBound(Adam):\n\n \"\"\"AMSBound optimizer.\n\n This class is a special case of :class:`~chainer.optimizers.Adam`.\n\n See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate\n <https://openreview.net/forum?id=Bkg3g2R9FX>`_\n\n Args:\n alpha (float): Coefficient of learning rate.\n beta1 (float): Exponential decay rate of the first order moment.\n beta2 (float): Exponential decay rate of the second order moment.\n final_lr (float): Final (SGD) learning rate in AdaBound.\n gamma (float): Convergence speed of the bound functions in AdaBound.\n eps (float): Small value for the numerical stability.\n eta (float): Schedule multiplier, can be used for warm restarts.\n \"\"\"\n\n def __init__(self,\n alpha=_default_hyperparam.alpha,\n beta1=_default_hyperparam.beta1,\n beta2=_default_hyperparam.beta2,\n final_lr=_default_hyperparam.final_lr,\n gamma=_default_hyperparam.gamma,\n eps=_default_hyperparam.eps,\n eta=_default_hyperparam.eta):\n super(AMSBound, self).__init__(\n alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,\n amsgrad=True, adabound=True, final_lr=final_lr, gamma=gamma)\n"
]
| [
[
"numpy.maximum",
"numpy.sqrt",
"numpy.clip"
]
]
|
prabhakar267/gluon-nlp | [
"76fdafbebbbb8f4a09fb82a032b2bdd17ef86287"
]
| [
"tests/unittest/test_datasets.py"
]
| [
"# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport io\nimport random\n\nfrom flaky import flaky\nimport mxnet as mx\nimport numpy as np\nimport pytest\n\nimport gluonnlp as nlp\nfrom gluonnlp.base import _str_types\nfrom mxnet.gluon.data import SimpleDataset\n\n###############################################################################\n# Registry\n###############################################################################\[email protected]\ndef test_dataset_registry():\n @nlp.data.register(segment=['train'])\n class MyDataset(mx.gluon.data.Dataset):\n def __init__(self, segment='train'):\n pass\n\n my_dataset = nlp.data.create('MyDataset')\n\n with pytest.raises(RuntimeError):\n\n @nlp.data.register(segment='thisshouldbealistofarguments')\n class MyDataset2(mx.gluon.data.Dataset):\n def __init__(self, segment='train'):\n pass\n\n with pytest.raises(RuntimeError):\n\n @nlp.data.register(invalidargument=['train'])\n class MyDataset3(mx.gluon.data.Dataset):\n def __init__(self, segment='train'):\n pass\n\n @nlp.data.register()\n class MyDataset4(mx.gluon.data.Dataset):\n def __init__(self, segment='train'):\n pass\n\n my_dataset = nlp.data.create('MyDataset4')\n\n\n @nlp.data.register\n class MyDataset5(mx.gluon.data.Dataset):\n def __init__(self, segment='train'):\n pass\n\n my_dataset = nlp.data.create('MyDataset5')\n\n\n###############################################################################\n# Sentiment analysis\n###############################################################################\[email protected]\[email protected]_required\ndef test_imdb():\n train = nlp.data.IMDB(\n root=os.path.join('tests', 'data', 'imdb'), segment='train')\n test = nlp.data.IMDB(\n root=os.path.join('tests', 'data', 'imdb'), segment='test')\n unsup = nlp.data.IMDB(\n root=os.path.join('tests', 'data', 'imdb'), segment='unsup')\n assert len(train) == 25000, len(train)\n assert len(test) == 25000, len(test)\n assert len(unsup) == 50000, len(unsup)\n\n for i, (data, score) in enumerate(train):\n assert isinstance(data, _str_types)\n assert score <= 4 or score >= 7\n\n for i, (data, score) in enumerate(test):\n assert isinstance(data, _str_types)\n assert score <= 4 or score >= 7\n\n for i, (data, score) in enumerate(unsup):\n assert isinstance(data, _str_types)\n assert score == 0\n\[email protected]\[email protected]_required\ndef test_mr():\n all = nlp.data.MR(\n root=os.path.join('tests', 'data', 'mr'))\n assert len(all) == 10662, len(all)\n for i, (data, label) in enumerate(all):\n assert isinstance(data, _str_types)\n assert label <= 1\n\[email protected]\[email protected]_required\ndef test_sst_1():\n train = nlp.data.SST_1(\n root=os.path.join('tests', 
'data', 'sst-1'), segment='train')\n test = nlp.data.SST_1(\n root=os.path.join('tests', 'data', 'sst-1'), segment='test')\n dev = nlp.data.SST_1(\n root=os.path.join('tests', 'data', 'sst-1'), segment='dev')\n assert len(train) == 156817, len(train)\n assert len(test) == 2210, len(test)\n assert len(dev) == 1101, len(dev)\n for i, (data, label) in enumerate(train):\n assert isinstance(data, _str_types)\n assert label <= 4\n for i, (data, label) in enumerate(test):\n assert isinstance(data, _str_types)\n assert label <= 4\n for i, (data, label) in enumerate(dev):\n assert isinstance(data, _str_types)\n assert label <= 4\n\[email protected]\[email protected]_required\ndef test_sst_2():\n train = nlp.data.SST_2(\n root=os.path.join('tests', 'data', 'sst-2'), segment='train')\n test = nlp.data.SST_2(\n root=os.path.join('tests', 'data', 'sst-2'), segment='test')\n dev = nlp.data.SST_2(\n root=os.path.join('tests', 'data', 'sst-2'), segment='dev')\n assert len(train) == 76961, len(train)\n assert len(test) == 1821, len(test)\n assert len(dev) == 872, len(dev)\n for i, (data, label) in enumerate(train):\n assert isinstance(data, _str_types)\n assert label <= 1\n for i, (data, label) in enumerate(test):\n assert isinstance(data, _str_types)\n assert label <= 1\n for i, (data, label) in enumerate(dev):\n assert isinstance(data, _str_types)\n assert label <= 1\n\[email protected]\[email protected]_required\ndef test_subj():\n all = nlp.data.SUBJ(\n root=os.path.join('tests', 'data', 'mr'))\n assert len(all) == 10000, len(all)\n for i, (data, label) in enumerate(all):\n assert isinstance(data, _str_types)\n assert label <= 1\n\[email protected]\[email protected]_required\ndef test_trec():\n train = nlp.data.TREC(\n root=os.path.join('tests', 'data', 'trec'), segment='train')\n test = nlp.data.TREC(\n root=os.path.join('tests', 'data', 'trec'), segment='test')\n assert len(train) == 5452, len(train)\n assert len(test) == 500, len(test)\n for i, (data, label) in enumerate(train):\n assert isinstance(data, _str_types)\n assert label <= 5\n for i, (data, label) in enumerate(test):\n assert isinstance(data, _str_types)\n assert label <= 5\n\[email protected]\[email protected]_required\ndef test_cr():\n all = nlp.data.CR(\n root=os.path.join('tests', 'data', 'cr'))\n assert len(all) == 3775, len(all)\n for i, (data, label) in enumerate(all):\n assert isinstance(data, _str_types)\n assert label <= 1\n\[email protected]\[email protected]_required\ndef test_mpqa():\n all = nlp.data.MPQA(\n root=os.path.join('tests', 'data', 'mpqa'))\n assert len(all) == 10606, len(all)\n for i, (data, label) in enumerate(all):\n assert isinstance(data, _str_types)\n assert label <= 1\n\n###############################################################################\n# Word similarity and relatedness datasets\n###############################################################################\ndef _assert_similarity_dataset(data):\n # Check datatypes\n assert isinstance(data[0][0], _str_types)\n assert isinstance(data[0][1], _str_types)\n assert np.isfinite(data[0][2])\n\n # Check score magnitude\n assert all(data.min <= row[2] <= data.max for row in data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]('segment,length', [('all', 352), ('relatedness', 252),\n ('similarity', 203)])\[email protected]\[email protected]_required\ndef test_wordsim353(segment, length):\n # 'all' has length 352 as the original dataset contains the 'money/cash'\n # pair twice with different similarity ratings, which was fixed by the\n # 
http://alfonseca.org/eng/research/wordsim353.html version of the dataset\n # that we are using.\n data = nlp.data.WordSim353(segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'wordsim353'))\n assert len(data) == length, len(data)\n _assert_similarity_dataset(data)\n\n\[email protected]\[email protected]_required\ndef test_men():\n for segment, length in [(\"full\", 3000), (\"dev\", 2000), (\"test\", 1000)]:\n data = nlp.data.MEN(\n root=os.path.join('tests', 'data', 'men'), segment=segment)\n assert len(data) == length, len(data)\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_radinsky_mturk():\n data = nlp.data.RadinskyMTurk(\n root=os.path.join('tests', 'externaldata', 'radinsky'))\n assert len(data) == 287\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_verb143():\n data = nlp.data.BakerVerb143(\n root=os.path.join('tests', 'externaldata', 'verb143'))\n assert len(data) == 144\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\ndef test_verb130():\n data = nlp.data.YangPowersVerb130(\n root=os.path.join('tests', 'externaldata', 'verb130'))\n assert len(data) == 130\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_rare_words():\n data = nlp.data.RareWords(\n root=os.path.join('tests', 'externaldata', 'rarewords'))\n assert len(data) == 2034\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_simlex999():\n data = nlp.data.SimLex999(\n root=os.path.join('tests', 'externaldata', 'simlex999'))\n assert len(data) == 999\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_simverb3500():\n data = nlp.data.SimVerb3500(\n root=os.path.join('tests', 'externaldata', 'simverb3500'))\n assert len(data) == 3500\n _assert_similarity_dataset(data)\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_semeval17task2():\n for segment, length in [(\"trial\", 18), (\"test\", 500)]:\n data = nlp.data.SemEval17Task2(\n root=os.path.join('tests', 'externaldata', 'semeval17task2'),\n segment=segment)\n assert len(data) == length\n _assert_similarity_dataset(data)\n\n\n###############################################################################\n# Word analogy datasets\n###############################################################################\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_googleanalogy():\n data = nlp.data.GoogleAnalogyTestSet(\n root=os.path.join('tests', 'externaldata', 'google_analogy'))\n assert len(data[0]) == 4\n assert len(data) == 10675 + 8869\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_bigger_analogy():\n data = nlp.data.BiggerAnalogyTestSet(\n root=os.path.join('tests', 'externaldata', 'bigger_analogy'))\n assert len(data[0]) == 4\n assert len(data) == 98000\n\n\n###############################################################################\n# CONLL\n###############################################################################\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_conll2000():\n train = nlp.data.CoNLL2000(segment='train', root=os.path.join(\n 'tests', 
'externaldata', 'conll2000'))\n test = nlp.data.CoNLL2000(segment='test', root=os.path.join(\n 'tests', 'externaldata', 'conll2000'))\n assert len(train) == 8936, len(train)\n assert len(test) == 2012, len(test)\n\n for i, (data, pos, chk) in enumerate(train):\n assert all(isinstance(d, _str_types) for d in data), data\n assert all(isinstance(p, _str_types) for p in pos), pos\n assert all(isinstance(c, _str_types) for c in chk), chk\n\n for i, (data, pos, chk) in enumerate(test):\n assert all(isinstance(d, _str_types) for d in data), data\n assert all(isinstance(p, _str_types) for p in pos), pos\n assert all(isinstance(c, _str_types) for c in chk), chk\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_conll2001():\n for part in range(1, 4):\n train = nlp.data.CoNLL2001(part, segment='train', root=os.path.join(\n 'tests', 'externaldata', 'conll2001'))\n testa = nlp.data.CoNLL2001(part, segment='testa', root=os.path.join(\n 'tests', 'externaldata', 'conll2001'))\n testb = nlp.data.CoNLL2001(part, segment='testb', root=os.path.join(\n 'tests', 'externaldata', 'conll2001'))\n assert len(train) == 8936, len(train)\n assert len(testa) == 2012, len(testa)\n assert len(testb) == 1671, len(testb)\n\n for dataset in [train, testa, testb]:\n for i, (data, pos, chk, clause) in enumerate(dataset):\n assert all(isinstance(d, _str_types) for d in data), data\n assert all(isinstance(p, _str_types) for p in pos), pos\n assert all(isinstance(c, _str_types) for c in chk), chk\n assert all(isinstance(i, _str_types) for i in clause), clause\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]('segment,length', [\n ('train', 15806),\n ('testa', 2895),\n ('testb', 5195),\n])\[email protected]\[email protected]_required\ndef test_conll2002_ned(segment, length):\n dataset = nlp.data.CoNLL2002('ned', segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'conll2002'))\n assert len(dataset) == length, len(dataset)\n for i, (data, pos, ner) in enumerate(dataset):\n assert all(isinstance(d, _str_types) for d in data), data\n assert all(isinstance(p, _str_types) for p in pos), pos\n assert all(isinstance(n, _str_types) for n in ner), ner\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]('segment,length', [\n ('train', 8323),\n ('testa', 1915),\n ('testb', 1517),\n])\[email protected]\[email protected]_required\ndef test_conll2002_esp(segment, length):\n dataset = nlp.data.CoNLL2002('esp', segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'conll2002'))\n assert len(dataset) == length, len(dataset)\n for i, (data, ner) in enumerate(dataset):\n assert all(isinstance(d, _str_types) for d in data), data\n assert all(isinstance(n, _str_types) for n in ner), ner\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]('segment,length', [\n ('train', 8936),\n ('dev', 2012),\n ('test', 1671),\n])\[email protected]\[email protected]_required\ndef test_conll2004(segment, length):\n dataset = nlp.data.CoNLL2004(segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'conll2004'))\n assert len(dataset) == length, len(dataset)\n\n for i, x in enumerate(dataset):\n assert len(x) >= 6, x\n assert all(isinstance(d, _str_types) for f in x for d in f), x\n assert max(len(f) for f in x) == min(len(f) for f in x), x\n\n\n@flaky(max_runs=2, min_passes=1)\[email protected]\[email protected]_required\ndef test_ud21():\n test_langs = list(nlp._constants.UD21_DATA_FILE_SHA1.items())\n random.shuffle(test_langs)\n test_langs = test_langs[:30]\n for lang, segments in 
test_langs:\n segment = list(segments.keys())\n random.shuffle(segment)\n segment = segment[0]\n dataset = nlp.data.UniversalDependencies21(\n lang=lang, segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'ud2.1'))\n print('processing {}: {}'.format(lang, segment))\n for i, x in enumerate(dataset):\n assert len(x) >= 9, x\n assert all(isinstance(d, _str_types) for f in x for d in f), x\n assert max(len(f) for f in x) == min(len(f) for f in x)\n\n\n###############################################################################\n# Translation\n###############################################################################\[email protected]\[email protected]_required\ndef test_iwlst2015():\n # Test en to vi\n train_en_vi = nlp.data.IWSLT2015(segment='train', root='tests/data/iwlst2015')\n val_en_vi = nlp.data.IWSLT2015(segment='val', root='tests/data/iwlst2015')\n test_en_vi = nlp.data.IWSLT2015(segment='test', root='tests/data/iwlst2015')\n assert len(train_en_vi) == 133166\n assert len(val_en_vi) == 1553\n assert len(test_en_vi) == 1268\n\n en_vocab, vi_vocab = train_en_vi.src_vocab, train_en_vi.tgt_vocab\n assert len(en_vocab) == 17191\n assert len(vi_vocab) == 7709\n\n train_vi_en = nlp.data.IWSLT2015(segment='train', src_lang='vi', tgt_lang='en',\n root='tests/data/iwlst2015')\n vi_vocab, en_vocab = train_vi_en.src_vocab, train_vi_en.tgt_vocab\n assert len(en_vocab) == 17191\n assert len(vi_vocab) == 7709\n for i in range(10):\n lhs = train_en_vi[i]\n rhs = train_vi_en[i]\n assert lhs[0] == rhs[1] and rhs[0] == lhs[1]\n\n\[email protected]\[email protected]_required\ndef test_wmt2016():\n train = nlp.data.WMT2016(segment='train', src_lang='en', tgt_lang='de',\n root='tests/data/wmt2016')\n newstests = [nlp.data.WMT2016(segment='newstest%d' %i, src_lang='en', tgt_lang='de',\n root='tests/data/wmt2016') for i in range(2012, 2017)]\n assert len(train) == 4549428\n assert tuple(len(ele) for ele in newstests) == (3003, 3000, 3003, 2169, 2999)\n\n newstest_2012_2015 = nlp.data.WMT2016(segment=['newstest%d' %i for i in range(2012, 2016)],\n src_lang='en', tgt_lang='de', root='tests/data/wmt2016')\n assert len(newstest_2012_2015) == 3003 + 3000 + 3003 + 2169\n\n\[email protected]\[email protected]_required\ndef test_wmt2016bpe():\n train = nlp.data.WMT2016BPE(segment='train', src_lang='en', tgt_lang='de',\n root='tests/data/wmt2016bpe')\n newstests = [nlp.data.WMT2016BPE(segment='newstest%d' %i, src_lang='en', tgt_lang='de',\n root='tests/data/wmt2016bpe') for i in range(2012, 2017)]\n assert len(train) == 4500966\n assert tuple(len(ele) for ele in newstests) == (3003, 3000, 3003, 2169, 2999)\n\n newstest_2012_2015 = nlp.data.WMT2016BPE(segment=['newstest%d' %i for i in range(2012, 2016)],\n src_lang='en', tgt_lang='de', root='tests/data/wmt2016bpe')\n assert len(newstest_2012_2015) == 3003 + 3000 + 3003 + 2169\n en_vocab, de_vocab = train.src_vocab, train.tgt_vocab\n assert len(en_vocab) == 36548\n assert len(de_vocab) == 36548\n\n\[email protected]\[email protected]_required\ndef test_wmt2014():\n train = nlp.data.WMT2014(segment='train', src_lang='en', tgt_lang='de',\n root='tests/data/wmt2014')\n newstests = [nlp.data.WMT2014(segment='newstest%d' %i, src_lang='en', tgt_lang='de',\n root='tests/data/wmt2014') for i in range(2009, 2015)]\n assert len(train) == 4509333\n assert tuple(len(ele) for ele in newstests) == (2525, 2489, 3003, 3003, 3000, 2737)\n\n newstest_2009_2013 = nlp.data.WMT2014(segment=['newstest%d' %i for i in range(2009, 2014)],\n src_lang='en', tgt_lang='de', 
root='tests/data/wmt2014')\n assert len(newstest_2009_2013) == 2525 + 2489 + 3003 + 3003 + 3000\n\n newstest_2014 = nlp.data.WMT2014(segment='newstest2014', src_lang='de', tgt_lang='en',\n root='tests/data/wmt2014')\n assert len(newstest_2014) == 3003\n\n newstest_2014 = nlp.data.WMT2014(segment='newstest2014', src_lang='de', tgt_lang='en', full=True,\n root='tests/data/wmt2014')\n assert len(newstest_2014) == 3003\n\n\[email protected]\[email protected]_required\ndef test_wmt2014bpe():\n train = nlp.data.WMT2014BPE(segment='train', src_lang='en', tgt_lang='de',\n root='tests/data/wmt2014bpe')\n newstests = [nlp.data.WMT2014BPE(segment='newstest%d' %i, src_lang='en', tgt_lang='de',\n root='tests/data/wmt2014bpe') for i in range(2009, 2015)]\n assert len(train) == 4493328\n assert tuple(len(ele) for ele in newstests) == (2525, 2489, 3003, 3003, 3000, 2737)\n\n newstest_2009_2013 = nlp.data.WMT2014BPE(segment=['newstest%d' %i for i in range(2009, 2014)],\n src_lang='en', tgt_lang='de', root='tests/data/wmt2014bpe')\n assert len(newstest_2009_2013) == 2525 + 2489 + 3003 + 3003 + 3000\n en_vocab, de_vocab = train.src_vocab, train.tgt_vocab\n assert len(en_vocab) == 36794\n assert len(de_vocab) == 36794\n\n newstest_2014 = nlp.data.WMT2014BPE(segment='newstest2014', src_lang='de', tgt_lang='en',\n root='tests/data/wmt2014bpe')\n assert len(newstest_2014) == 3003\n\n newstest_2014 = nlp.data.WMT2014BPE(segment='newstest2014', src_lang='de', tgt_lang='en', full=True,\n root='tests/data/wmt2014bpe')\n assert len(newstest_2014) == 3003\n\n###############################################################################\n# Question answering\n###############################################################################\[email protected]\[email protected]_required\ndef test_load_dev_squad():\n # number of records in dataset is equal to number of different questions\n train_dataset = nlp.data.SQuAD(\n segment='train', version='1.1', root='tests/data/squad')\n assert len(train_dataset) == 87599\n\n val_dataset = nlp.data.SQuAD(\n segment='dev',version='1.1', root='tests/data/squad')\n assert len(val_dataset) == 10570\n\n # Each record is a tuple of 6 elements: record_id, question Id, question, context,\n # list of answer texts, list of answer start indices\n for record in val_dataset:\n assert len(record) == 6\n\n train_dataset_2 = nlp.data.SQuAD(\n segment='train', version='2.0', root='tests/data/squad')\n assert len(train_dataset_2) == 130319\n\n val_dataset = nlp.data.SQuAD(\n segment='dev', version='2.0', root='tests/data/squad')\n assert len(val_dataset) == 11873\n\n # Each record is a tuple of 7 elements: record_id, question Id, question, context,\n # list of answer texts, list of answer start indices, is_impossible\n for record in val_dataset:\n assert len(record) == 7\n\n###############################################################################\n# Intent Classification and Slot Labeling\n###############################################################################\[email protected]_required\[email protected]('dataset,segment,expected_samples', [\n ('atis', 'train', 4478),\n ('atis', 'dev', 500),\n ('atis', 'test', 893),\n ('snips', 'train', 13084),\n ('snips', 'dev', 700),\n ('snips', 'test', 700)])\ndef test_intent_slot(dataset, segment, expected_samples):\n assert dataset in ['atis', 'snips']\n if dataset == 'atis':\n data_cls = nlp.data.ATISDataset\n else:\n data_cls = nlp.data.SNIPSDataset\n\n dataset = data_cls(segment=segment, root='tests/data/'+dataset)\n\n assert len(dataset) == 
expected_samples\n assert len(dataset[0]) == 3\n assert all(len(x[0]) == len(x[1]) for x in dataset)\n\ndef test_counter():\n x = nlp.data.Counter({'a': 10, 'b': 1, 'c': 1})\n y = x.discard(3, '<unk>')\n assert y['a'] == 10\n assert y['<unk>'] == 2\n\n# this test is not tested on CI due to long running time\ndef _test_gbw_stream():\n gbw = nlp.data.GBWStream(root=os.path.join('tests', 'data', 'gbw'))\n counter = nlp.data.Counter(gbw)\n counter.discard(3, '<unk>')\n # reference count obtained from:\n # https://github.com/rafaljozefowicz/lm/blob/master/1b_word_vocab.txt\n assert counter['the'] == 35936573\n assert counter['.'] == 29969612\n vocab = gbw.vocab\n assert len(vocab) == 793471\n\n\ndef test_concatenation():\n datasets = [\n SimpleDataset([1,2,3,4]),\n SimpleDataset([5,6]),\n SimpleDataset([8,0,9]),\n ]\n dataset = nlp.data.ConcatDataset(datasets)\n assert len(dataset) == 9\n assert dataset[0] == 1\n assert dataset[5] == 6\n\ndef test_tsv():\n data = \"a,b,c\\n\"\n data += \"d,e,f\\n\"\n data += \"g,h,i\\n\"\n with open('test_tsv.tsv', 'w') as fout:\n fout.write(data)\n num_discard = 1\n field_separator = nlp.data.utils.Splitter(',')\n field_indices = [0,2]\n dataset = nlp.data.TSVDataset('test_tsv.tsv', num_discard_samples=num_discard,\n field_separator=field_separator,\n field_indices=field_indices)\n num_samples = 3 - num_discard\n idx = random.randint(0, num_samples - 1)\n assert len(dataset) == num_samples\n assert len(dataset[0]) == 2\n assert dataset[1] == [u'g', u'i']\n\ndef test_numpy_dataset():\n a = np.arange(6).reshape((2,3))\n filename = 'test_numpy_dataset'\n\n # test npy\n np.save(filename, a)\n dataset = nlp.data.NumpyDataset(filename + '.npy')\n assert dataset.keys is None\n assert len(dataset) == len(a)\n assert np.all(dataset[0] == a[0])\n assert np.all(dataset[1] == a[1])\n\n # test npz with a single array\n np.savez(filename, a)\n dataset = nlp.data.NumpyDataset(filename + '.npz')\n assert len(dataset) == len(a)\n assert np.all(dataset[0] == a[0])\n assert np.all(dataset[1] == a[1])\n\n # test npz with multiple arrays\n b = np.arange(16).reshape((2,8))\n np.savez(filename, a=a, b=b)\n dataset = nlp.data.NumpyDataset(filename + '.npz')\n assert dataset.keys == ['a', 'b']\n assert len(dataset) == len(a)\n assert np.all(dataset[0][0] == a[0])\n assert np.all(dataset[1][0] == a[1])\n assert np.all(dataset[0][1] == b[0])\n assert np.all(dataset[1][1] == b[1])\n dataset_b = dataset.get_field('b')\n assert np.all(dataset_b == b)\n\[email protected]('cls,name,segment,length,fields', [\n (nlp.data.GlueCoLA, 'cola', 'train', 8551, 2),\n (nlp.data.GlueCoLA, 'cola', 'dev', 1043, 2),\n (nlp.data.GlueCoLA, 'cola', 'test', 1063, 1),\n # source: https://arxiv.org/pdf/1804.07461.pdf\n (nlp.data.GlueSST2, 'sst', 'train', 67349, 2),\n (nlp.data.GlueSST2, 'sst', 'dev', 872, 2),\n (nlp.data.GlueSST2, 'sst', 'test', 1821, 1),\n # source: http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark\n (nlp.data.GlueSTSB, 'sts', 'train', 5749, 3),\n (nlp.data.GlueSTSB, 'sts', 'dev', 1500, 3),\n (nlp.data.GlueSTSB, 'sts', 'test', 1379, 2),\n # source: https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs\n (nlp.data.GlueQQP, 'qqp', 'train', 363849, 3),\n (nlp.data.GlueQQP, 'qqp', 'dev', 40430, 3),\n (nlp.data.GlueQQP, 'qqp', 'test', 390965, 2),\n # source: http://www.nyu.edu/projects/bowman/multinli/paper.pdf\n (nlp.data.GlueMNLI, 'mnli', 'train', 392702, 3),\n (nlp.data.GlueMNLI, 'mnli', 'dev_matched', 9815, 3),\n (nlp.data.GlueMNLI, 'mnli', 'dev_mismatched', 9832, 3),\n 
(nlp.data.GlueMNLI, 'mnli', 'test_matched', 9796, 2),\n (nlp.data.GlueMNLI, 'mnli', 'test_mismatched', 9847, 2),\n # source: https://arxiv.org/pdf/1804.07461.pdf\n (nlp.data.GlueRTE, 'rte', 'train', 2490, 3),\n (nlp.data.GlueRTE, 'rte', 'dev', 277, 3),\n (nlp.data.GlueRTE, 'rte', 'test', 3000, 2),\n # source: https://arxiv.org/pdf/1804.07461.pdf\n (nlp.data.GlueQNLI, 'qnli', 'train', 108436, 3),\n (nlp.data.GlueQNLI, 'qnli', 'dev', 5732, 3),\n (nlp.data.GlueQNLI, 'qnli', 'test', 5740, 2),\n # source: https://arxiv.org/pdf/1804.07461.pdf\n (nlp.data.GlueWNLI, 'wnli', 'train', 635, 3),\n (nlp.data.GlueWNLI, 'wnli', 'dev', 71, 3),\n (nlp.data.GlueWNLI, 'wnli', 'test', 146, 2),\n (nlp.data.GlueMRPC, 'mrpc', 'train', 3668, 3),\n (nlp.data.GlueMRPC, 'mrpc', 'dev', 408, 3),\n (nlp.data.GlueMRPC, 'mrpc', 'test', 1725, 2),\n])\[email protected]\[email protected]_required\ndef test_glue_data(cls, name, segment, length, fields):\n dataset = cls(segment=segment, root=os.path.join(\n 'tests', 'externaldata', 'glue', name))\n assert len(dataset) == length, len(dataset)\n\n for i, x in enumerate(dataset):\n assert len(x) == fields, x\n"
]
| [
[
"numpy.savez",
"numpy.isfinite",
"numpy.arange",
"numpy.save",
"numpy.all"
]
]
|
MambaWong/depthai-python-1 | [
"0d15abd77fd82b4a70e096ea5bb99237a17c9862"
]
| [
"examples/mixed/mono_depth_mobilenetssd.py"
]
| [
"#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport sys\nimport cv2\nimport depthai as dai\nimport numpy as np\n\n# Get argument first\nnnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())\nif len(sys.argv) > 1:\n nnPath = sys.argv[1]\n\nif not Path(nnPath).exists():\n import sys\n raise FileNotFoundError(f'Required file/s not found, please run \"{sys.executable} install_requirements.py\"')\n\n# MobilenetSSD label nnLabels\nlabelMap = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n# Create pipeline\npipeline = dai.Pipeline()\n\n# Define sources and outputs\nmonoRight = pipeline.create(dai.node.MonoCamera)\nmonoLeft = pipeline.create(dai.node.MonoCamera)\nstereo = pipeline.create(dai.node.StereoDepth)\nmanip = pipeline.create(dai.node.ImageManip)\nnn = pipeline.create(dai.node.MobileNetDetectionNetwork)\n\nnnOut = pipeline.create(dai.node.XLinkOut)\ndisparityOut = pipeline.create(dai.node.XLinkOut)\nxoutRight = pipeline.create(dai.node.XLinkOut)\n\ndisparityOut.setStreamName(\"disparity\")\nxoutRight.setStreamName(\"rectifiedRight\")\nnnOut.setStreamName(\"nn\")\n\n# Properties\nmonoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)\nmonoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)\nmonoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)\nmonoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)\n\n# Produce the depth map (using disparity output as it's easier to visualize depth this way)\nstereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)\nstereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout from rectification (black stripe on the edges)\n# Convert the grayscale frame into the nn-acceptable form\nmanip.initialConfig.setResize(300, 300)\n# The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case)\nmanip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)\n\n# Define a neural network that will make predictions based on the source frames\nnn.setConfidenceThreshold(0.5)\nnn.setBlobPath(nnPath)\nnn.setNumInferenceThreads(2)\nnn.input.setBlocking(False)\n\n# Linking\nmonoRight.out.link(stereo.right)\nmonoLeft.out.link(stereo.left)\nstereo.rectifiedRight.link(manip.inputImage)\nstereo.disparity.link(disparityOut.input)\nmanip.out.link(nn.input)\nmanip.out.link(xoutRight.input)\nnn.out.link(nnOut.input)\n\n# Connect to device and start pipeline\nwith dai.Device(pipeline) as device:\n\n # Output queues will be used to get the grayscale / depth frames and nn data from the outputs defined above\n qRight = device.getOutputQueue(\"rectifiedRight\", maxSize=4, blocking=False)\n qDisparity = device.getOutputQueue(\"disparity\", maxSize=4, blocking=False)\n qDet = device.getOutputQueue(\"nn\", maxSize=4, blocking=False)\n\n rightFrame = None\n disparityFrame = None\n detections = []\n\n # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height\n def frameNorm(frame, bbox):\n normVals = np.full(len(bbox), frame.shape[0])\n normVals[::2] = frame.shape[1]\n return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)\n\n # Add bounding boxes and text to the frame and show it to the user\n def show(name, frame):\n color = (255, 0, 0)\n for detection in detections:\n bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))\n cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)\n cv2.putText(frame, f\"{int(detection.confidence * 100)}%\", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)\n # Show the frame\n cv2.imshow(name, frame)\n\n disparityMultiplier = 255 / stereo.initialConfig.getMaxDisparity()\n\n while True:\n # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise\n inRight = qRight.tryGet()\n inDet = qDet.tryGet()\n inDisparity = qDisparity.tryGet()\n\n if inRight is not None:\n rightFrame = inRight.getCvFrame()\n\n if inDisparity is not None:\n # Frame is transformed, normalized, and color map will be applied to highlight the depth info\n disparityFrame = inDisparity.getFrame()\n disparityFrame = (disparityFrame*disparityMultiplier).astype(np.uint8)\n # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html\n disparityFrame = cv2.applyColorMap(disparityFrame, cv2.COLORMAP_JET)\n show(\"disparity\", disparityFrame)\n\n if rightFrame is not None:\n show(\"rectified right\", rightFrame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n"
]
| [
[
"numpy.array"
]
]
|
jsalt2019-diadet/hyperion | [
"14a11436d62f3c15cd9b1f70bcce3eafbea2f753",
"14a11436d62f3c15cd9b1f70bcce3eafbea2f753"
]
| [
"hyperion/torch/archs/etdnn.py",
"hyperion/calibration/unsup_gauss_calibration.py"
]
| [
"\"\"\"\n Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)\n Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom six.moves import xrange\n\nimport numpy as np\n\nimport torch.nn as nn\nfrom torch.nn import Conv1d, Linear, BatchNorm1d\n\nfrom ..helpers import ActivationFactory as AF\nfrom ..layers import Dropout1d\nfrom .net_arch import NetArch\n\nclass ETDNNV1(NetArch):\n\n def __init__(self, num_etd_layers, num_fc_layers, \n input_units, etd_units, fc_units, output_units,\n kernel_size, dilation=1, dilation_factor=1,\n hid_act='relu', output_act=None, \n use_batchnorm=True, dropout_rate=0,\n without_output_layer=False,\n use_output_batchnorm=False,\n use_output_dropout=False):\n\n super(ETDNNV1, self).__init__()\n assert num_etd_layers >= 1, 'num_etd_layers (%d < 1)' % num_etd_layers\n\n self.num_etd_layers = num_etd_layers\n self.num_fc_layers = num_fc_layers\n self.output_units = output_units\n self.input_units = input_units\n self.etd_units = etd_units\n self.fc_units = fc_units\n self.kernel_size = kernel_size\n self.dilation = dilation\n self.dilation_factor = dilation_factor\n self.use_batchnorm = use_batchnorm\n self.dropout_rate = dropout_rate\n self.without_output_layer = without_output_layer\n self.use_output_batchnorm = use_output_batchnorm\n self.use_output_dropout = use_output_dropout\n \n self.output_act = AF.create(output_act)\n \n if isinstance(etd_units, list):\n assert num_etd_layers == len(etd_units)\n else:\n etd_units = [etd_units for i in xrange(num_etd_layers)]\n\n etd_units = [input_units] + etd_units\n\n if isinstance(fc_units, list):\n assert num_fc_layers == len(fc_units)\n else:\n fc_units = [fc_units for i in xrange(num_fc_layers)]\n\n fc_units = [etd_units[-1]] + fc_units\n\n if isinstance(kernel_size, list):\n assert num_etd_layers == len(kernel_size)\n else:\n kernel_size = [kernel_size for i in xrange(num_etd_layers)]\n\n if isinstance(dilation_rate, list):\n assert num_etd_layers == len(dilation_rate)\n else:\n dilation = [dilation_factor*i+dilation for i in xrange(num_etd_layers)]\n\n # past and future context\n self._context = int(np.sum(np.array(dilation)*(\n np.array(kernel_size)-1)/2))\n\n # extended time delay layers\n td_layers = []\n fc_layers = []\n for i in xrange(1, num_hid_layers+1):\n td_layers.append(Conv1D(etd_units[i-1], etd_units[i],\n kernel_size=kernel_size[i],\n dilation=dilation[i]))\n fc_layers.append(Linear(etd_units[i], etd_units[i]))\n \n self.td_layers = nn.ModuleList(td_layers)\n \n # fully connected layers\n for i in xrange(1, num_fc_layers+1):\n fc_layers.append(Linear(fc_units[i-1], fc_units[i]))\n\n self.fc_layers = nn.ModuleList(fc_layers)\n\n # hidden activations\n self.td_hid_acts = None\n self.fc_hid_acts = None\n if hid_act is not None:\n td_hid_acts = []\n fc_hid_acts = []\n for i in xrange(num_etd_layers+num_num_fc_layers):\n hid_act = AF.create(hid_act)\n fc_hid_acts.append(hid_act)\n if i < num_etd_layers:\n hid_act = AF.create(hid_act)\n td_hid_acts.append(hid_act)\n\n self.td_hid_acts = nn.ModuleList(td_hid_acts)\n self.fc_hid_acts = nn.ModuleList(fc_hid_acts)\n \n \n self.td_batchnorm_layers = None\n self.fc_batchnorm_layers = None\n if use_batchnorm:\n batchnorm_layers = []\n for i in xrange(num_etd_layers+num_fc_layers):\n td_batchnorm_layers.append(BatchNorm1d(etd_units[i]))\n fc_batchnorm_layers.append(BatchNorm1d(etd_units[i]))\n\n for i in xrange(num_fc_layers):\n 
fc_batchnorm_layers.append(BatchNorm1d(fc_units[i]))\n\n self.td_batchnorm_layers = nn.ModuleList(td_batchnorm_layers)\n self.fc_batchnorm_layers = nn.ModuleList(fc_batchnorm_layers)\n\n self.td_dropout_layers = None\n self.fc_dropout_layers = None\n if dropout_rate > 0:\n fc_dropout_layers = []\n td_dropout_layers = []\n for i in xrange(num_etd_layers+num_fc_layers):\n fc_dropout_layers.append(Dropout1d(dropout_rate))\n if i < num_etd_layers:\n td_dropout_layers.append(Dropout1d(dropout_rate))\n \n self.td_dropout_layers = nn.ModuleList(td_dropout_layers)\n self.fc_dropout_layers = nn.ModuleList(fc_dropout_layers)\n\n\n self.output_dropout = None\n self.output_batchnorm = None\n\n if without_output_layer:\n if use_output_batchnorm:\n self.output_batchnorm = BatchNorm1d(units[-1])\n else:\n if use_batchnorm:\n self.batchnorm_layers.append(BatchNorm1d(units[-1]))\n\n self.fc_layers.append(Linear(units[-1], output_units))\n if use_output_dropout and dropout_rate > 0:\n self.output_dropout = Dropout1d(dropout_rate)\n \n if use_output_batchnorm:\n self.output_batchnorm = BatchNorm1d(output_units)\n\n\n @property\n def context(self):\n return (self._context, self._context)\n\n \n def forward(self, x):\n\n for l in xrange(self.num_etd_layers+self.num_fc_layers):\n\n if i < self.num_etd_layers:\n if self.use_batchnorm:\n x = self.td_batchnorm_layers[l](x)\n\n x = self.td_layers[l](x)\n\n if self.td_hid_acts is not None:\n x = self.td_hid_acts[l](x)\n\n if self.dropout_rate > 0:\n x = self.td_dropout_layers[l](x)\n\n\n if self.use_batchnorm:\n x = self.fc_batchnorm_layers[l](x)\n\n x = self.fc_layers[l](x)\n \n if self.fc_hid_acts is not None:\n x = self.fc_hid_acts[l](x)\n\n if self.dropout_rate > 0:\n x = self.fc_dropout_layers[l](x)\n\n if not self.without_output_layer:\n if self.batchnorm_layers is not None:\n x = self.batchnorm_layers[self.num_hid_layers](x)\n \n x = self.fc_layers[self.num_hid_layers](x)\n if self.output_act is not None:\n x = self.output_act(x)\n\n if self.output_dropout is not None:\n x = self.droput_layers[self.num_hid_layers](x)\n\n if self.use_output_batchnorm:\n x = self.output_dropout(x)\n\n return x\n\n\n \n def get_config(self):\n \n output_act = AF.get_config(self.output_act)\n if self.hid_acts is None:\n hid_act = None\n else:\n hid_act = AF.get_config(self.hid_acts[0])\n\n config = {'num_etd_layers': self.num_etd_layers,\n 'num_fc_layers': self.num_fc_layers,\n 'output_units': self.output_units,\n 'etd_units': self.etd_units,\n 'fc_units': self.fc_units,\n 'input_units': self.input_units,\n 'kernel_size': self.kernel_size,\n 'dilation': self.dilation,\n 'dilation_factor': self.dilation_factor,\n 'use_batchnorm': self.use_batchnorm,\n 'dropout_rate': self.dropout_rate,\n 'use_output_batchnorm': self.output_batchnorm,\n 'use_output_dropout': self.output_dropout,\n 'output_act': output_act,\n 'hid_act': hid_act }\n \n base_config = super(TDNNV1, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n \n",
"\"\"\"\n Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)\n Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom six.moves import xrange\nimport sys\nimport numpy as np\n\nfrom ..pdfs.mixtures.diag_gmm_tiedcovs import DiagGMMTiedCovs as GMM\nfrom .gauss_calibration import GaussCalibration\n\nclass UnsupGaussCalibration(GaussCalibration):\n \"\"\"Class for unsupervised Gaussian calibration.\n \"\"\"\n\n def __init__(self, mu1=None, mu2=None, sigma2=None, prior=0.5, init_prior=0.5, **kwargs):\n super(UnsupGaussCalibration, self).__init__(mu1, mu2, sigma2, prior, **kwargs)\n self.init_prior = init_prior\n\n \n def fit(self, x):\n\n if x.ndim == 1:\n x = np.expand_dims(x, axis=-1)\n\n if self.is_init():\n mu1 = self.mu1\n mu2 = self.mu2\n sigma2 = np.expand_dims(self.sigma2, axis=-1)\n pi = np.array([self.prior, 1-self.prior])\n else:\n mu1 = np.max(x, axis=0, keepdims=True)\n mu2 = np.mean(x, axis=0, keepdims=True)\n sigma2 = np.std(x, axis=0, keepdims=True)**2\n pi = np.array([self.init_prior, 1-self.init_prior])\n\n mu = np.vstack((mu1, mu2))\n gmm = GMM(mu=mu, Lambda=1/sigma2, pi=pi)\n gmm.fit(x, epochs=20)\n\n self.mu1 = gmm.mu[0,0]\n self.mu2 = gmm.mu[1,0]\n self.sigma2 = gmm.Sigma[0]\n self.prior = gmm.pi[0]\n \n self._compute_scale_bias()\n\n \n \n \n"
]
| [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"numpy.array",
"torch.nn.BatchNorm1d"
],
[
"numpy.expand_dims",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.vstack"
]
]
|
niuwk/infonets | [
"274e97c9a86144dd52cbe90caffff578a2f5d178"
]
| [
"code/learn/srelu2_grad.py"
]
| [
"# srelu2_grad.py\n\"\"\"\nCreated on Thu May 31 12:49:01 2018\n\n@author: Wentao Huang\n\"\"\"\nimport ipdb\nimport torch as tc\nfrom .grad import Grad\nfrom ..utils.methods import get_diag_e\n\nclass SReLU2Grad(Grad):\n\n @staticmethod\n def forward(ctx, input, C, bias=None, beta=1.0, isorth=True, eps=1e-6, \n ith_cls=0, R=None, margin=None, E0=None):\n if isinstance(bias, tc.Tensor):\n bias_requires_grad = bias.requires_grad\n else:\n bias_requires_grad = False\n if bias is not None:\n bias = beta * bias\n else:\n bias = 0.0\n if margin is not None:\n margin = beta * margin\n else:\n margin = 0.0\n K, KA = C.size()\n C1 = beta * C\n e0, e1 = get_diag_e(E0, C1, eps)\n# ipdb.set_trace()\n# N0 = input.get_len(ith_cls)\n# N1 = len(input) - N0\n# R = input.get_len_list()\n# M = len(R)\n# if balance_factor == 0.5:\n# R = [0.5/((M-1)*i) for i in R]\n# R[ith_cls] = 0.5/N0\n# else:\n# R = [(1.0-balance_factor)/N1]*M\n# R[ith_cls] = balance_factor/N0\n input.reiter()\n# cls_label = input.get_label(ith_cls)\n obj0 = 0.0\n obj1 = 0.0\n db = 0.0 if bias_requires_grad else None#tc.Tensor([0.0])\n dQ = 0.0\n for X, label in input:\n# i = input.get_index(label)\n r = R[label]\n if label == ith_cls:\n if r == 0.0:\n continue\n s = 1.0\n e = e0\n f = X.mm(C1)\n bias0 = bias - margin\n else:\n if r == 0.0:\n continue\n s = -1.0\n e = e1\n f = X.mm(C1.mul(-1.0))\n bias0 = -bias - margin\n if bias0 is not 0.0:\n f.add_(bias0)\n f.sigmoid_()\n f2 = f.pow(2)\n g = f2.add(e)\n# ipdb.set_trace()\n f2.mul_(1.0-f).div_(g)\n obj0 += -0.5*r*g.log_().sum()\n dQ = dQ - s*r*beta*X.t().mm(f2)\n if bias_requires_grad:\n db = db - s*r*beta*f2.sum(0)\n KA = C.size(1)\n if KA == 1:\n G = C.t().mm(C) + eps\n obj1 = -0.5*G.log()\n else:\n G = C.t().mm(C) + tc.diag(tc.full((C.size(1),), eps))\n sign, logdet = tc.slogdet(G)\n obj1 = -0.5*logdet\n dQ = dQ.mm(C.t().mm(C))\n if isorth:\n dC = dQ - C.mm(dQ.t()).mm(C)\n else:\n dC = dQ - C\n argnum = tc.tensor([10])\n ctx.save_for_backward(dC, db, argnum)\n return obj0 + obj1\n"
]
| [
[
"torch.slogdet",
"torch.tensor"
]
]
|
vpereira/covid-19 | [
"bff527b618f8c7c32c64d4b52809df5b757c2acc"
]
| [
"web/app/tests/test_plot_cummulative.py"
]
| [
"import os\nimport pandas as pd\nimport unittest\n\nfrom covid19.expogo.core import PlotCummulative\n\n\nclass BasicTests(unittest.TestCase):\n def setUp(self):\n self.df = pd.DataFrame(columns=[\"Province/State\", \"Country/Region\", \"Lat\", \"Long\", \"1/22/20\", \"1/23/20\"],\n data=[[\"Tokyo\", \"1.2\", \"2.2\", \"Japan\", 5, 10]])\n self.base_df = PlotCummulative(self.df)\n\n def test_json_plot(self):\n self.assertIsNotNone(self.base_df.plot_by_country_json())\n"
]
| [
[
"pandas.DataFrame"
]
]
|
EddieNewcastle/QML_Conformers | [
"16e067942b4a75c775e106180e343fe54724eb88"
]
| [
"machine.py"
]
| [
"\nimport json\nimport re\n\nimport numpy as np\nimport qml\nimport qml.fchl as qml_fchl\nimport qml.math as qml_math\nimport qml.representations as qml_representations\nimport qml.kernels.distance as qml_distance\n\n\nglobal __ATOM_LIST__\n__ATOM_LIST__ = [ x.strip() for x in ['h ','he', \\\n 'li','be','b ','c ','n ','o ','f ','ne', \\\n 'na','mg','al','si','p ','s ','cl','ar', \\\n 'k ','ca','sc','ti','v ','cr','mn','fe','co','ni','cu', \\\n 'zn','ga','ge','as','se','br','kr', \\\n 'rb','sr','y ','zr','nb','mo','tc','ru','rh','pd','ag', \\\n 'cd','in','sn','sb','te','i ','xe', \\\n 'cs','ba','la','ce','pr','nd','pm','sm','eu','gd','tb','dy', \\\n 'ho','er','tm','yb','lu','hf','ta','w ','re','os','ir','pt', \\\n 'au','hg','tl','pb','bi','po','at','rn', \\\n 'fr','ra','ac','th','pa','u ','np','pu'] ]\n\n\ndef get_atom(atom):\n global __ATOM_LIST__\n atom = atom.lower()\n return __ATOM_LIST__.index(atom) + 1\n\n\ndef get_representations_fchl(charge_list, coordinates_list, parameters):\n\n nmax = parameters['nmax']\n cut_distance = parameters['cut_distance']\n\n rep_list = []\n\n for atoms, coordinates in zip(charge_list, coordinates_list):\n rep = qml_fchl.generate_representation(\n coordinates,\n atoms,\n max_size=nmax,\n neighbors=nmax,\n cut_distance=cut_distance)\n rep_list.append(rep)\n\n rep_list = np.array(rep_list)\n\n return rep_list\n\n\ndef get_representations_slatm(charge_list, coordinates_list, parameters, mbtypes=None):\n\n if mbtypes is None:\n quit(\"error: mbtypes needed for slatm predictions\")\n\n rep_list = []\n\n for atoms, coordinates in zip(charge_list, coordinates_list):\n rep = qml_representations.generate_slatm(coordinates, atoms, mbtypes)\n rep_list.append(rep)\n\n rep_list = np.array(rep_list)\n\n return rep_list\n\n\ndef get_kernel_fchl(representations_x, representations_y, parameters):\n\n # TODO if id(representations_x) == id(representations_y)\n\n kernel_args = {\n \"kernel\": parameters['kernel'],\n \"kernel_args\": parameters['kernel_args'],\n \"cut_distance\": parameters['cut_distance'],\n \"alchemy\": parameters['alchemy']\n }\n\n # TODO if key in parameters: kernel_args[key] = parameters[key]\n\n kernel = qml_fchl.get_local_kernels(representations_x, representations_y, **kernel_args)\n kernel = kernel[0]\n\n return kernel\n\ndef get_kernel_slatm(representations_x, representations_y, parameters):\n\n var_lambda = parameters['lambda']\n var_sigma = parameters['sigma']\n\n kernel = qml_distance.l2_distance(representations_x, representations_y)\n kernel = np.square(kernel)\n kernel *= -1\n kernel /= (2*var_sigma**2)\n kernel = np.exp(kernel)\n # kernel[np.diag_indices_from(kernel)] += var_lambda\n\n return kernel\n\ndef get_alphas(kernel, properties):\n alpha = qml_math.cho_solve(kernel, properties)\n return alpha\n\n\ndef predict(predict_representations, trained_representations, alpha, parameters, get_kernel):\n kernel = get_kernel(predict_representations, trained_representations, parameters)\n predictions = np.dot(kernel, alpha)\n return predictions\n\n\ndef get_coordinates_xyz(filename):\n \"\"\"\n Get coordinates from filename and return a vectorset with all the\n coordinates, in XYZ format.\n \"\"\"\n\n f = open(filename, 'r')\n coordinates_list = list()\n atoms_list = list()\n charges_list = list()\n\n for line in f:\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(line)\n except ValueError:\n exit(\"Could not obtain the number of atoms in the .xyz file.\")\n\n # Skip the title line\n next(f)\n\n coordinates = []\n 
atoms = []\n charges = []\n\n for _ in range(n_atoms):\n line = next(f)\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n atom = atom.upper()\n\n charge = get_atom(atom)\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) != 3:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n coordinates.append(np.array(numbers))\n atoms.append(atom)\n charges.append(charge)\n\n coordinates_list.append(np.array(coordinates, dtype=float))\n atoms_list.append(np.array(atoms))\n charges_list.append(np.array(charges))\n\n coordinates_list = np.array(coordinates_list)\n atoms_list = np.array(atoms_list)\n charges_list = np.array(charges_list)\n\n return charges_list, coordinates_list\n\n\ndef get_coordinates_sdf(filename):\n \"\"\"\n \"\"\"\n # TODO\n # NOTE just use rdkit?\n\n return\n\n\ndef main():\n\n description = \"\"\"\nStand-alone predictor of conformational energies using QML\n\n\"\"\"\n\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--xyz', action='store', help='Molecule file containing the conformational structures. Either xyz or sdf format.')\n\n # Prediction parameters\n parser.add_argument('--training', '-t', action='store', metavar='FILE.npy', help='Training set in representation form')\n parser.add_argument('--alpha', '-a', action='store', metavar='FILE.npy', help='The alpha values')\n parser.add_argument('--model', '-m', action='store', metavar='FILE.json', help='The model, e.i. the settings, kernel, hyperparameters')\n\n # Train parameters\n\n\n args = parser.parse_args()\n\n\n # check format\n fmt = args.xyz.split('.')[-1].lower()\n\n if fmt == \"xyz\":\n charges_list, coordinates_list = get_coordinates_xyz(args.xyz)\n\n elif fmt == \"sdf\":\n quit(\"error: missing sdf feature\")\n\n else:\n print(\"error: Don't recognize the extension. Either XYZ or SDF format.\")\n quit()\n\n\n # Load model\n with open(args.model, 'r') as f:\n MODEL = json.load(f)\n\n\n # SWITCH model\n\n krr_type = MODEL['representation']\n\n if krr_type == \"slatm\":\n get_representations = get_representations_slatm\n get_kernel = get_kernel_slatm\n\n folder = args.model.split(\"/\")\n folder = \"/\".join(folder[:-1]) + \"/\"\n\n mbtypes = np.load(folder + MODEL['mbtypes'])\n\n repkwargs = {\"mbtypes\": mbtypes}\n\n\n\n elif krr_type == \"fchl\":\n get_representations = get_representations_fchl\n get_kernel = get_kernel_fchl\n\n # Check input size\n NMAX = MODEL['nmax']\n this_n = max([len(atoms) for atoms in charges_list])\n\n if this_n > NMAX:\n print(\"error: The model is trained for {:}, but input has {:} atoms\".format(NMAX, this_n))\n quit()\n\n\n repkwargs = {\"nmax\": NMAX}\n\n else:\n print(\"error: Don't know this representation\")\n\n\n # generate predict representations\n representations = get_representations(charges_list, coordinates_list, MODEL, **repkwargs)\n\n\n # load training and alphas\n alpha = np.load(args.alpha)\n training = np.load(args.training)\n\n\n # Predict\n predictions = predict(representations, training, alpha, MODEL, get_kernel=get_kernel)\n\n for prediction in predictions:\n print(prediction)\n\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.square",
"numpy.dot",
"numpy.exp",
"numpy.load",
"numpy.array"
]
]
|
aTamaz/pytorch_geometric | [
"8bf6edf423fef012281d82f8a90e9d7f731485b4"
]
| [
"torch_geometric/nn/conv/point_conv.py"
]
| [
"from typing import Optional, Callable, Union\nfrom torch_geometric.typing import OptTensor, PairOptTensor, PairTensor, Adj\n\nimport torch\nfrom torch import Tensor\nfrom torch_sparse import SparseTensor, set_diag\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops\n\nfrom ..inits import reset\n\n\nclass PointConv(MessagePassing):\n r\"\"\"The PointNet set layer from the `\"PointNet: Deep Learning on Point Sets\n for 3D Classification and Segmentation\"\n <https://arxiv.org/abs/1612.00593>`_ and `\"PointNet++: Deep Hierarchical\n Feature Learning on Point Sets in a Metric Space\"\n <https://arxiv.org/abs/1706.02413>`_ papers\n\n .. math::\n \\mathbf{x}^{\\prime}_i = \\gamma_{\\mathbf{\\Theta}} \\left( \\max_{j \\in\n \\mathcal{N}(i) \\cup \\{ i \\}} h_{\\mathbf{\\Theta}} ( \\mathbf{x}_j,\n \\mathbf{p}_j - \\mathbf{p}_i) \\right),\n\n where :math:`\\gamma_{\\mathbf{\\Theta}}` and\n :math:`h_{\\mathbf{\\Theta}}` denote neural\n networks, *.i.e.* MLPs, and :math:`\\mathbf{P} \\in \\mathbb{R}^{N \\times D}`\n defines the position of each point.\n\n Args:\n local_nn (torch.nn.Module, optional): A neural network\n :math:`h_{\\mathbf{\\Theta}}` that maps node features :obj:`x` and\n relative spatial coordinates :obj:`pos_j - pos_i` of shape\n :obj:`[-1, in_channels + num_dimensions]` to shape\n :obj:`[-1, out_channels]`, *e.g.*, defined by\n :class:`torch.nn.Sequential`. (default: :obj:`None`)\n global_nn (torch.nn.Module, optional): A neural network\n :math:`\\gamma_{\\mathbf{\\Theta}}` that maps aggregated node features\n of shape :obj:`[-1, out_channels]` to shape :obj:`[-1,\n final_out_channels]`, *e.g.*, defined by\n :class:`torch.nn.Sequential`. (default: :obj:`None`)\n add_self_loops (bool, optional): If set to :obj:`False`, will not add\n self-loops to the input graph. (default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n def __init__(self, local_nn: Optional[Callable] = None,\n global_nn: Optional[Callable] = None,\n add_self_loops: bool = True, **kwargs):\n super(PointConv, self).__init__(aggr='max', **kwargs)\n\n self.local_nn = local_nn\n self.global_nn = global_nn\n self.add_self_loops = add_self_loops\n\n self.reset_parameters()\n\n def reset_parameters(self):\n reset(self.local_nn)\n reset(self.global_nn)\n\n def forward(self, x: Union[OptTensor, PairOptTensor],\n pos: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:\n \"\"\"\"\"\"\n if not isinstance(x, tuple):\n x: PairOptTensor = (x, None)\n\n if isinstance(pos, Tensor):\n pos: PairTensor = (pos, pos)\n\n if self.add_self_loops:\n if isinstance(edge_index, Tensor):\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = add_self_loops(edge_index,\n num_nodes=pos[1].size(0))\n elif isinstance(edge_index, SparseTensor):\n edge_index = set_diag(edge_index)\n\n # propagate_type: (x: PairOptTensor, pos: PairTensor)\n out = self.propagate(edge_index, x=x, pos=pos, size=None)\n\n if self.global_nn is not None:\n out = self.global_nn(out)\n\n return out\n\n def message(self, x_j: Optional[Tensor], pos_i: Tensor,\n pos_j: Tensor) -> Tensor:\n msg = pos_j - pos_i\n if x_j is not None:\n msg = torch.cat([x_j, msg], dim=1)\n if self.local_nn is not None:\n msg = self.local_nn(msg)\n return msg\n\n def __repr__(self):\n return '{}(local_nn={}, global_nn={})'.format(self.__class__.__name__,\n self.local_nn,\n self.global_nn)\n"
]
| [
[
"torch.cat"
]
]
|
Jintao-Huang/course_homework | [
"d886550baea78c38afdb4b3a40d82d2c862dbdcf"
]
| [
"suanfa/hw5/ex p83.py"
]
| [
"# Author: Jintao Huang\n# Email: [email protected]\n# Date: \n\nfrom typing import Optional, List, Set, Tuple\nimport random\nimport time\nfrom numba import njit\nimport numpy as np\n\n\n@njit\ndef QueensLV(n: int, k: int) -> Optional[Tuple[List[int], Set[int], Set[int], Set[int]]]:\n \"\"\"若成功(未失败)返回解. 失败返回None.\n n: n皇后, k: 随机取k次.\n 返回: col, diag45, diag135, ans\"\"\"\n # diag135: i+j=i2+j2: 冲突\n col, diag45, diag135 = set(), set(), set()\n ans = []\n for i in range(k):\n st = [] # s.t. 满足的\n for j in range(n): # 列\n # 符合条件\n if j not in col and (i - j) not in diag45 and (i + j) not in diag135:\n st.append(j)\n nb = len(st)\n #\n if nb > 0:\n j = st[random.randint(0, nb - 1)] # [0...nb-1]含. 随机选一个\n ans.append(j)\n col.add(j)\n diag45.add(i - j)\n diag135.add(i + j)\n else:\n return None\n\n return ans, col, diag45, diag135\n\n\n@njit\ndef Backtrack(n: int, track: List[int],\n col: Set[int], diag45: Set[int], diag135: Set[int]) -> bool:\n if len(track) == n:\n return True\n #\n i = len(track) # in [0, n)\n #\n for j in range(n): #\n # 剪枝\n if j in col or i - j in diag45 or i + j in diag135:\n continue\n #\n col.add(j)\n diag45.add(i - j)\n diag135.add(i + j)\n track.append(j)\n #\n if Backtrack(n, track, col, diag45, diag135):\n return True\n #\n col.remove(j)\n diag45.remove(i - j)\n diag135.remove(i + j)\n track.pop()\n return False\n\n\n@njit\ndef QueensLVBacktrace(n: int, k: int) -> Optional[List[int]]:\n res = QueensLV(n, k)\n if res is None:\n return None\n track, col, diag45, diag135 = res\n success = Backtrack(n, track, col, diag45, diag135)\n if not success:\n return None\n return track\n\n\n@njit\ndef QueensSuccessT(n: int, k: int, st: int) -> None:\n \"\"\"n: n皇后. k: 随机取k次. st. success times.\n 返回: 耗时\"\"\"\n success = 0\n while True:\n if QueensLVBacktrace(n, k) is not None:\n success += 1\n if success == st:\n break\n return\n\n\n# numba编译\nn = 4\nQueensSuccessT(n, n // 2, 1)\n#\nfor n in range(12, 21):\n ts = []\n for i in range(n):\n t = time.time()\n QueensSuccessT(n, i, 1000)\n ts.append(time.time() - t)\n print(n, np.argmin(ts))\n"
]
| [
[
"numpy.argmin"
]
]
|
Niekvdplas/ktrain | [
"808a212a9b8ebddd4e2d75eaca2e54a7ea990b4e"
]
| [
"ktrain/tests/test_qa.py"
]
| [
"#!/usr/bin/env python3\n\"\"\"\nTests of ktrain text classification flows\n\"\"\"\nfrom unittest import TestCase, main, skip\n\nimport IPython\nimport numpy as np\nimport testenv\n\nimport ktrain\nfrom ktrain import text\nfrom ktrain.imports import ACC_NAME, VAL_ACC_NAME\n\n\nclass TestQA(TestCase):\n\n # @skip('temporarily disabled')\n def test_qa(self):\n\n from sklearn.datasets import fetch_20newsgroups\n\n remove = (\"headers\", \"footers\", \"quotes\")\n newsgroups_train = fetch_20newsgroups(subset=\"train\", remove=remove)\n newsgroups_test = fetch_20newsgroups(subset=\"test\", remove=remove)\n docs = newsgroups_train.data + newsgroups_test.data\n\n # tmp_folder = '/tmp/qa_test'\n import shutil\n import tempfile\n\n tmp_folder = tempfile.mkdtemp()\n shutil.rmtree(tmp_folder)\n text.SimpleQA.initialize_index(tmp_folder)\n text.SimpleQA.index_from_list(\n docs, tmp_folder, commit_every=len(docs), multisegment=True\n )\n qa = text.SimpleQA(tmp_folder, framework=\"tf\")\n\n answers = qa.ask(\"When did Cassini launch?\")\n top_answer = answers[0][\"answer\"]\n self.assertEqual(top_answer, \"in october of 1997\")\n\n # @skip('temporarily disabled')\n def test_extractor(self):\n\n # data = ['Indeed, risk factors are sex, obesity, genetic factors and mechanical factors (3) .',\n # 'The sun is the center of our solar system.',\n # 'There is a risk of Donald Trump running again in 2024.',\n # 'My speciality is risk assessments.',\n # \"\"\"This risk was consistent across patients stratified by history of CVD, risk factors\n # but no CVD, and neither CVD nor risk factors.\"\"\",\n # \"\"\"Risk factors associated with subsequent death include older age, hypertension, diabetes,\n # ischemic heart disease, obesity and chronic lung disease; however, sometimes\n # there are no obvious risk factors .\"\"\",\n # 'Three major risk factors for COVID-19 were sex (male), age (≥60), and severe pneumonia.']\n # from ktrain.text import AnswerExtractor\n # ae = AnswerExtractor()\n # import pandas as pd\n # pd.set_option(\"display.max_colwidth\", None)\n # df = pd.DataFrame(data, columns=['Text'])\n # df = ae.extract(df.Text.values, df, [('What are the risk factors?', 'Risk Factors')], min_conf=8)\n # answers = df['Risk Factors'].values\n # self.assertEqual(answers[0].startswith('sex'), True)\n # self.assertEqual(answers[1], None)\n # self.assertEqual(answers[2], None)\n # self.assertEqual(answers[3], None)\n # self.assertEqual(answers[4], None)\n # self.assertEqual(answers[5].startswith('older'), True)\n # self.assertEqual(answers[6].startswith('sex'), True)\n\n data = [\n \"Three major risk factors for COVID-19 were sex (male), age (≥60), and severe pneumonia.\",\n \"His speciality is medical risk assessments, and he is 30 years old.\",\n \"Results: A total of nine studies including 356 patients were included in this study, the mean age was 52.4 years and 221 (62.1%) were male.\",\n ]\n from ktrain.text import AnswerExtractor\n\n ae = AnswerExtractor(framework=\"pt\", device=\"cpu\", quantize=True)\n import pandas as pd\n\n pd.set_option(\"display.max_colwidth\", None)\n df = pd.DataFrame(data, columns=[\"Text\"])\n import time\n\n start = time.time()\n df = ae.extract(\n df.Text.values,\n df,\n [\n (\"What are the risk factors?\", \"Risk Factors\"),\n (\"How many individuals in sample?\", \"Sample Size\"),\n ],\n min_conf=5,\n )\n print(time.time() - start)\n print(df.head())\n answers = df[\"Risk Factors\"].values\n self.assertEqual(answers[0].startswith(\"sex\"), True)\n self.assertEqual(answers[1], 
None)\n self.assertEqual(answers[2], None)\n answers = df[\"Sample Size\"].values\n self.assertEqual(answers[0], None)\n self.assertEqual(answers[1], None)\n self.assertEqual(answers[2].startswith(\"356\"), True)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"pandas.set_option",
"pandas.DataFrame",
"sklearn.datasets.fetch_20newsgroups"
]
]
|
Guo-Jian-Wang/ReFANN | [
"e985358d1c805f4f46499d5ffa92ba06ba7cff38"
]
| [
"refann/sequence.py"
]
| [
"# -*- coding: utf-8 -*-\n\nfrom . import element\nimport torch.nn as nn\n\nclass SeqName(object):\n \"\"\" The name of sequence, to be used by class LinearSeq \"\"\"\n def __init__(self, module_name):\n self.moduleName = module_name\n \n def seq_name(self):\n self.moduleName = str(eval(self.moduleName)+1)\n return self.moduleName\n\nclass BatchNorm(object):\n \"\"\" Batch Normalization, to be used by class LinearSeq \"\"\"\n def _batchnorm1d(self, name, n_output):\n self.seq.add_module(name, nn.BatchNorm1d(n_output, eps=self.eps, momentum=self.momentum))\n\nclass Activation(object):\n \"\"\" Activation functions, to be used by class LinearSeq \"\"\"\n def _activation(self, module_name, active_name):\n self.seq.add_module(module_name, element.activation(active_name=active_name))\n\nclass Pooling(object):\n \"\"\" Pooling, to be used by class LinearSeq \"\"\"\n def _pooling(self, module_name, pool_name):\n self.seq.add_module(module_name, element.pooling(pool_name=pool_name))\n\nclass Dropout(object):\n \"\"\" Dropout, to be used by class LinearSeq \"\"\"\n def _dropout(self, module_name, dropout_name):\n self.seq.add_module(module_name, element.get_dropout(dropout_name))\n\n\nclass LinearSeq(SeqName,BatchNorm,Activation,Dropout):\n \"\"\" Sequence of Linear \"\"\"\n def __init__(self, nodes, mainBN=True, finalBN=False, mainActive='relu',\n finalActive='None', mainDropout='None', finalDropout='None'):\n SeqName.__init__(self, '-1') #or super(LinearSeq, self).__init__('-1')\n self.nodes = nodes\n self.layers = len(nodes) - 1\n self.mainBN = mainBN\n self.finalBN = finalBN\n self.mainActive = mainActive\n self.finalActive = finalActive\n self.mainDropout = mainDropout\n self.finalDropout = finalDropout\n self.eps = 1e-05\n self.momentum = 0.1\n self.seq = nn.Sequential()\n\n def __linear(self, name, n_input, n_output):\n self.seq.add_module(name, nn.Linear(n_input, n_output))\n \n def get_seq(self):\n for i in range(self.layers-1):\n self.__linear(self.seq_name(), self.nodes[i], self.nodes[i+1])\n if self.mainBN:\n self._batchnorm1d(self.seq_name(), self.nodes[i+1])\n if self.mainActive!='None':\n self._activation(self.seq_name(), self.mainActive)\n if self.mainDropout!='None':\n self._dropout(self.seq_name(), self.mainDropout)\n \n self.__linear(self.seq_name(), self.nodes[-2], self.nodes[-1])\n if self.finalBN:\n self._batchnorm1d(self.seq_name(), self.nodes[-1])\n if self.finalActive!='None':\n self._activation(self.seq_name(), self.finalActive)\n if self.finalDropout!='None':\n self._dropout(self.seq_name(), self.finalDropout)\n return self.seq\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d"
]
]
|
anatoly-khomenko/text-to-text-transfer-transformer | [
"2e707da4b46236c7056999d6857d71e25a5c0c77"
]
| [
"t5/data/utils.py"
]
| [
"# Copyright 2019 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for data loading and processing.\n\nDefines Tasks, TaskRegistry, Mixture, and MixtureRegistry\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport inspect\nimport json\nimport os\nimport re\n\nfrom absl import logging\nimport gin\nimport numpy as np\nfrom t5.data import sentencepiece_vocabulary\nimport tensorflow.compat.v1 as tf\nimport tensorflow_datasets as tfds\n\n_DEFAULT_FEATURE_KEYS = [\"inputs\", \"targets\"]\n\n_VALID_TASK_NAME_REGEX = re.compile(r\"^[\\w\\d\\._]+$\")\n_INFO_FILENAME = \"info.{split}.json\"\n_STATS_FILENAME = \"stats.{split}.json\"\n_TFRECORD_PREFIX = \"{split}.tfrecord\"\n_MAX_EXAMPLES_TO_MEM_CACHE = 10000\n_SHUFFLE_BUFFER_SIZE = 1000\n\n_TFDS_DATA_DIR_OVERRIDE = None\n_GLOBAL_CACHE_DIRECTORIES = []\n\nDEFAULT_SPM_PATH = \"gs://t5-data/vocabs/cc_all.32000/sentencepiece.model\" # GCS\n\n\ndef set_tfds_data_dir_override(tfds_data_dir):\n global _TFDS_DATA_DIR_OVERRIDE\n _TFDS_DATA_DIR_OVERRIDE = tfds_data_dir\n\n\ndef set_global_cache_dirs(global_cache_dirs):\n global _GLOBAL_CACHE_DIRECTORIES\n _GLOBAL_CACHE_DIRECTORIES = global_cache_dirs\n\n\ndef add_global_cache_dirs(global_cache_dirs):\n global _GLOBAL_CACHE_DIRECTORIES\n _GLOBAL_CACHE_DIRECTORIES += global_cache_dirs\n\n\nclass DatasetProviderBase(object):\n \"\"\"Abstract base for classes that provide a tf.data.Dataset.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def sentencepiece_model_path(self):\n raise NotImplementedError\n\n @abc.abstractproperty\n def output_features(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_vocabulary(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dataset(\n self, sequence_length, split, use_cached=False, shuffle=True):\n raise NotImplementedError\n\n @abc.abstractmethod\n def num_input_examples(self, split):\n raise NotImplementedError\n\n\nclass DatasetProviderRegistry(object):\n \"\"\"Base for registry of data providers.\n\n Child classes must implement a _REGISTRY dict.\n \"\"\"\n\n _PROVIDER_TYPE = DatasetProviderBase\n\n @classmethod\n def add(cls, name, provider_cls, *provider_args, **provider_kwargs):\n \"\"\"Adds provider to the registry.\"\"\"\n if name in cls._REGISTRY:\n raise ValueError(\"Attempting to register duplicate provider: %s\" % name)\n provider = provider_cls(*provider_args, **provider_kwargs)\n if not isinstance(provider, cls._PROVIDER_TYPE):\n raise ValueError(\n \"Attempting to register a class not of an invalid type. 
\"\n \"Expecting instance of %s, got %s\" %\n (cls._PROVIDER_TYPE, provider_cls))\n\n cls._REGISTRY[name] = provider\n\n @classmethod\n def remove(cls, name):\n \"\"\"Remove provider from the registry, if it exists.\"\"\"\n if name in cls._REGISTRY:\n del cls._REGISTRY[name]\n\n @classmethod\n def get(cls, name):\n \"\"\"Returns provider from the registry.\"\"\"\n if name not in cls._REGISTRY:\n raise ValueError(\"Provider name not registered: %s\" % name)\n return cls._REGISTRY[name]\n\n @classmethod\n def names(cls):\n \"\"\"Returns all provider names in registry.\"\"\"\n return cls._REGISTRY.keys()\n\n @classmethod\n def get_dataset(\n cls, name, sequence_length, split, use_cached=False, shuffle=True):\n return cls.get(name).get_dataset(\n sequence_length=sequence_length, split=split, use_cached=use_cached,\n shuffle=shuffle)\n\n\nclass LazyTfdsLoader(object):\n \"\"\"Wrapper for TFDS datasets with memoization and additional functionality.\n\n Lazily loads info from TFDS and provides memoization to avoid expensive hidden\n file operations. Also provides additional utility methods.\n \"\"\"\n\n _MEMOIZED_INSTANCES = {}\n\n def __new__(cls, name, data_dir=None):\n \"\"\"Either creates a new dataset or returns it if it already exists.\"\"\"\n key = (name, data_dir)\n if key not in cls._MEMOIZED_INSTANCES:\n cls._MEMOIZED_INSTANCES[key] = object.__new__(cls)\n return cls._MEMOIZED_INSTANCES[key]\n\n def __init__(self, name, data_dir=None):\n \"\"\"LazyTfdsLoader constructor.\n\n Args:\n name: str, the name of the TFDS dataset.\n data_dir: str (optional), directory to read/write TFDS data.\n \"\"\"\n self._name = name\n self._data_dir = data_dir\n self._builder = None\n\n def __getstate__(self):\n \"\"\"Remove un-pickle-able attributes and return the state.\"\"\"\n state = self.__dict__.copy()\n del state[\"_builder\"]\n return state\n\n def __getnewargs__(self):\n return self._name, self._data_dir\n\n @property\n def name(self):\n return self._name\n\n @property\n def data_dir(self):\n if _TFDS_DATA_DIR_OVERRIDE:\n if self._data_dir:\n logging.warning(\n \"Overriding TFDS data directory '%s' with '%s' for dataset '%s'.\",\n self._data_dir, _TFDS_DATA_DIR_OVERRIDE, self.name)\n return _TFDS_DATA_DIR_OVERRIDE\n return self._data_dir\n\n @property\n def builder(self):\n if not self._builder:\n self._builder = tfds.builder(self.name, data_dir=self.data_dir)\n return self._builder\n\n @property\n def info(self):\n return self.builder.info\n\n def files(self, split):\n \"\"\"Returns set containing paths to TFDS TFRecord files for the dataset.\"\"\"\n self.verify_split(split)\n files = set()\n\n def _get_builder_files(builder):\n split_info = builder.info.splits[split]\n if builder.version.implements(tfds.core.Experiment.S3):\n num_shards = len(split_info.shard_lengths)\n else:\n num_shards = split_info.num_shards\n return tfds.core.naming.filepaths_for_dataset_split(\n dataset_name=builder.name,\n split=split_info.name,\n num_shards=num_shards,\n data_dir=builder._data_dir, # pylint:disable=protected-access\n filetype_suffix=\"tfrecord\",\n )\n\n if self.builder.BUILDER_CONFIGS and \"/\" not in self.name:\n # If builder has multiple configs, and no particular config was\n # requested, then compute all.\n for config in self.builder.BUILDER_CONFIGS:\n builder_for_config = tfds.builder(self.builder.name, config=config)\n files.update(_get_builder_files(builder_for_config))\n else:\n files.update(_get_builder_files(self.builder))\n\n if not files:\n logging.fatal(\"No TFRecord files found for 
dataset: %s\", self.name)\n return files\n\n def load(self, split, shuffle_files):\n \"\"\"Returns a tf.data.Dataset for the given split.\"\"\"\n self.verify_split(split)\n return tfds.load(\n self._name,\n split=split,\n data_dir=self.data_dir,\n shuffle_files=shuffle_files,\n download=True,\n try_gcs=True)\n\n def load_shard(self, shard_path):\n \"\"\"Returns a dataset for a single shard of the TFDS TFRecord files.\"\"\"\n ds = tfds.core.file_format_adapter.TFRecordExampleAdapter(\n self.info.features.get_serialized_info()).dataset_from_filename(\n shard_path)\n ds = ds.map(self.info.features.decode_example)\n return ds\n\n def verify_split(self, split):\n \"\"\"Verify that `split` is a valid split.\"\"\"\n if split not in self.info.splits.keys():\n raise ValueError(\"{} has no '{}' split\".format(self.name, split))\n\n def size(self, split):\n \"\"\"Returns the number of examples in the split.\"\"\"\n self.verify_split(split)\n ds_splits = self.info.splits\n dataset_size = ds_splits[split].num_examples\n # Very large datasets have num_examples = 0; default instead to np.inf\n dataset_size = dataset_size if dataset_size > 0 else np.inf\n return dataset_size\n\n\ndef encode_string_features(\n dataset, vocabulary, keys, copy_plaintext=False):\n \"\"\"Encode specified string features.\n\n Passes through non-string features unchanged. Optionally passes through copy\n of original string features with \"_plaintext\" suffix added to the key.\n\n Args:\n dataset: a tf.data.Dataset\n vocabulary: a vocabulary.Vocabulary\n keys: list of strings, keys of features to encode.\n copy_plaintext: bool, whether to pass through copies of plaintext strings\n with a \"_plaintext\" suffix added to the key.\n Returns:\n a tf.data.Dataset\n \"\"\"\n keys = set(keys)\n def my_fn(features):\n \"\"\"Encode all specified feature that are strings and return a dictionary.\n\n Args:\n features: a dictionary\n Returns:\n a dictionary\n \"\"\"\n ret = {}\n for k, v in features.items():\n if v.dtype == tf.string and k in keys:\n if copy_plaintext:\n ret[\"%s_plaintext\" % k] = v\n v = tf.cast(vocabulary.encode_tf(v), tf.int64)\n ret[k] = v\n return ret\n return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef dict_to_tfexample(ex):\n \"\"\"Convert example dictionary to tf.train.Example proto.\"\"\"\n feature_dict = {}\n for k, v in ex.items():\n t = tf.constant(v)\n if len(t.shape) == 0: # pylint:disable=g-explicit-length-test\n v = [v]\n elif len(t.shape) == 1:\n v = list(v)\n else:\n raise ValueError(\n \"Unsupported shape (%s) for '%s' value: %s\" %\n (tf.shape, k, v))\n\n if t.dtype == tf.string and len(t.shape) <= 1:\n feature_dict[k] = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[tf.compat.as_bytes(t) for t in v]))\n elif t.dtype in (tf.int32, tf.int64) and len(t.shape) <= 1:\n feature_dict[k] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=v))\n else:\n raise ValueError(\n \"Unsupported type (%s) and shape (%s) for '%s' value: %s\" %\n (tf.dtype, tf.shape, k, v))\n\n return tf.train.Example(features=tf.train.Features(feature=feature_dict))\n\n\ndef inverse_dataset(dataset, label):\n \"\"\"Invert examples and prepend the given label to the new inputs.\n\n Args:\n dataset: tf.data.Dataset, contains \"inputs\" and \"targets\" keys\n label: str, the label to prepend to the inputs.\n Returns:\n a tf.data.Dataset\n \"\"\"\n def map_fn(x):\n return {\n \"inputs\": tf.strings.join([label, x[\"targets\"]]),\n \"targets\": x[\"inputs\"],\n }\n return dataset.map(\n 
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\n# ================================ Tasks =======================================\ndef get_info_path(data_dir, split):\n return os.path.join(data_dir, _INFO_FILENAME.format(split=split))\n\n\ndef get_tfrecord_prefix(data_dir, split):\n return os.path.join(data_dir, _TFRECORD_PREFIX.format(split=split))\n\n\ndef get_stats_path(data_dir, split):\n return os.path.join(data_dir, _STATS_FILENAME.format(split=split))\n\n\nclass Task(DatasetProviderBase):\n \"\"\"A wrapper for a `tf.data.Dataset` along with preprocessing information.\n\n Tasks handle preprocessing (via arbitrary TF function) and tokenization\n (via SentencePiece). Non-train splits also pass through the original\n plaintext strings with a \"_plaintext\" suffix added to the key.\n \"\"\"\n\n def __init__(self,\n name,\n dataset_fn,\n splits,\n text_preprocessor,\n sentencepiece_model_path,\n metric_fns,\n postprocess_fn=None,\n token_preprocessor=None,\n output_features=None,\n num_input_examples=None):\n \"\"\"Task constructor.\n\n Args:\n name: string, a unique name for the Task. A ValueError will be raised if\n another task with this name is already registered.\n dataset_fn: callable, a function with the signature\n `dataset_fn(split, shuffle_files)' that returns a `tf.data.Dataset`.\n splits: list(string), a list of allowable splits to request from the\n `dataset_fn`.\n text_preprocessor: a function (or list of functions) that (each) takes in\n a tf.data.Dataset of string features and returns a tf.data.Dataset of\n string features. Can be set to None as a no-op. If a list is given,\n they will be executed sequentially.\n sentencepiece_model_path: string, path to a SentencePiece model file to\n use for tokenization.\n metric_fns: list(callable), list of metric functions with the signature\n `metric_fn(targets, predictions)` to use during evaluation.\n postprocess_fn: function, a function that takes in decoded model outputs\n (strings) and returns a string which is ready for evaluation using the\n metric functions in `metric_fns`. Can be set to None as a no-op.\n token_preprocessor: an optional function (or list of functions) that\n (each) takes in a tf.data.Dataset of token features and returns a\n tf.data.Dataset of token features.\n Can be set to None as a no-op. If a list is given, they will be\n executed sequentially.\n The functions are also passed `sequence_length` and `vocabulary`\n keyword arguments.\n output_features: list(string), a list of the primary output features of\n the dataset that will be prepared for the model. Defaults to 'inputs'\n and 'targets'.\n num_input_examples: dict(string: int) or None, a dictionary mapping split\n to its size in number of input examples (before preprocessing). The\n `num_input_examples` method will return None if not provided.\n \"\"\"\n if not _VALID_TASK_NAME_REGEX.match(name):\n raise ValueError(\n \"Task name '%s' contains invalid characters. 
Must match regex: %s\" % (\n name, _VALID_TASK_NAME_REGEX.pattern))\n _validate_args(dataset_fn, [\"split\", \"shuffle_files\"])\n for metric_fn in metric_fns:\n _validate_args(metric_fn, [\"targets\", \"predictions\"])\n\n self._name = name\n self._dataset_fn = dataset_fn\n self._text_preprocessor = (\n [] if text_preprocessor is None else text_preprocessor)\n self._token_preprocessor = (\n [] if token_preprocessor is None else token_preprocessor)\n self._sentencepiece_model_path = sentencepiece_model_path\n self._metric_fns = metric_fns\n # Use a pass-through if postprocess_fn is not provided\n self._postprocess_fn = postprocess_fn or (lambda x, **unused_kwargs: x)\n self._cache_dir = None\n self._stats = {}\n self._output_features = sorted(\n set(output_features or _DEFAULT_FEATURE_KEYS))\n self._splits = splits\n self._num_input_examples = num_input_examples\n\n @property\n def name(self):\n return self._name\n\n @property\n def postprocess_fn(self):\n return self._postprocess_fn\n\n @property\n def metric_fns(self):\n return self._metric_fns\n\n @property\n def sentencepiece_model_path(self):\n return self._sentencepiece_model_path\n\n @property\n def output_features(self):\n return self._output_features\n\n @property\n def token_preprocessor(self):\n return self._token_preprocessor\n\n @property\n def splits(self):\n return self._splits\n\n def num_input_examples(self, split):\n if self._num_input_examples is None:\n return None\n return self._num_input_examples[split]\n\n def _preprocess_dataset(self, dataset, preprocessors, **preprocess_kwargs):\n if not hasattr(preprocessors, \"__iter__\"):\n preprocessors = [preprocessors]\n for prep_fn in preprocessors:\n dataset = prep_fn(dataset, **preprocess_kwargs)\n return dataset\n\n def _validate_dataset(\n self,\n dataset,\n expected_output_type,\n expected_output_rank,\n error_label,\n ensure_no_eos=False):\n \"\"\"Validates properties of a tf.data.Dataset, raising Exceptions if needed.\n\n Args:\n dataset: a tf.data.Dataset to validate.\n expected_output_type: a tf.dtype, the expected type of the model features.\n expected_output_rank: an int, the expected rank of the model features.\n error_label: a string, an identifier for the previous processing step to\n report in raised ValueErrors.\n ensure_no_eos: a bool, whether or not to verify that the model features\n contain no EOS tokens.\n\n Returns:\n a validated tf.data.Dataset.\n \"\"\"\n types = tf.data.get_output_types(dataset)\n shapes = tf.data.get_output_shapes(dataset)\n for feat in self.output_features:\n if feat not in types:\n raise ValueError(\n \"Task dataset is missing expected output feature after {label}: \"\n \"{feat}\".format(label=error_label, feat=feat))\n if expected_output_type != types[feat]:\n raise ValueError(\n \"Task dataset has incorrect type for feature '{feat}' after \"\n \"{label}: Got {actual}, expected {expected}\".format(\n feat=feat, label=error_label, actual=types[feat].name,\n expected=expected_output_type.name))\n if expected_output_rank != len(shapes[feat]):\n raise ValueError(\n \"Task dataset has incorrect rank for feature '{feat}' after \"\n \"{label}: Got {actual}, expected {expected}\".format(\n feat=feat, label=error_label, actual=len(shapes[feat]),\n expected=expected_output_rank))\n\n def _ensure_no_eos(feat, v):\n if feat not in self.output_features:\n return v\n with tf.control_dependencies([\n tf.assert_none_equal(\n v, tf.constant(1, tf.int64),\n message=\"Feature '{feat}' unexpectedly contains EOS=1 token \"\n \"after 
{label}.\".format(feat=feat, label=error_label))\n ]):\n return v\n if ensure_no_eos:\n dataset = dataset.map(\n lambda ex: {k: _ensure_no_eos(k, v) for k, v in ex.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\n def preprocess_text(self, dataset):\n \"\"\"Preprocessed text dataset.\"\"\"\n dataset = self._preprocess_dataset(dataset, self._text_preprocessor)\n dataset = self._validate_dataset(\n dataset, expected_output_type=tf.string, expected_output_rank=0,\n error_label=\"text preprocessing\")\n return dataset\n\n def preprocess_tokens(self, dataset, sequence_length):\n \"\"\"Preprocesses tokenized dataset.\n\n Args:\n dataset: a tf.data.Dataset\n sequence_length: dict mapping feature key to int length for that feature\n Returns:\n a tf.data.Dataset\n \"\"\"\n dataset = self._preprocess_dataset(\n dataset, self._token_preprocessor,\n sequence_length=sequence_length,\n vocabulary=self.get_vocabulary())\n dataset = self._validate_dataset(\n dataset,\n expected_output_type=tf.int64,\n expected_output_rank=1,\n error_label=\"token preprocessing\",\n ensure_no_eos=True)\n # Trim and append EOS=1 token to model features.\n def _trim_and_append_eos(feat, v):\n if feat not in self.output_features:\n return v\n return tf.concat([v[:sequence_length[feat]-1], [1]], axis=0)\n\n return dataset.map(\n lambda ex: {k: _trim_and_append_eos(k, v) for k, v in ex.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def initialize(self):\n \"\"\"Attempts to load cached dataset and stats.\"\"\"\n if self._cache_dir:\n return\n\n # See if cached data exists in any of the cache directories.\n potential_cache_dirs = [\n os.path.join(d, self.name) for d in _GLOBAL_CACHE_DIRECTORIES]\n for cache_dir in potential_cache_dirs:\n if tf.io.gfile.exists(os.path.join(cache_dir, \"COMPLETED\")):\n self._cache_dir = cache_dir\n logging.info(\"'%s' is cached at %s.\", self.name, self.cache_dir)\n return\n logging.info(\n \"'%s' does not exist in any task cache directories (searched %s).\",\n self.name,\n potential_cache_dirs,\n )\n\n @property\n def cached(self):\n \"\"\"Returns whether or not cached dataset exists, initializing if needed.\"\"\"\n self.initialize()\n return self._cache_dir is not None\n\n @property\n def cache_dir(self):\n \"\"\"Returns the cache directory, initializing if needed.\"\"\"\n self.assert_cached()\n return self._cache_dir\n\n def assert_cached(self):\n \"\"\"Raises an assertion error if cached dataset does not exist.\"\"\"\n assert self.cached, (\n \"'%s' does not exist in any of the task cache directories\" % self.name)\n\n def get_cached_stats(self, split=tfds.Split.TRAIN):\n \"\"\"Returns basic statistics for cached dataset.\"\"\"\n self.assert_cached()\n if split not in self._stats:\n stats_path = get_stats_path(self.cache_dir, split)\n if not tf.io.gfile.exists(stats_path):\n raise ValueError(\n \"Stats do not exist for '%s' split: %s\" % (self.name, split))\n with tf.io.gfile.GFile(stats_path) as f:\n self._stats[split] = json.load(f)\n return self._stats[split]\n\n def get_vocabulary(self):\n \"\"\"Returns a SentencePieceVocabulary object using the Task's model.\"\"\"\n return sentencepiece_vocabulary.SentencePieceVocabulary(\n self.sentencepiece_model_path)\n\n def get_dataset(\n self,\n sequence_length,\n split=tfds.Split.TRAIN,\n use_cached=False,\n shuffle=True,\n shuffle_buffer_size=_SHUFFLE_BUFFER_SIZE,\n ):\n \"\"\"Returns a tf.data.Dataset from cache or generated on the fly.\n\n Args:\n sequence_length: dict mapping feature key to 
int length for that feature\n split: string, the split to return.\n use_cached: bool, whether to use the cached dataset instead of processing\n it on the fly. Defaults to True.\n shuffle: bool, whether to shuffle the dataset. Only used when generating\n on the fly (use_cached=False).\n shuffle_buffer_size: an integer\n Returns:\n A mixed tf.data.Dataset.\n \"\"\"\n if use_cached:\n ds = self._get_cached_dataset(split, shuffle)\n else:\n ds = self._dataset_fn(split=split, shuffle_files=shuffle)\n ds = self.preprocess_text(ds)\n # Tokenize\n ds = encode_string_features(\n ds, self.get_vocabulary(), keys=self.output_features,\n copy_plaintext=True)\n\n if (not use_cached and self.num_input_examples(split) and\n self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE):\n ds = ds.cache()\n\n # Post tokenization processing.\n ds = self.preprocess_tokens(ds, sequence_length)\n\n if shuffle:\n # Shuffle before mixing since preprocessor can output multiple\n # (correlated) examples per input.\n ds = ds.shuffle(shuffle_buffer_size)\n\n return ds\n\n def _get_cached_dataset(self, split=tfds.Split.TRAIN, shuffle=True):\n \"\"\"Returns a tf.data.Dataset read from cached files.\"\"\"\n self.assert_cached()\n with tf.io.gfile.GFile(get_info_path(self.cache_dir, split)) as f:\n split_info = json.load(f)\n\n # Use `FixedLenSequenceFeature` for sequences with variable length.\n def _feature_config(shape, dtype):\n if shape and shape[0] is None:\n return tf.io.FixedLenSequenceFeature(\n shape[1:], dtype, allow_missing=True)\n return tf.io.FixedLenFeature(shape, dtype)\n feature_desc = {\n feat: _feature_config(**desc)\n for feat, desc in split_info[\"features\"].items()}\n\n ds = tf.data.Dataset.list_files(\n \"%s-*-of-*%d\" % (\n get_tfrecord_prefix(self.cache_dir, split),\n split_info[\"num_shards\"]),\n shuffle=shuffle)\n ds = ds.interleave(\n tf.data.TFRecordDataset,\n cycle_length=16, block_length=16,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n ds = ds.map(lambda ex: tf.parse_single_example(ex, feature_desc),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if self.get_cached_stats(split)[\"examples\"] <= _MAX_EXAMPLES_TO_MEM_CACHE:\n ds = ds.cache()\n return ds\n\n\nclass TfdsTask(Task):\n \"\"\"A `Task` that uses TensorFlow Datasets to provide the input dataset.\"\"\"\n\n def __init__(\n self,\n name,\n tfds_name,\n text_preprocessor,\n sentencepiece_model_path,\n metric_fns,\n tfds_data_dir=None,\n splits=None,\n **task_kwargs):\n \"\"\"TfdsTask constructor.\n\n Args:\n name: string, a unique name for the Task. A ValueError will be raised if\n another task with this name is already registered.\n tfds_name: string, the name and version number of a TFDS dataset,\n optionally with a config.\n text_preprocessor: a function (or list of functions) that (each) takes in\n a tf.data.Dataset of string features and returns a tf.data.Dataset of\n string features. Can be set to None as a no-op. If a list is given,\n they will be executed sequentially.\n sentencepiece_model_path: string, path to a SentencePiece model file to\n use for tokenization.\n metric_fns: list(callable), list of metric functions with the signature\n metric_fn(targets, predictions) to use during evaluation.\n tfds_data_dir: string, an optional path to a specific TFDS data directory\n to use.\n splits: list(string) or None, a list of allowable splits to load. 
The\n default, None, uses all available splits from the TFDS dataset info.\n **task_kwargs: dict, additional keyword arguments for the parent `Task`\n class.\n \"\"\"\n if \":\" not in tfds_name:\n raise ValueError(\n \"TFDS name must contain a version number, got: %s\" % tfds_name)\n\n self._tfds_dataset = LazyTfdsLoader(tfds_name, tfds_data_dir)\n\n def dataset_fn(split, shuffle_files):\n return self._tfds_dataset.load(split, shuffle_files)\n\n super(TfdsTask, self).__init__(\n name,\n dataset_fn=dataset_fn,\n splits=splits,\n text_preprocessor=text_preprocessor,\n sentencepiece_model_path=sentencepiece_model_path,\n metric_fns=metric_fns,\n **task_kwargs)\n\n @property\n def splits(self):\n \"\"\"Override since we can't call `info.splits` until after init.\"\"\"\n return self._splits or self._tfds_dataset.info.splits\n\n @property\n def tfds_dataset(self):\n return self._tfds_dataset\n\n def num_input_examples(self, split):\n return self.tfds_dataset.size(split)\n\n\nclass TextLineTask(Task):\n \"\"\"A `Task` that reads text lines as input.\n\n Requires a text_processor to be passed that takes a tf.data.Dataset of\n strings and returns a tf.data.Dataset of feature dictionaries.\n e.g. preprocessors.preprocess_tsv()\n \"\"\"\n\n def __init__(\n self,\n name,\n text_preprocessor,\n sentencepiece_model_path,\n metric_fns,\n splits,\n **task_kwargs):\n \"\"\"TfdsTask constructor.\n\n Args:\n name: string, a unique name for the Task. A ValueError will be raised if\n another task with this name is already registered.\n text_preprocessor: a function (or list of functions) that (each) takes in\n a tf.data.Dataset of string features and returns a tf.data.Dataset of\n string features. Can be set to None as a no-op. If a list is given,\n they will be executed sequentially.\n sentencepiece_model_path: string, path to a SentencePiece model file to\n use for tokenization.\n metric_fns: list(callable), list of metric functions with the signature\n metric_fn(targets, predictions) to use during evaluation.\n splits: dict of string (split name) to string (filename)\n **task_kwargs: dict, additional keyword arguments for the parent `Task`\n class.\n \"\"\"\n self._split_to_filename = splits\n def dataset_fn(split, shuffle_files):\n del shuffle_files\n filename = self._split_to_filename[split]\n return tf.data.TextLineDataset(filename)\n super(TextLineTask, self).__init__(\n name,\n dataset_fn=dataset_fn,\n splits=self._split_to_filename.keys(),\n text_preprocessor=text_preprocessor,\n sentencepiece_model_path=sentencepiece_model_path,\n metric_fns=metric_fns,\n **task_kwargs)\n\n\nclass TaskRegistry(DatasetProviderRegistry):\n _REGISTRY = {}\n _PROVIDER_TYPE = Task\n\n @classmethod\n def add(cls, name, task_cls=Task, **kwargs):\n super(TaskRegistry, cls).add(name, task_cls, name, **kwargs)\n\n\n# ================================ Mixtures ====================================\nclass Mixture(DatasetProviderBase):\n \"\"\"Class for mixing multiple tasks.\"\"\"\n\n def __init__(self, tasks, default_rate=None):\n \"\"\"Mixture constructor.\n\n A mixture specifies a set of tasks with associated mixing rates.\n\n Mixing happens on preprocessed tokenized examples.\n\n The mixing rates represent relative numbers of examples to use from their\n associated tasks. Setting the mixing rates to be equal to the numbers of\n examples in the tasks will result in each task going through an epoch in\n about the same amount of time - i.e. 
all examples are sampled equally across\n all tasks.\n\n Rates can be expressed either as absolute numbers or as functions that\n receive the Task as an argument.\n\n Args:\n tasks: a list where each element is either a string (task name) or a\n pair whose first element is the task name and whose second element\n is either a float (rate) or a function from Task to float.\n default_rate: a float or a function from Task to float. This specifies the\n default rate if rates are not provided in the `tasks` argument.\n \"\"\"\n self._task_to_rate = {}\n self._tasks = []\n for t in tasks:\n if isinstance(t, str):\n task_name = t\n rate = default_rate\n if default_rate is None:\n raise ValueError(\"need a rate for each task\")\n else:\n task_name, rate = t\n self._tasks.append(TaskRegistry.get(task_name))\n self._task_to_rate[task_name] = rate\n if len(set(tuple(t.output_features) for t in self._tasks)) != 1:\n raise ValueError(\n \"All Tasks in a Mixture must have the same output features.\"\n )\n if len(set(t.sentencepiece_model_path for t in self._tasks)) != 1:\n raise ValueError(\n \"All Tasks in a Mixture must have the same sentencepiece_model_path.\"\n )\n\n @property\n def tasks(self):\n return self._tasks\n\n def get_rate(self, task):\n rate = self._task_to_rate[task.name]\n return float(rate(task) if callable(rate) else rate)\n\n def num_input_examples(self, split):\n return sum(t.num_input_examples(split) for t in self.tasks)\n\n @property\n def output_features(self):\n # We require all tasks to have the same output_features in __init__\n # so we can just get the output_features for the 0th task\n return self._tasks[0].output_features\n\n @property\n def sentencepiece_model_path(self):\n # We require all tasks to have the same sentencepiece_model_path in __init__\n # so we can just get the sentencepiece_model_path for the first task\n return self._tasks[0].sentencepiece_model_path\n\n def get_vocabulary(self):\n \"\"\"Returns a SentencePieceVocabulary object using the Tasks' model.\"\"\"\n return self._tasks[0].get_vocabulary()\n\n def get_dataset(\n self,\n sequence_length,\n split=tfds.Split.TRAIN,\n use_cached=False,\n shuffle=True,\n compute_stats_empirically=False,\n ):\n \"\"\"Returns the dataset of mixed tasks using the object-specified rates.\n\n Args:\n sequence_length: dict mapping feature key to int length for that feature\n split: string, the split to return for all tasks.\n use_cached: bool, whether to use the cached dataset instead of processing\n it on the fly. Defaults to True.\n shuffle: bool, whether to shuffle the dataset. 
Only used when generating\n on the fly (use_cached=False).\n compute_stats_empirically: a boolean - does not work on TPU\n \"\"\"\n tasks = []\n for task in self.tasks:\n if split not in task.splits:\n logging.info(\n \"Task %s has no '%s' split, skipping.\", task.name, split\n )\n continue\n tasks.append(task)\n if not tasks:\n raise ValueError(\"No datasets have a '{}' split\".format(split))\n def filter_features(ex):\n return {k: v for k, v in ex.items() if k in self.output_features}\n datasets = [\n task.get_dataset(sequence_length, split, use_cached, shuffle=shuffle) # pylint:disable=g-complex-comprehension\n .repeat()\n .map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n for task in tasks]\n rates = [self.get_rate(task) for task in tasks]\n # Sample from the dataset with the rates rates\n dataset = tf.data.experimental.sample_from_datasets(datasets, rates)\n if split == \"train\" and use_cached:\n _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,\n compute_stats_empirically)\n return dataset\n\n# Functions to be used as mixing rates:\n\n\[email protected]\ndef rate_num_examples(task, maximum=None, temperature=1.0, scale=1.0):\n \"\"\"Mixing rate equal to the number of examples for the task.\"\"\"\n # TODO(adarob): Support case when there are no cached stats.\n ret = task.get_cached_stats(\"train\")[\"examples\"]\n ret *= scale\n if maximum:\n ret = min(ret, maximum)\n if temperature != 1.0:\n ret = ret ** (1.0 / temperature)\n return ret\n\n\[email protected]\ndef rate_unsupervised(task, value=1e6):\n \"\"\"Gin-configurable mixing rate for the unsupervised co-training task.\"\"\"\n del task\n return value\n\n\ndef _log_padding_fractions(dataset, sequence_length, num_examples=100):\n \"\"\"Empirically compute the fraction of padding - log the results.\n\n Args:\n dataset: a tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n num_examples: an integer\n \"\"\"\n logging.info(\"computing padding fractions\")\n keys = sequence_length.keys()\n padding_frac = {k: 0 for k in keys}\n for ex in tfds.as_numpy(dataset.take(num_examples)):\n for k in keys:\n padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))\n for k in keys:\n logging.info(\"%s padding fraction = %g\", k, padding_frac[k])\n\n\ndef _log_mixing_proportions(\n tasks, datasets, rates, mixed_dataset,\n sequence_length, compute_stats_empirically):\n \"\"\"Log information about the mixing proportions.\n\n Called from Mixture.get_dataset.\n\n Args:\n tasks: a list of Task\n datasets: a list of tf.data.Dataset\n rates: a list of floats\n mixed_dataset: a tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n compute_stats_empirically: a boolean - does not work on TPU\n \"\"\"\n def _normalize(l):\n denom = sum(l)\n return [x / denom for x in l]\n # compute some stats about the mixture\n examples_fraction = _normalize(rates)\n if compute_stats_empirically:\n stats_examples = 100\n mean_inputs_length = []\n mean_targets_length = []\n for dataset in datasets:\n inputs_sum = 0\n targets_sum = 0\n for ex in tfds.as_numpy(dataset.take(stats_examples)):\n inputs_sum += ex[\"inputs\"].size\n targets_sum += ex[\"targets\"].size\n mean_inputs_length.append(inputs_sum / float(stats_examples))\n mean_targets_length.append(targets_sum / float(stats_examples))\n else:\n def _estimated_mean_length(task, key):\n if task.token_preprocessor:\n return sequence_length[key]\n else:\n return min(sequence_length[key],\n (task.get_cached_stats(\"train\")[key + 
\"_tokens\"] /\n task.get_cached_stats(\"train\")[\"examples\"]))\n mean_inputs_length = [_estimated_mean_length(task, \"inputs\")\n for task in tasks]\n mean_targets_length = [_estimated_mean_length(task, \"targets\")\n for task in tasks]\n inputs_fraction = _normalize(\n [l * r for l, r in zip(mean_inputs_length, rates)])\n targets_fraction = _normalize(\n [l * r for l, r in zip(mean_targets_length, rates)])\n logging.info(\"%12s %12s %12s %12s %12s %12s %s\",\n \"rate\", \"ex.frac.\", \"inp.frac.\", \"tgt.frac.\",\n \"inp.len.\", \"tgt.len\", \"task\")\n for i in range(len(rates)):\n logging.info(\"%12g %12g %12g %12g %12g %12g %s\",\n rates[i], examples_fraction[i],\n inputs_fraction[i], targets_fraction[i],\n mean_inputs_length[i], mean_targets_length[i],\n tasks[i].name)\n if compute_stats_empirically:\n _log_padding_fractions(mixed_dataset, sequence_length)\n\n\nclass MixtureRegistry(DatasetProviderRegistry):\n _REGISTRY = {}\n _PROVIDER_TYPE = Mixture\n\n @classmethod\n def add(cls, name, tasks, default_rate=None):\n super(MixtureRegistry, cls).add(name, Mixture, tasks, default_rate)\n\n\ndef get_mixture_or_task(task_or_mixture_name):\n \"\"\"Return the Task or Mixture from the appropriate registry.\"\"\"\n mixtures = MixtureRegistry.names()\n tasks = TaskRegistry.names()\n if task_or_mixture_name in mixtures:\n if task_or_mixture_name in tasks:\n logging.warning(\"%s is both a Task and a Mixture, returning Mixture\",\n task_or_mixture_name)\n return MixtureRegistry.get(task_or_mixture_name)\n if task_or_mixture_name in tasks:\n return TaskRegistry.get(task_or_mixture_name)\n else:\n raise ValueError(\"No Task or Mixture found with name: %s\" %\n task_or_mixture_name)\n\n\ndef get_subtasks(task_or_mixture):\n \"\"\"Returns all the Tasks in a Mixture as a list or the Task itself.\"\"\"\n if isinstance(task_or_mixture, Task):\n return [task_or_mixture]\n else:\n return task_or_mixture.tasks\n\n\ndef _validate_args(fn, expected_pos_args):\n \"\"\"Ensure function has exactly expected positional args.\"\"\"\n argspec = inspect.getargspec(fn)\n expected_pos_args = tuple(expected_pos_args)\n actual_args = tuple(argspec.args)\n if actual_args[:len(expected_pos_args)] != expected_pos_args:\n raise ValueError(\n \"'%s' must have positional args %s, got: %s\" % (\n fn.__name__, expected_pos_args, actual_args))\n actual_pos_args = tuple(\n argspec.args[:-len(argspec.defaults)]\n if argspec.defaults else argspec.args)\n if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:\n raise ValueError(\n \"'%s' may only have positional args %s, got: %s\" % (\n fn.__name__, expected_pos_args, actual_pos_args))\n"
]
| [
[
"tensorflow.compat.v1.io.FixedLenFeature",
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.data.get_output_types",
"tensorflow.compat.v1.data.get_output_shapes",
"tensorflow.compat.v1.compat.as_bytes",
"tensorflow.compat.v1.io.gfile.GFile",
"tensorflow.compat.v1.data.experimental.sample_from_datasets",
"tensorflow.compat.v1.train.Int64List",
"tensorflow.compat.v1.io.FixedLenSequenceFeature",
"tensorflow.compat.v1.strings.join",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.data.TextLineDataset",
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.constant"
]
]
|
2019ZSS/FER | [
"49b8b400680bb1c6723394a7fc1396e55d643315"
]
| [
"trainers/tta_trainer.py"
]
| [
"\"\"\"this class build and run a trainer by a configuration\"\"\"\nimport os\nimport sys\nimport shutil\nimport datetime\nimport traceback\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport matplotlib.pyplot as plt\nfrom torchvision.transforms import transforms\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom utils.radam import RAdam\n\n# from torch.optim import Adam as RAdam\n# from torch.optim import SGD as RAdam\n\nfrom utils.metrics.segment_metrics import eval_metrics\nfrom utils.metrics.metrics import accuracy\nfrom utils.generals import make_batch\n\n\nEMO_DICT = {0: \"ne\", 1: \"an\", 2: \"di\", 3: \"fe\", 4: \"ha\", 5: \"sa\", 6: \"su\"}\n\n\nclass Trainer(object):\n \"\"\"base class for trainers\"\"\"\n\n def __init__(self):\n pass\n\n\nclass FER2013Trainer(Trainer):\n \"\"\"for classification task\"\"\"\n\n def __init__(self, model, train_set, val_set, test_set, configs):\n super().__init__()\n print(\"Start trainer..\")\n print(configs)\n\n # load config\n self._configs = configs\n self._lr = self._configs[\"lr\"]\n self._batch_size = self._configs[\"batch_size\"]\n self._momentum = self._configs[\"momentum\"]\n self._weight_decay = self._configs[\"weight_decay\"]\n self._distributed = self._configs[\"distributed\"]\n self._num_workers = self._configs[\"num_workers\"]\n self._device = torch.device(self._configs[\"device\"])\n self._max_epoch_num = self._configs[\"max_epoch_num\"]\n self._max_plateau_count = self._configs[\"max_plateau_count\"]\n self._min_lr = self._configs[\"min_lr\"] if \"min_lr\" in self._configs else 1e-6\n # load dataloader and model\n self._train_set = train_set\n self._val_set = val_set\n self._test_set = test_set\n if self._configs[\"arch\"] in [\"lightcnn_ag\", \"lightcnn_at\", \"resnet_at\"]:\n kw = configs[\"model_kw\"] if 'model_kw' in configs else {}\n self._model = model(\n in_channels=configs[\"in_channels\"],\n num_classes=configs[\"num_classes\"],\n weight_path=configs[\"weight_path\"] if \"weight_path\" in configs else \"\",\n **kw, \n )\n else:\n self._model = model(\n in_channels=configs[\"in_channels\"],\n num_classes=configs[\"num_classes\"],\n weight_path=configs[\"weight_path\"] if \"weight_path\" in configs else \"\", \n )\n\n # self._model.fc = nn.Linear(512, 7)\n # self._model.fc = nn.Linear(256, 7)\n self._model = self._model.to(self._device)\n\n if self._distributed == 1:\n torch.distributed.init_process_group(backend=\"nccl\")\n self._model = nn.parallel.DistributedDataParallel(self._model)\n\n self._train_loader = DataLoader(\n self._train_set,\n batch_size=self._batch_size,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=True,\n worker_init_fn=lambda x: np.random.seed(x),\n )\n self._val_loader = DataLoader(\n self._val_set,\n batch_size=self._batch_size,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=False,\n worker_init_fn=lambda x: np.random.seed(x),\n )\n\n self._test_loader = DataLoader(\n self._test_set,\n batch_size=1,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=False,\n worker_init_fn=lambda x: np.random.seed(x),\n )\n else:\n self._train_loader = DataLoader(\n self._train_set,\n batch_size=self._batch_size,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=True,\n )\n 
self._val_loader = DataLoader(\n self._val_set,\n batch_size=self._batch_size,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=False,\n )\n self._test_loader = DataLoader(\n self._test_set,\n batch_size=1,\n num_workers=self._num_workers,\n pin_memory=True,\n shuffle=False,\n )\n\n # define loss function (criterion) and optimizer\n class_weights = [\n 1.02660468,\n 9.40661861,\n 1.00104606,\n 0.56843877,\n 0.84912748,\n 1.29337298,\n 0.82603942,\n ]\n class_weights = torch.FloatTensor(np.array(class_weights))\n\n if self._configs[\"weighted_loss\"] == 0:\n self._criterion = nn.CrossEntropyLoss().to(self._device)\n else:\n self._criterion = nn.CrossEntropyLoss(class_weights).to(self._device)\n\n self._optimizer = RAdam(\n params=self._model.parameters(),\n lr=self._lr,\n weight_decay=self._weight_decay,\n )\n\n self._scheduler = ReduceLROnPlateau(\n self._optimizer,\n patience=self._configs[\"plateau_patience\"],\n min_lr=self._min_lr,\n verbose=True,\n )\n\n \"\"\" TODO set step size equal to configs\n self._scheduler = StepLR(\n self._optimizer,\n step_size=self._configs['steplr']\n )\n \"\"\"\n\n # training info\n self._start_time = datetime.datetime.now()\n self._start_time = self._start_time.replace(microsecond=0)\n\n log_dir = os.path.join(\n self._configs[\"cwd\"],\n self._configs[\"log_dir\"],\n \"{}_{}_{}\".format(\n self._configs[\"arch\"],\n self._configs[\"model_name\"],\n self._start_time.strftime(\"%Y%b%d_%H.%M\"),\n ),\n )\n self._writer = SummaryWriter(log_dir)\n self._train_loss_list = []\n self._train_acc_list = []\n self._val_loss_list = []\n self._val_acc_list = []\n self._best_val_loss = 1e9\n self._best_val_acc = 0\n self._best_train_loss = 1e9\n self._best_train_acc = 0\n self._test_acc = 0.0\n self._plateau_count = 0\n self._current_epoch_num = 0\n\n # for checkpoints\n # really? 
\n # self._checkpoint_dir = os.path.join(self._configs[\"cwd\"], \"saved/checkpoints\")\n # if not os.path.exists(self._checkpoint_dir):\n # os.makedirs(self._checkpoint_dir, exist_ok=True)\n\n self._checkpoint_dir = os.path.join(self._configs[\"cwd\"], self._configs[\"checkpoint_dir\"])\n if not os.path.exists(self._checkpoint_dir):\n os.makedirs(self._checkpoint_dir, exist_ok=True)\n\n\n\n self._checkpoint_path = os.path.join(\n self._checkpoint_dir,\n \"{}_{}_{}\".format(\n self._configs[\"arch\"],\n self._configs[\"model_name\"],\n self._start_time.strftime(\"%Y%b%d_%H.%M\"),\n ),\n )\n\n def _train(self):\n self._model.train()\n train_loss = 0.0\n train_acc = 0.0\n\n for i, (images, targets) in tqdm(\n enumerate(self._train_loader), total=len(self._train_loader), leave=False\n ):\n images = images.cuda(non_blocking=True)\n targets = targets.cuda(non_blocking=True)\n\n # compute output, measure accuracy and record loss\n outputs = self._model(images)\n\n loss = self._criterion(outputs, targets)\n acc = accuracy(outputs, targets)[0]\n # acc = eval_metrics(targets, outputs, 2)[0]\n\n train_loss += loss.item()\n train_acc += acc.item()\n\n # compute gradient and do SGD step\n self._optimizer.zero_grad()\n loss.backward()\n self._optimizer.step()\n\n i += 1\n self._train_loss_list.append(train_loss / i)\n self._train_acc_list.append(train_acc / i)\n\n def _val(self):\n self._model.eval()\n val_loss = 0.0\n val_acc = 0.0\n\n with torch.no_grad():\n for i, (images, targets) in tqdm(\n enumerate(self._val_loader), total=len(self._val_loader), leave=False\n ):\n images = images.cuda(non_blocking=True)\n targets = targets.cuda(non_blocking=True)\n\n # compute output, measure accuracy and record loss\n outputs = self._model(images)\n\n loss = self._criterion(outputs, targets)\n acc = accuracy(outputs, targets)[0]\n\n val_loss += loss.item()\n val_acc += acc.item()\n\n i += 1\n self._val_loss_list.append(val_loss / i)\n self._val_acc_list.append(val_acc / i)\n\n def _calc_acc_on_private_test(self):\n self._model.eval()\n test_acc = 0.0\n print(\"Calc acc on private test..\")\n f = open(\"private_test_log.txt\", \"w\")\n with torch.no_grad():\n for i, (images, targets) in tqdm(\n enumerate(self._test_loader), total=len(self._test_loader), leave=False\n ):\n\n images = images.cuda(non_blocking=True)\n targets = targets.cuda(non_blocking=True)\n outputs = self._model(images)\n acc = accuracy(outputs, targets)[0]\n test_acc += acc.item()\n f.writelines(\"{}_{}\\n\".format(i, acc.item()))\n\n test_acc = test_acc / (i + 1)\n print(\"Accuracy on private test: {:.3f}\".format(test_acc))\n f.close()\n return test_acc\n\n def _calc_acc_on_private_test_with_tta(self):\n self._model.eval()\n test_acc = 0.0\n print(\"Calc acc on private test with tta..\")\n f = open(\n \"private_test_log_{}_{}.txt\".format(\n self._configs[\"arch\"], self._configs[\"model_name\"]\n ),\n \"w\",\n )\n\n with torch.no_grad():\n for idx in tqdm(\n range(len(self._test_set)), total=len(self._test_set), leave=False\n ):\n images, targets = self._test_set[idx]\n targets = torch.LongTensor([targets])\n\n images = make_batch(images)\n images = images.cuda(non_blocking=True)\n targets = targets.cuda(non_blocking=True)\n\n outputs = self._model(images)\n outputs = F.softmax(outputs, 1)\n\n # outputs.shape [tta_size, 7]\n outputs = torch.sum(outputs, 0)\n\n outputs = torch.unsqueeze(outputs, 0)\n # print(outputs.shape)\n # TODO: try with softmax first and see the change\n acc = accuracy(outputs, targets)[0]\n test_acc += acc.item()\n 
f.writelines(\"{}_{}\\n\".format(idx, acc.item()))\n\n test_acc = test_acc / (idx + 1)\n print(\"Accuracy on private test with tta: {:.3f}\".format(test_acc))\n f.close()\n return test_acc\n\n def train(self):\n \"\"\"make a training job\"\"\"\n # print(self._model)\n\n # freeze the model\n\n \"\"\"\n print('=' * 10)\n for idx, child in enumerate(self._model.children()):\n if idx < 6:\n print(child)\n print('=' * 10)\n \n for m in child.parameters():\n m.requires_grad = False\n \"\"\"\n\n # exit(0)\n\n try:\n while not self._is_stop():\n self._increase_epoch_num()\n self._train()\n self._val()\n\n self._update_training_state()\n self._logging()\n except KeyboardInterrupt:\n traceback.print_exc()\n pass\n\n # training stop\n try:\n state = torch.load(self._checkpoint_path + '_{}.pth'.format('best'))\n if self._distributed:\n self._model.module.load_state_dict(state[\"net\"])\n else:\n self._model.load_state_dict(state[\"net\"])\n\n if not self._test_set.is_tta():\n self._test_acc = self._calc_acc_on_private_test()\n else:\n self._test_acc = self._calc_acc_on_private_test_with_tta()\n\n # self._test_acc = self._calc_acc_on_private_test()\n self._save_weights(pth_name='best')\n except Exception as e:\n traceback.print_exc()\n pass\n\n consume_time = str(datetime.datetime.now() - self._start_time)\n self._writer.add_text(\n \"Summary\",\n \"Converged after {} epochs, consume {}\".format(\n self._current_epoch_num, consume_time[:-7]\n ),\n )\n self._writer.add_text(\n \"Results\", \"Best validation accuracy: {:.3f}\".format(self._best_val_acc)\n )\n self._writer.add_text(\n \"Results\", \"Best training accuracy: {:.3f}\".format(self._best_train_acc)\n )\n self._writer.add_text(\n \"Results\", \"Private test accuracy: {:.3f}\".format(self._test_acc)\n )\n self._writer.close()\n\n def _update_training_state(self):\n if self._val_acc_list[-1] > self._best_val_acc:\n self._save_weights(pth_name='best')\n self._plateau_count = 0\n self._best_val_acc = self._val_acc_list[-1]\n self._best_val_loss = self._val_loss_list[-1]\n self._best_train_acc = self._train_acc_list[-1]\n self._best_train_loss = self._train_loss_list[-1]\n else:\n self._plateau_count += 1\n\n # self._scheduler.step(self._train_loss_list[-1])\n self._scheduler.step(100 - self._val_acc_list[-1])\n # self._scheduler.step()\n\n def _logging(self):\n consume_time = str(datetime.datetime.now() - self._start_time)\n\n message = \"\\nE{:03d} {:.3f}/{:.3f}/{:.3f} {:.3f}/{:.3f}/{:.3f} | p{:02d} Time {}\\n\".format(\n self._current_epoch_num,\n self._train_loss_list[-1],\n self._val_loss_list[-1],\n self._best_val_loss,\n self._train_acc_list[-1],\n self._val_acc_list[-1],\n self._best_val_acc,\n self._plateau_count,\n consume_time[:-7],\n )\n\n self._writer.add_scalar(\n \"Accuracy/Train\", self._train_acc_list[-1], self._current_epoch_num\n )\n self._writer.add_scalar(\n \"Accuracy/Val\", self._val_acc_list[-1], self._current_epoch_num\n )\n self._writer.add_scalar(\n \"Loss/Train\", self._train_loss_list[-1], self._current_epoch_num\n )\n self._writer.add_scalar(\n \"Loss/Val\", self._val_loss_list[-1], self._current_epoch_num\n )\n\n print(message)\n\n def _is_stop(self):\n \"\"\"check stop condition\"\"\"\n return (\n self._plateau_count > self._max_plateau_count\n or self._current_epoch_num > self._max_epoch_num\n )\n\n def _increase_epoch_num(self):\n self._current_epoch_num += 1\n\n def _save_weights(self, test_acc=0.0, pth_name='final'):\n if self._distributed == 0:\n state_dict = self._model.state_dict()\n else:\n state_dict = 
self._model.module.state_dict()\n\n state = {\n **self._configs,\n \"net\": state_dict,\n \"best_val_loss\": self._best_val_loss,\n \"best_val_acc\": self._best_val_acc,\n \"best_train_loss\": self._best_train_loss,\n \"best_train_acc\": self._best_train_acc,\n \"train_losses\": self._train_loss_list,\n \"val_loss_list\": self._val_loss_list,\n \"train_acc_list\": self._train_acc_list,\n \"val_acc_list\": self._val_acc_list,\n \"test_acc\": self._test_acc,\n }\n\n torch.save(state, self._checkpoint_path + '_{}.pth'.format(pth_name))\n"
]
| [
[
"torch.LongTensor",
"torch.nn.functional.softmax",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.distributed.init_process_group",
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.unsqueeze",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"numpy.array",
"torch.nn.parallel.DistributedDataParallel"
]
]
|
WMD-group/PyTASER | [
"12b4e3203698b7e6f03457022897405023d6f239"
]
| [
"pytaser/kpoints.py"
]
| [
"import numpy as np\nfrom pymatgen.core import Structure\n\n\ndef get_kpoint_weights(bandstructure, time_reversal=True, symprec=0.1):\n \"\"\"\n Function to calculate the kpoint_weights for non-magnetic materials (non-metals).\n\n Args:\n bandstructure: PMG bandstructure object\n time_reversal:\n symprec: Symmetry precision in Angstrom.(Lower value is more precise, but\n computationally more expensive)\n Returns:\n k-point_weights\n \"\"\"\n\n kpoints = get_kpoints_from_bandstructure(bandstructure)\n _, _, _, _, _, kp_mapping = expand_kpoints(\n bandstructure.structure,\n kpoints,\n symprec=symprec,\n time_reversal=time_reversal,\n return_mapping=True,\n )\n weights = np.unique(kp_mapping, return_counts=True)[1].astype(float)\n weights /= np.sum(weights)\n return weights\n\n\ndef get_kpoints_from_bandstructure(bandstructure, cartesian=False):\n if cartesian:\n kpoints = np.array([k.cart_coords for k in bandstructure.kpoints])\n else:\n kpoints = np.array([k.frac_coords for k in bandstructure.kpoints])\n\n return kpoints\n\n\ndef expand_kpoints(\n structure,\n kpoints,\n symprec=0.01,\n return_mapping=False,\n time_reversal=True,\n):\n kpoints = np.array(kpoints).round(8)\n\n # due to limited input precision of the k-points, the mesh is returned as a float\n mesh, is_shifted = get_mesh_from_kpoint_diff(kpoints)\n\n if is_shifted:\n shift = np.array([1, 1, 1])\n else:\n shift = np.array([0, 0, 0])\n\n # to avoid issues to limited input precision, recalculate the input k-points\n # so that the mesh is integer and the k-points are not truncated\n # to a small precision\n addresses = np.rint((kpoints + shift / (mesh * 2)) * mesh)\n mesh = np.rint(mesh)\n kpoints = addresses / mesh - shift / (mesh * 2)\n\n rotations, translations, is_tr = get_reciprocal_point_group_operations(\n structure, symprec=symprec, time_reversal=time_reversal\n )\n len(rotations)\n # rotate all-kpoints\n all_rotated_kpoints = []\n for r in rotations:\n all_rotated_kpoints.append(np.dot(r, kpoints.T).T)\n all_rotated_kpoints = np.concatenate(all_rotated_kpoints)\n\n # map to first BZ\n all_rotated_kpoints -= np.rint(all_rotated_kpoints)\n all_rotated_kpoints = all_rotated_kpoints.round(8)\n\n # zone boundary consistent with VASP not with spglib\n all_rotated_kpoints[all_rotated_kpoints == -0.5] = 0.5\n\n # Find unique points\n unique_rotated_kpoints, unique_idxs = np.unique(\n all_rotated_kpoints, return_index=True, axis=0\n )\n\n # find integer addresses\n unique_addresses = (unique_rotated_kpoints + shift / (mesh * 2)) * mesh\n unique_addresses -= np.rint(unique_addresses)\n in_uniform_mesh = (np.abs(unique_addresses) < 1e-5).all(axis=1)\n\n n_mapped = int(np.sum(in_uniform_mesh))\n n_expected = int(np.product(mesh))\n if n_mapped != n_expected:\n raise ValueError(f\"Expected {n_expected} points but found {n_mapped}\")\n\n full_kpoints = unique_rotated_kpoints[in_uniform_mesh]\n full_idxs = unique_idxs[in_uniform_mesh]\n\n if not return_mapping:\n return full_kpoints\n\n op_mapping = np.floor(full_idxs / len(kpoints)).astype(int)\n kp_mapping = (full_idxs % len(kpoints)).astype(int)\n\n return full_kpoints, rotations, translations, is_tr, op_mapping, kp_mapping\n\n\ndef get_mesh_from_kpoint_diff(kpoints, ktol=1e-5):\n kpoints = np.array(kpoints)\n\n # whether the k-point mesh is shifted or Gamma centered mesh\n is_shifted = np.min(np.linalg.norm(kpoints, axis=1)) > 1e-6\n\n unique_a = np.unique(kpoints[:, 0])\n unique_b = np.unique(kpoints[:, 1])\n unique_c = np.unique(kpoints[:, 2])\n\n if len(unique_a) == 1:\n na = 
1\n else:\n # filter very small changes, with a tol of 5e-4 this means k-point meshes\n # denser than 2000x2000x2000 will be treated as numerical noise. Meshes\n # this dense are extremely unlikely\n diff = np.diff(unique_a)\n diff = diff[diff > ktol]\n na = 1 / np.min(diff[diff > ktol])\n\n if len(unique_b) == 1:\n nb = 1\n else:\n diff = np.diff(unique_b)\n nb = 1 / np.min(diff[diff > ktol])\n\n if len(unique_c) == 1:\n nc = 1\n else:\n diff = np.diff(unique_c)\n nc = 1 / np.min(diff[diff > ktol])\n\n # due to limited precision of the input k-points, the mesh is returned as a float\n return np.array([na, nb, nc]), is_shifted\n\n\ndef get_reciprocal_point_group_operations(\n structure: Structure,\n symprec: float = 0.01,\n time_reversal: bool = True,\n):\n from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\n sga = SpacegroupAnalyzer(structure, symprec=symprec)\n if sga.get_symmetry_dataset() is None:\n # sometimes default angle tolerance doesn't work as expected\n sga = SpacegroupAnalyzer(structure, symprec=symprec, angle_tolerance=-1)\n\n rotations = sga.get_symmetry_dataset()[\"rotations\"].transpose((0, 2, 1))\n translations = sga.get_symmetry_dataset()[\"translations\"]\n is_tr = np.full(len(rotations), False, dtype=bool)\n\n if time_reversal:\n rotations = np.concatenate([rotations, -rotations])\n translations = np.concatenate([translations, -translations])\n is_tr = np.concatenate([is_tr, ~is_tr])\n\n rotations, unique_ops = np.unique(rotations, axis=0, return_index=True)\n translations = translations[unique_ops]\n is_tr = is_tr[unique_ops]\n\n # put identity first and time-reversal last\n sort_idx = np.argsort(np.abs(rotations - np.eye(3)).sum(axis=(1, 2)) + is_tr * 10)\n\n return rotations[sort_idx], translations[sort_idx], is_tr[sort_idx]\n"
]
| [
[
"numpy.dot",
"numpy.product",
"numpy.abs",
"numpy.unique",
"numpy.min",
"numpy.eye",
"numpy.rint",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.diff",
"numpy.array",
"numpy.sum"
]
]
|
rvk007/Monocular-Depth-Estimation | [
"c53b89a74cad5554bde7944357d6026e64c7dcdd"
]
| [
"deepnet/model/models/masknet.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom deepnet.model.learner import Model\n\nclass MaskNet3(nn.Module):\n def __init__(self):\n \"\"\"Creates Masknet-3\"\"\"\n super(MaskNet3, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n\n self.layer3= nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 1, kernel_size=1)\n )\n \n\n def forward(self, x):\n bg = x['bg']\n bg_fg = x['bg_fg']\n bg = self.layer1(bg)\n bg_fg = self.layer2(bg_fg)\n\n out = torch.cat([bg, bg_fg], dim=1)\n out = self.layer3(out)\n\n return out\n\n def learner(self, model, tensorboard, dataset_train, train_loader, test_loader, device, optimizer, criterion, epochs, metrics, callbacks):\n \"\"\"Trains the model\n Arguments:\n model: Model to trained and validated\n tensorboard: Tensorboard instance for visualization\n dataset_train: Dataset training instance\n train_loader: Dataloader containing train data on the GPU/ CPU\n test_loader: Dataloader containing test data on the GPU/ CPU \n device: Device on which model will be trained (GPU/CPU)\n optimizer: optimizer for the model\n criterion: Loss function\n epochs: Number of epochs to train the model\n metrics(bool): If metrics is to be displayed or not\n (default: False)\n callbacks: Scheduler to be applied on the model\n (default : None)\n \"\"\"\n\n learn = Model(model, tensorboard, dataset_train, train_loader, test_loader, device, optimizer, criterion, epochs, metrics, callbacks)\n self.result = learn.fit()\n\n @property\n def results(self):\n \"\"\"Returns model results\"\"\"\n return self.result"
]
| [
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cat"
]
]
|
euikook/python-moving-average-examples | [
"d721fdb8a49faa6e9cf84ded3510185b42e7d8dc"
]
| [
"weight.py"
]
| [
"import numpy as np\n\nimport pandas as pd\n\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'monospace'\nrcParams['font.sans-serif'] = ['Tahoma']\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import (MultipleLocator, AutoMinorLocator)\n\n\n# smoothing factor and number of data points\nN = 15\nALPHA = 2/(N+1)\n\nNUM_X = N * 4\n\n# simple moving average\nsma = [0 if i > N else 1/N for i in range(NUM_X)]\n\n# weighted moving average\nwma = [0 if i > N else (N - i)/(N * (N + 1) / 2) for i in range(NUM_X)]\n\n# exponential moving average alpha=2/(N+1)\nema = [ALPHA*(1-ALPHA)**i for i in range(NUM_X)]\n\n# store the values in a data frame \npd.DataFrame({'sma': sma, 'wma': wma, 'ema': ema}).plot(kind='bar', figsize=(16,9))\n\nplt.xticks(np.arange(0, NUM_X, 5), rotation=0)\nplt.yticks(fontsize=10)\n\nplt.legend(labels=[\n 'SMA', \n 'WMA',\n 'EMA, α=2/(N+1)'\n ], fontsize=12)\n\n# title and labels\nplt.title(f'Weights of Moving Average(N={N})', fontsize=14)\nplt.ylabel('Weights', fontsize=12)\nplt.xlabel('n-th Most Recent Smple', fontsize=12)\nplt.tight_layout()\n\nplt.savefig('images/weight.svg', format='svg')\n#plt.show()\n\n"
]
| [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel"
]
]
|
pbmanis/pylibrary | [
"d6cb41386cd39b7a1b6678a71a704f3b9d09faef"
]
| [
"pylibrary/plotting/colormaps/PuBuGnYl_r.py"
]
| [
"\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom numpy import nan, inf\n\n# Used to reconstruct the colormap in pycam02ucs.cm.viscm\nparameters = {'xp': [22.674387857633945, 11.221508276482126, -14.356589454756971, -47.188177587392218, -34.590010048125208, 0.15039134803535603],\n 'yp': [-20.102530541012214, -33.082460732984288, -42.24476439790574, -5.5955497382198871, 42.5065445026178, 24.563699825479944],\n 'min_JK': 18.8671875,\n 'max_JK': 92.5}\n\ncm_data = [[ 0.26700401, 0.00487433, 0.32941519],\n [ 0.2685542 , 0.00957471, 0.33533275],\n [ 0.27003443, 0.01455696, 0.34119436],\n [ 0.27144484, 0.01982836, 0.34699773],\n [ 0.27278686, 0.02539504, 0.35273831],\n [ 0.27405872, 0.03126572, 0.35841766],\n [ 0.27526033, 0.03744784, 0.36403393],\n [ 0.27639307, 0.04380557, 0.36958299],\n [ 0.2774554 , 0.04993149, 0.37506557],\n [ 0.27844714, 0.05585994, 0.38048004],\n [ 0.27936893, 0.06162751, 0.38582368],\n [ 0.28022049, 0.0672627 , 0.39109518],\n [ 0.28100114, 0.07278804, 0.39629373],\n [ 0.28171081, 0.07822095, 0.40141769],\n [ 0.28235053, 0.0835745 , 0.40646432],\n [ 0.28291925, 0.08886094, 0.4114333 ],\n [ 0.2834169 , 0.09408964, 0.41632321],\n [ 0.28384358, 0.09926827, 0.42113253],\n [ 0.28420026, 0.10440257, 0.42585912],\n [ 0.28448608, 0.10949867, 0.43050257],\n [ 0.2847012 , 0.11456108, 0.43506157],\n [ 0.28484581, 0.11959358, 0.43953491],\n [ 0.28492117, 0.12459863, 0.44392086],\n [ 0.28492661, 0.12957963, 0.44821903],\n [ 0.28486252, 0.1345388 , 0.45242844],\n [ 0.28472931, 0.13947799, 0.45654822],\n [ 0.28452817, 0.14439827, 0.46057726],\n [ 0.28425933, 0.14930113, 0.46451508],\n [ 0.28392311, 0.15418778, 0.46836123],\n [ 0.28352022, 0.15905901, 0.47211522],\n [ 0.28305157, 0.16391535, 0.47577663],\n [ 0.28251884, 0.16875671, 0.479345 ],\n [ 0.28192206, 0.17358397, 0.48282047],\n [ 0.28126222, 0.17839731, 0.486203 ],\n [ 0.28054043, 0.18319677, 0.48949266],\n [ 0.27975811, 0.18798216, 0.49268964],\n [ 0.27891713, 0.19275301, 0.49579421],\n [ 0.27801794, 0.19750965, 0.49880695],\n [ 0.27706191, 0.20225183, 0.50172842],\n [ 0.27605049, 0.20697927, 0.50455929],\n [ 0.27498517, 0.21169165, 0.50730037],\n [ 0.27386887, 0.21638789, 0.50995262],\n [ 0.272702 , 0.22106829, 0.51251706],\n [ 0.27148621, 0.22573247, 0.51499482],\n [ 0.27022324, 0.23038004, 0.51738712],\n [ 0.26891491, 0.23501057, 0.51969528],\n [ 0.26756319, 0.2396236 , 0.52192074],\n [ 0.26617119, 0.24421813, 0.52406522],\n [ 0.26473951, 0.24879442, 0.52613012],\n [ 0.26327012, 0.25335212, 0.52811709],\n [ 0.26176496, 0.25789086, 0.53002785],\n [ 0.26022606, 0.26241031, 0.53186418],\n [ 0.25865541, 0.26691017, 0.5336279 ],\n [ 0.25705544, 0.27138997, 0.53532102],\n [ 0.25542898, 0.27584915, 0.53694571],\n [ 0.25377689, 0.28028798, 0.5385036 ],\n [ 0.25210122, 0.28470627, 0.53999668],\n [ 0.25040396, 0.28910388, 0.54142695],\n [ 0.24868713, 0.29348068, 0.54279642],\n [ 0.2469527 , 0.29783657, 0.54410709],\n [ 0.24520266, 0.3021715 , 0.545361 ],\n [ 0.24343892, 0.30648544, 0.54656016],\n [ 0.24166391, 0.31077823, 0.54770676],\n [ 0.23987982, 0.31504978, 0.54880295],\n [ 0.23808762, 0.3193005 , 0.54985035],\n [ 0.23628909, 0.3235305 , 0.5508509 ],\n [ 0.23448595, 0.32773993, 0.5518065 ],\n [ 0.23267989, 0.33192894, 0.55271901],\n [ 0.23087254, 0.33609773, 0.55359027],\n [ 0.22906545, 0.34024654, 0.55442204],\n [ 0.22726014, 0.34437561, 0.55521608],\n [ 0.22545806, 0.34848521, 0.55597407],\n [ 0.22366058, 0.35257566, 0.55669764],\n [ 0.22186902, 0.35664726, 0.55738837],\n [ 0.22008461, 0.36070037, 0.55804779],\n [ 
0.21830854, 0.36473534, 0.55867737],\n [ 0.21654188, 0.36875256, 0.55927849],\n [ 0.21478568, 0.37275242, 0.55985251],\n [ 0.21304088, 0.37673533, 0.5604007 ],\n [ 0.21130836, 0.38070172, 0.56092426],\n [ 0.20958891, 0.38465203, 0.56142436],\n [ 0.20788325, 0.3885867 , 0.56190206],\n [ 0.20619204, 0.3925062 , 0.56235839],\n [ 0.20451584, 0.39641098, 0.56279428],\n [ 0.20285517, 0.40030153, 0.56321061],\n [ 0.20121044, 0.40417832, 0.56360819],\n [ 0.19958202, 0.40804183, 0.56398778],\n [ 0.19797019, 0.41189255, 0.56435004],\n [ 0.1963752 , 0.41573098, 0.56469559],\n [ 0.19479719, 0.41955759, 0.56502496],\n [ 0.19323628, 0.42337288, 0.56533864],\n [ 0.19169253, 0.42717733, 0.56563705],\n [ 0.19016593, 0.43097144, 0.56592052],\n [ 0.18865644, 0.43475567, 0.56618935],\n [ 0.18716399, 0.43853052, 0.56644376],\n [ 0.18568845, 0.44229644, 0.56668391],\n [ 0.1842297 , 0.44605392, 0.5669099 ],\n [ 0.18278756, 0.4498034 , 0.56712177],\n [ 0.18136186, 0.45354533, 0.56731952],\n [ 0.17995242, 0.45728016, 0.56750306],\n [ 0.17855907, 0.46100832, 0.56767227],\n [ 0.17718163, 0.46473023, 0.56782696],\n [ 0.17581998, 0.4684463 , 0.5679669 ],\n [ 0.17447399, 0.47215693, 0.5680918 ],\n [ 0.17314359, 0.4758625 , 0.56820133],\n [ 0.17182876, 0.47956339, 0.5682951 ],\n [ 0.17052956, 0.48325995, 0.56837267],\n [ 0.1692461 , 0.48695252, 0.56843358],\n [ 0.16797885, 0.49064139, 0.56847745],\n [ 0.16672853, 0.49432679, 0.56850396],\n [ 0.16549485, 0.49800916, 0.5685121 ],\n [ 0.16427838, 0.50168876, 0.56850123],\n [ 0.16307978, 0.50536586, 0.56847067],\n [ 0.16189991, 0.50904071, 0.56841971],\n [ 0.16073977, 0.51271352, 0.5683476 ],\n [ 0.15960057, 0.51638451, 0.56825356],\n [ 0.1584837 , 0.52005386, 0.56813679],\n [ 0.15739078, 0.52372174, 0.56799646],\n [ 0.15632364, 0.5273883 , 0.56783172],\n [ 0.15528438, 0.53105366, 0.56764168],\n [ 0.15427564, 0.53471788, 0.56742564],\n [ 0.15330014, 0.53838103, 0.56718273],\n [ 0.15236029, 0.54204327, 0.56691174],\n [ 0.1514593 , 0.54570462, 0.56661171],\n [ 0.15060068, 0.54936512, 0.56628168],\n [ 0.14978824, 0.55302477, 0.56592069],\n [ 0.14902614, 0.55668353, 0.56552775],\n [ 0.14831885, 0.56034138, 0.56510188],\n [ 0.14767117, 0.56399824, 0.5646421 ],\n [ 0.14708824, 0.56765403, 0.56414742],\n [ 0.14657555, 0.57130864, 0.56361689],\n [ 0.14613895, 0.57496191, 0.5630496 ],\n [ 0.14578423, 0.57861374, 0.56244438],\n [ 0.14551771, 0.58226396, 0.56180026],\n [ 0.14534595, 0.58591236, 0.56111627],\n [ 0.14527574, 0.58955873, 0.56039146],\n [ 0.14531405, 0.59320283, 0.55962489],\n [ 0.14546796, 0.5968444 , 0.55881564],\n [ 0.14574467, 0.60048316, 0.5579628 ],\n [ 0.14615138, 0.60411879, 0.55706548],\n [ 0.14669517, 0.607751 , 0.55612273],\n [ 0.14738303, 0.61137946, 0.55513357],\n [ 0.14822204, 0.61500377, 0.5540973 ],\n [ 0.14921896, 0.61862356, 0.55301312],\n [ 0.15038025, 0.6222384 , 0.55188028],\n [ 0.15171204, 0.62584787, 0.55069804],\n [ 0.15322007, 0.62945152, 0.54946571],\n [ 0.15490964, 0.63304888, 0.54818262],\n [ 0.15678555, 0.63663945, 0.54684816],\n [ 0.15885206, 0.64022273, 0.54546173],\n [ 0.16111284, 0.6437982 , 0.54402268],\n [ 0.16357079, 0.64736537, 0.54252996],\n [ 0.16622882, 0.6509236 , 0.54098368],\n [ 0.16908891, 0.65447229, 0.53938344],\n [ 0.17215248, 0.65801084, 0.53772891],\n [ 0.1754204 , 0.66153862, 0.53601978],\n [ 0.17889298, 0.66505497, 0.53425584],\n [ 0.18257005, 0.66855923, 0.53243691],\n [ 0.18645098, 0.67205072, 0.53056289],\n [ 0.19053468, 0.67552871, 0.52863375],\n [ 0.19481972, 0.6789925 , 0.52664953],\n [ 0.1993043 , 0.68244133, 
0.52461034],\n [ 0.20398632, 0.68587445, 0.52251639],\n [ 0.20886365, 0.68929113, 0.52036688],\n [ 0.21393364, 0.69269051, 0.5181632 ],\n [ 0.21919356, 0.69607177, 0.51590585],\n [ 0.22464052, 0.69943407, 0.51359544],\n [ 0.23027153, 0.70277656, 0.51123265],\n [ 0.23608352, 0.70609837, 0.50881832],\n [ 0.24207335, 0.70939862, 0.50635339],\n [ 0.24823786, 0.71267639, 0.50383895],\n [ 0.25457385, 0.71593078, 0.50127621],\n [ 0.26107813, 0.71916085, 0.49866655],\n [ 0.2677475 , 0.72236566, 0.49601153],\n [ 0.27457878, 0.72554425, 0.49331285],\n [ 0.2815688 , 0.72869565, 0.49057242],\n [ 0.2887144 , 0.73181887, 0.48779233],\n [ 0.29601244, 0.73491295, 0.48497491],\n [ 0.30345981, 0.73797687, 0.48212268],\n [ 0.31105336, 0.74100964, 0.47923843],\n [ 0.31878998, 0.74401027, 0.47632519],\n [ 0.32666653, 0.74697776, 0.47338625],\n [ 0.33467983, 0.74991112, 0.47042521],\n [ 0.34282668, 0.75280936, 0.46744595],\n [ 0.35110384, 0.75567153, 0.46445269],\n [ 0.35950797, 0.75849666, 0.46144996],\n [ 0.36803567, 0.76128384, 0.45844266],\n [ 0.37668344, 0.76403218, 0.45543604],\n [ 0.38544765, 0.7667408 , 0.45243575],\n [ 0.39432454, 0.7694089 , 0.44944779],\n [ 0.40331021, 0.77203572, 0.44647859],\n [ 0.41240057, 0.77462055, 0.44353498],\n [ 0.42159135, 0.77716276, 0.4406242 ],\n [ 0.43087809, 0.77966179, 0.43775387],\n [ 0.4402561 , 0.78211718, 0.43493205],\n [ 0.44972048, 0.78452855, 0.43216717],\n [ 0.45926607, 0.78689563, 0.42946802],\n [ 0.46888748, 0.7892183 , 0.42684375],\n [ 0.47857908, 0.79149652, 0.42430383],\n [ 0.48833499, 0.79373043, 0.42185799],\n [ 0.49814908, 0.79592027, 0.41951618],\n [ 0.50801501, 0.79806648, 0.41728852],\n [ 0.51792621, 0.80016963, 0.41518523],\n [ 0.5278759 , 0.80223047, 0.41321655],\n [ 0.53785667, 0.80425 , 0.41139298],\n [ 0.5478616 , 0.8062293 , 0.40972442],\n [ 0.55788351, 0.8081696 , 0.40822062],\n [ 0.56791519, 0.81007234, 0.40689099],\n [ 0.57794942, 0.81193908, 0.40574446],\n [ 0.58797905, 0.81377155, 0.40478944],\n [ 0.59799701, 0.81557158, 0.4040337 ],\n [ 0.60799641, 0.81734115, 0.40348428],\n [ 0.61797054, 0.81908233, 0.40314744],\n [ 0.62791297, 0.82079727, 0.40302856],\n [ 0.63781758, 0.8224882 , 0.40313214],\n [ 0.6476786 , 0.82415737, 0.40346171],\n [ 0.65749067, 0.82580707, 0.40401987],\n [ 0.66724885, 0.82743959, 0.40480824],\n [ 0.67694867, 0.82905718, 0.40582751],\n [ 0.68658614, 0.83066207, 0.40707747],\n [ 0.69615776, 0.83225641, 0.40855703],\n [ 0.70566056, 0.83384229, 0.41026431],\n [ 0.71509205, 0.8354217 , 0.41219669],\n [ 0.72445026, 0.83699653, 0.41435088],\n [ 0.73373341, 0.83856862, 0.41672307],\n [ 0.74293191, 0.84014248, 0.41930934],\n [ 0.75205237, 0.84171735, 0.42210392],\n [ 0.76109459, 0.84329463, 0.42510159],\n [ 0.77005877, 0.84487561, 0.4282969 ],\n [ 0.7789455 , 0.84646142, 0.43168424],\n [ 0.7877557 , 0.84805303, 0.43525793],\n [ 0.79649058, 0.84965132, 0.43901227],\n [ 0.80513957, 0.85126153, 0.44293939],\n [ 0.81370767, 0.85288321, 0.44703322],\n [ 0.82220381, 0.85451429, 0.4512893 ],\n [ 0.83063017, 0.85615512, 0.45570229],\n [ 0.83898905, 0.85780596, 0.4602671 ],\n [ 0.84726348, 0.85947482, 0.46497193],\n [ 0.85546684, 0.86115762, 0.46981455],\n [ 0.86360871, 0.86285158, 0.47479281],\n [ 0.87169181, 0.86455657, 0.47990267],\n [ 0.87969472, 0.86628276, 0.48512849],\n [ 0.88763554, 0.86802364, 0.49047254],\n [ 0.89552455, 0.86977573, 0.49593512],\n [ 0.90335389, 0.8715434 , 0.50150708],\n [ 0.91111136, 0.87333317, 0.50717547],\n [ 0.91882427, 0.87513377, 0.51295184],\n [ 0.92648868, 0.87694776, 0.51882966],\n [ 0.93408127, 
0.87878692, 0.52478777],\n [ 0.94163648, 0.88063609, 0.53084585],\n [ 0.94914523, 0.88250029, 0.5369936 ],\n [ 0.95659136, 0.88438815, 0.54321369],\n [ 0.964007 , 0.88628495, 0.54952816],\n [ 0.97136951, 0.88820246, 0.55591444],\n [ 0.97868693, 0.89013755, 0.56237537],\n [ 0.98598024, 0.89208031, 0.56892728]]\n\ntest_cm = LinearSegmentedColormap.from_list(__file__, cm_data)\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import numpy as np\n\n try:\n from pycam02ucs.cm.viscm import viscm\n viscm(test_cm)\n except ImportError:\n print(\"pycam02ucs not found, falling back on simple display\")\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',\n cmap=test_cm)\n plt.show()\n"
]
| [
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.linspace"
]
]
|
surasakcho/zkyhaxpy | [
"4d3114b0cce9b19cc92c4e7b1b6aa59f3c7a88f5"
]
| [
"zkyhaxpy/colab_tools.py"
]
| [
"import os\r\nfrom google.colab import auth\r\nfrom google.colab import drive\r\nfrom zkyhaxpy.io_tools import get_list_files_re, filepaths_to_df\r\nimport pandas as pd\r\nimport re\r\n\r\n\r\ndef check_dup_files(folder, file_nm_prefix=None, file_nm_extension=None, remove=False):\r\n if file_nm_prefix==None:\r\n file_nm_prefix = '.*'\r\n if file_nm_extension==None:\r\n file_nm_extension = '.*'\r\n \r\n list_file_paths = get_list_files_re(folder, file_nm_prefix + '.*\\([0-9]{1,}\\)\\.' + file_nm_extension) \r\n \r\n if remove==True:\r\n print(f'Total of {len(list_file_paths)} duplicated files will be removed.')\r\n for filepath in list_file_paths:\r\n os.remove(filepath)\r\n print(f'{filepath} is removed.')\r\n else: \r\n return list_file_paths\r\n \r\n \r\ndef mount_drive():\r\n drive.mount('/content/drive', force_remount=True)\r\n \r\ndef authen_gcp():\r\n auth.authenticate_user()\r\n\r\n\r\n\r\n\r\ndef get_list_files_re_gcs(bucket_path_prefix, storage_client, filename_re=None, folder_re=None, return_as_df=False):\r\n '''\r\n Get a list of files in Google Cloud Storage\r\n\r\n Parameters\r\n -------------------------------------\r\n bucket_path_prefix (str): \r\n prefix of files to be listed (must start with bucket name)\r\n For example, if bucket name = 'test-bucket', and a file is saved at '/test-bucket/path_prefix/test1.file'\r\n To list all files in '/test-bucket/path_prefix/', path_prefix shall be '/test-bucket/path_prefix/'\r\n\r\n storage_client (google.cloud.storage.client.Client):\r\n Storage client that can be used to access the bucket\r\n\r\n filename_re (str): \r\n regular expression to search for filename\r\n\r\n folder_re (str): \r\n regular expression to search for folder\r\n\r\n\r\n Output\r\n -------------------------------------\r\n return : a list of tuple(filepaths\r\n '''\r\n\r\n bucket_name = bucket_path_prefix.split('/')[1]\r\n path_prefix = bucket_path_prefix[len(bucket_name)+2:]\r\n bucket = storage_client.bucket(bucket_name)\r\n\r\n all_blobs = list(bucket.list_blobs(prefix=path_prefix))\r\n\r\n list_files = []\r\n if len(all_blobs) == 0:\r\n return list_files\r\n\r\n\r\n df_files = pd.DataFrame(all_blobs, columns=['blob'])\r\n df_files['path'] = df_files['blob'].astype(str).str.split(',', expand=True).loc[:, 1].str.strip() \r\n\r\n if filename_re == None:\r\n filename_re = '.*'\r\n if folder_re == None:\r\n folder_re = '.*'\r\n for filepath in list(df_files['path']): \r\n file_nm = os.path.basename(filepath)\r\n folder = os.path.dirname(filepath)\r\n if ((re.search(filename_re, file_nm) != None) & (re.search(folder_re, folder) != None)):\r\n list_files.append(os.path.join('/'+bucket_name, folder, file_nm))\r\n\r\n \r\n if return_as_df==False:\r\n return list_files\r\n else:\r\n return filepaths_to_df(list_files) \r\n \r\n "
]
| [
[
"pandas.DataFrame"
]
]
|
dzzxjl/DeepCTR | [
"ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e"
]
| [
"deepctr/models/fwfm.py"
]
| [
"# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Harshit Pande\n\nReference:\n [1] Field-weighted Factorization Machines for Click-Through Rate Prediction in Display Advertising\n (https://arxiv.org/pdf/1806.03514.pdf)\n\n\"\"\"\n\nfrom itertools import chain\n\nimport tensorflow as tf\n\nfrom ..feature_column import build_input_features, get_linear_logit, DEFAULT_GROUP_NAME, input_from_feature_columns\nfrom ..layers.core import PredictionLayer, DNN\nfrom ..layers.interaction import FwFMLayer\nfrom ..layers.utils import concat_func, add_func, combined_dnn_input\n\n\ndef FwFM(linear_feature_columns, dnn_feature_columns, fm_group=(DEFAULT_GROUP_NAME,), dnn_hidden_units=(256, 128, 64),\n l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_field_strength=0.00001, l2_reg_dnn=0,\n seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'):\n \"\"\"Instantiates the FwFM Network architecture.\n\n :param linear_feature_columns: An iterable containing all the features used by linear part of the model.\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param fm_group: list, group_name of features that will be used to do feature interactions.\n :param dnn_hidden_units: list,list of positive integer or empty list if do not want DNN, the layer number and units\n in each layer of DNN\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_field_strength: float. L2 regularizer strength applied to the field pair strength parameters\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param seed: integer ,to use as random seed.\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param dnn_activation: Activation function to use in DNN\n :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n \"\"\"\n\n features = build_input_features(linear_feature_columns + dnn_feature_columns)\n\n inputs_list = list(features.values())\n\n linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear',\n l2_reg=l2_reg_linear)\n\n group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,\n l2_reg_embedding, seed,\n support_group=True)\n\n fwfm_logit = add_func([FwFMLayer(num_fields=len(v), regularizer=l2_reg_field_strength)\n (concat_func(v, axis=1)) for k, v in group_embedding_dict.items() if k in fm_group])\n\n final_logit_components = [linear_logit, fwfm_logit]\n\n if dnn_hidden_units:\n dnn_input = combined_dnn_input(list(chain.from_iterable(\n group_embedding_dict.values())), dense_value_list)\n dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)\n dnn_logit = tf.keras.layers.Dense(\n 1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)\n final_logit_components.append(dnn_logit)\n\n final_logit = add_func(final_logit_components)\n\n output = PredictionLayer(task)(final_logit)\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n"
]
| [
[
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.keras.models.Model"
]
]
|
RLReed/unotran | [
"b317107e1a39490dda732f86a731872f5207a167"
]
| [
"src/test/test_mesh.py"
]
| [
"import sys\nsys.path.append('../')\n\nimport unittest\nimport pydgm\nimport numpy as np\n\n\nclass TestMESH(unittest.TestCase):\n\n def test_mesh_create_mesh_1D(self):\n ''' \n Test the mesh initialization for 1D\n '''\n\n # Set the variables for the test\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = [2, 4, 2]\n pydgm.control.coarse_mesh_x = [0.0, 1.0, 2.0, 3.0]\n pydgm.control.material_map = [1, 2, 3]\n\n # Initialize the dependancies\n pydgm.mesh.create_mesh()\n\n # Test the number of cells\n self.assertEqual(pydgm.control.number_cells, 8)\n\n # Test the cell size\n dx_test = [0.5, 0.5, 0.25, 0.25, 0.25, 0.25, 0.5, 0.5]\n np.testing.assert_array_equal(pydgm.mesh.dx, dx_test)\n\n # Test the material assignment\n mMap_test = [1, 1, 2, 2, 2, 2, 3, 3]\n np.testing.assert_array_equal(pydgm.mesh.mmap, mMap_test)\n\n # Test the problem width\n self.assertEqual(pydgm.mesh.width_x, 3.0)\n\n def test_mesh_create_mesh_2D(self):\n ''' \n Test the mesh initialization for 2D\n '''\n\n # Set the variables for the test\n pydgm.control.spatial_dimension = 2\n pydgm.control.fine_mesh_x = [5, 5, 3]\n pydgm.control.fine_mesh_y = [4, 4, 2]\n pydgm.control.coarse_mesh_x = [0.0, 21.42, 42.84, 64.26]\n pydgm.control.coarse_mesh_y = [0.0, 21.42, 42.84, 64.26]\n pydgm.control.material_map = [2, 4, 5,\n 4, 2, 5,\n 5, 5, 5]\n\n # Initialize the dependancies\n pydgm.mesh.create_mesh()\n\n # Test the number of cells\n self.assertEqual(pydgm.control.number_cells, 130)\n self.assertEqual(pydgm.control.number_cells_x, 13)\n self.assertEqual(pydgm.control.number_cells_y, 10)\n\n # Test the cell size\n dx_test = [4.284, 4.284, 4.284, 4.284, 4.284, 4.284, 4.284, 4.284, 4.284, 4.284, 7.14, 7.14, 7.14]\n dy_test = [5.355, 5.355, 5.355, 5.355, 5.355, 5.355, 5.355, 5.355, 10.71, 10.71, 10.71]\n np.testing.assert_array_almost_equal(pydgm.mesh.dx, dx_test, 12)\n np.testing.assert_array_almost_equal(pydgm.mesh.dx, dx_test, 12)\n\n # Test the material assignment\n mMap_test = [2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 5, 5, 5,\n 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 5, 5, 5,\n 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 5, 5, 5,\n 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 5, 5, 5,\n 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 5, 5, 5,\n 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 5, 5, 5,\n 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 5, 5, 5,\n 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n np.testing.assert_array_equal(pydgm.mesh.mmap, mMap_test)\n\n # Test the problem width\n self.assertEqual(pydgm.mesh.width_x, 64.26)\n self.assertEqual(pydgm.mesh.width_y, 64.26)\n\n def tearDown(self):\n pydgm.mesh.finalize_mesh()\n pydgm.control.finalize_control()\n\n\nif __name__ == '__main__':\n\n unittest.main()\n"
]
| [
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_array_almost_equal"
]
]
|
chaselee/autokeras | [
"6e0ae127810c353722669b897f17ee3ca9ce2ee2"
]
| [
"autokeras/nodes.py"
]
| [
"# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nfrom autokeras import adapters\nfrom autokeras import analysers\nfrom autokeras import blocks\nfrom autokeras import hyper_preprocessors as hpps_module\nfrom autokeras import preprocessors\nfrom autokeras.engine import io_hypermodel\nfrom autokeras.engine import node as node_module\nfrom autokeras.utils import data_utils\n\n\ndef serialize(obj):\n return tf.keras.utils.serialize_keras_object(obj)\n\n\ndef deserialize(config, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n config,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"nodes\",\n )\n\n\nclass Input(node_module.Node, io_hypermodel.IOHyperModel):\n \"\"\"Input node for tensor data.\n\n The data should be numpy.ndarray or tf.data.Dataset.\n\n # Arguments\n name: String. The name of the input node. If unspecified, it will be set\n automatically with the class name.\n \"\"\"\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def build_node(self, hp):\n return tf.keras.Input(shape=self.shape, dtype=self.dtype)\n\n def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)[0]\n return data_utils.cast_to_float32(input_node)\n\n def get_adapter(self):\n return adapters.InputAdapter()\n\n def get_analyser(self):\n return analysers.InputAnalyser()\n\n def get_block(self):\n return blocks.GeneralBlock()\n\n def get_hyper_preprocessors(self):\n return []\n\n\nclass ImageInput(Input):\n \"\"\"Input node for image data.\n\n The input data should be numpy.ndarray or tf.data.Dataset. The shape of the data\n should be should be (samples, width, height) or\n (samples, width, height, channels).\n\n # Arguments\n name: String. The name of the input node. If unspecified, it will be set\n automatically with the class name.\n \"\"\"\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def build(self, hp, inputs=None):\n inputs = super().build(hp, inputs)\n output_node = nest.flatten(inputs)[0]\n if len(output_node.shape) == 3:\n output_node = tf.expand_dims(output_node, axis=-1)\n return output_node\n\n def get_adapter(self):\n return adapters.ImageAdapter()\n\n def get_analyser(self):\n return analysers.ImageAnalyser()\n\n def get_block(self):\n return blocks.ImageBlock()\n\n\nclass TextInput(Input):\n \"\"\"Input node for text data.\n\n The input data should be numpy.ndarray or tf.data.Dataset. The data should be\n one-dimensional. Each element in the data should be a string which is a full\n sentence.\n\n # Arguments\n name: String. The name of the input node. 
If unspecified, it will be set\n automatically with the class name.\n \"\"\"\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def build_node(self, hp):\n return tf.keras.Input(shape=self.shape, dtype=tf.string)\n\n def build(self, hp, inputs=None):\n output_node = nest.flatten(inputs)[0]\n if len(output_node.shape) == 1:\n output_node = tf.expand_dims(output_node, axis=-1)\n return output_node\n\n def get_adapter(self):\n return adapters.TextAdapter()\n\n def get_analyser(self):\n return analysers.TextAnalyser()\n\n def get_block(self):\n return blocks.TextBlock()\n\n\nclass StructuredDataInput(Input):\n \"\"\"Input node for structured data.\n\n The input data should be numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n The data should be two-dimensional with numerical or categorical values.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will be obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data. A column will be judged as\n categorical if the number of different values is less than 5% of the\n number of instances.\n name: String. The name of the input node. If unspecified, it will be set\n automatically with the class name.\n \"\"\"\n\n def __init__(self, column_names=None, column_types=None, name=None, **kwargs):\n super().__init__(name=name, **kwargs)\n self.column_names = column_names\n self.column_types = column_types\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\"column_names\": self.column_names, \"column_types\": self.column_types}\n )\n return config\n\n def get_adapter(self):\n return adapters.StructuredDataAdapter()\n\n def get_analyser(self):\n return analysers.StructuredDataAnalyser(self.column_names, self.column_types)\n\n def get_block(self):\n return blocks.StructuredDataBlock()\n\n def config_from_analyser(self, analyser):\n super().config_from_analyser(analyser)\n self.column_names = analyser.column_names\n # Analyser keeps the specified ones and infer the missing ones.\n self.column_types = analyser.column_types\n\n def build(self, hp, inputs=None):\n return inputs\n\n\nclass TimeseriesInput(StructuredDataInput):\n \"\"\"Input node for timeseries data.\n\n # Arguments\n lookback: Int. The range of history steps to consider for each prediction.\n For example, if lookback=n, the data in the range of [i - n, i - 1]\n is used to predict the value of step i. If unspecified, it will be tuned\n automatically.\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will be obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data. A column will be judged as\n categorical if the number of different values is less than 5% of the\n number of instances.\n name: String. 
The name of the input node. If unspecified, it will be set\n automatically with the class name.\n \"\"\"\n\n def __init__(\n self,\n lookback: Optional[int] = None,\n column_names: Optional[List[str]] = None,\n column_types: Optional[Dict[str, str]] = None,\n name: Optional[str] = None,\n **kwargs\n ):\n super().__init__(\n column_names=column_names, column_types=column_types, name=name, **kwargs\n )\n self.lookback = lookback\n\n def get_config(self):\n config = super().get_config()\n config.update({\"lookback\": self.lookback})\n return config\n\n def get_adapter(self):\n return adapters.TimeseriesAdapter()\n\n def get_analyser(self):\n return analysers.TimeseriesAnalyser(\n column_names=self.column_names, column_types=self.column_types\n )\n\n def get_block(self):\n return blocks.TimeseriesBlock()\n\n def config_from_analyser(self, analyser):\n super().config_from_analyser(analyser)\n\n def get_hyper_preprocessors(self):\n hyper_preprocessors = []\n hyper_preprocessors.append(\n hpps_module.DefaultHyperPreprocessor(\n preprocessors.SlidingWindow(\n lookback=self.lookback, batch_size=self.batch_size\n )\n )\n )\n return hyper_preprocessors\n"
]
| [
[
"tensorflow.expand_dims",
"tensorflow.keras.utils.serialize_keras_object",
"tensorflow.python.util.nest.flatten",
"tensorflow.keras.Input"
]
]
|
mbonto/fewshot_neuroimaging_classification | [
"2ff0aab6d2c7991e566200d8e4da4b2cbf025a4a"
]
| [
"MAML_plus_plus/meta_neural_network_architectures.py"
]
| [
"import numbers\nfrom copy import copy\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\nfrom scipy import io as sio\n\n\n\ndef extract_top_level_dict(current_dict):\n \"\"\"\n Builds a graph dictionary from the passed depth_keys, value pair. Useful for dynamically passing external params\n :param depth_keys: A list of strings making up the name of a variable. Used to make a graph for that params tree.\n :param value: Param value\n :param key_exists: If none then assume new dict, else load existing dict and add new key->value pairs to it.\n :return: A dictionary graph of the params already added to the graph.\n \"\"\"\n output_dict = dict()\n for key in current_dict.keys():\n name = key.replace(\"layer_dict.\", \"\")\n name = name.replace(\"layer_dict.\", \"\")\n name = name.replace(\"block_dict.\", \"\")\n name = name.replace(\"module-\", \"\")\n top_level = name.split(\".\")[0]\n sub_level = \".\".join(name.split(\".\")[1:])\n\n if top_level not in output_dict:\n if sub_level == \"\":\n output_dict[top_level] = current_dict[key]\n else:\n output_dict[top_level] = {sub_level: current_dict[key]}\n else:\n new_item = {key: value for key, value in output_dict[top_level].items()}\n new_item[sub_level] = current_dict[key]\n output_dict[top_level] = new_item\n return output_dict\n\n\n\nclass MetaConv1dLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):\n \"\"\"\n A MetaConv2D layer. Applies the same functionality of a standard Conv2D layer with the added functionality of\n being able to receive a parameter dictionary at the forward pass which allows the convolution to use external\n weights instead of the internal ones stored in the conv layer. Useful for inner loop optimization in the meta\n learning setting.\n :param in_channels: Number of input channels\n :param out_channels: Number of output channels\n :param kernel_size: Convolutional kernel size\n :param stride: Convolutional stride\n :param padding: Convolution padding\n :param use_bias: Boolean indicating whether to use a bias or not.\n \"\"\"\n super(MetaConv1dLayer, self).__init__()\n num_filters = out_channels\n self.stride = int(stride)\n self.padding = int(padding)\n self.dilation_rate = int(dilation_rate)\n self.use_bias = use_bias\n self.groups = int(groups)\n self.weight = nn.Parameter(torch.empty(num_filters, in_channels, kernel_size))\n nn.init.xavier_uniform_(self.weight)\n\n if self.use_bias:\n self.bias = nn.Parameter(torch.zeros(num_filters))\n\n def forward(self, x, params=None):\n \"\"\"\n Applies a conv2D forward pass. 
If params are not None will use the passed params as the conv weights and biases\n :param x: Input image batch.\n :param params: If none, then conv layer will use the stored self.weights and self.bias, if they are not none\n then the conv layer will use the passed params as its parameters.\n :return: The output of a convolutional function.\n \"\"\"\n if params is not None:\n params = extract_top_level_dict(current_dict=params)\n if self.use_bias:\n (weight, bias) = params[\"weight\"], params[\"bias\"]\n else:\n (weight) = params[\"weight\"]\n bias = None\n else:\n if self.use_bias:\n weight, bias = self.weight, self.bias\n else:\n weight = self.weight\n bias = None\n\n out = F.conv1d(input=x, weight=weight, bias=bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation_rate, groups=self.groups)\n return out\n \n def restore_backup_stats(self):\n pass\n\n\n\nclass Conv1d(nn.Module):\n def __init__(self, num_output_classes, args, device, meta_classifier=True):\n \"\"\"\n Builds a multilayer MLP network. It also provides functionality for passing external parameters to be\n used at inference time. Enables inner loop optimization readily.\n \n Parameters:\n num_output_classes -- The number of output classes of the network.\n args -- A named tuple containing the system's hyperparameters.\n device -- The device to run this on.\n meta_classifier -- A flag indicating whether the system's meta-learning (inner-loop) functionalities should\n be enabled.\n \"\"\"\n super(Conv1d, self).__init__()\n self.device = device\n self.args = args\n self.num_features = args.num_features\n self.num_layers = args.num_stages ## number of layers\n self.num_output_classes = num_output_classes\n\n self.meta_classifier = meta_classifier\n\n self.build_network()\n print(\"meta network params\")\n for name, param in self.named_parameters():\n print(name, param.shape)\n\n def build_network(self):\n \"\"\"\n Builds the network before inference is required by creating some dummy inputs with the same input as the\n self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and\n sets output shapes for each layer.\n \"\"\"\n self.layer_dict = nn.ModuleDict()\n self.layer_dict['conv0'] = MetaConv1dLayer(in_channels=1, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, use_bias=True, groups=1, dilation_rate=1)\n for i in range(1, self.num_layers):\n self.layer_dict['conv{}'.format(i)] = MetaConv1dLayer(in_channels=self.num_features, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, use_bias=True, groups=1, dilation_rate=1)\n\n self.layer_dict['linear'] = MetaLinearLayer(360, self.num_output_classes, use_bias=True, activation=False)\n \n\n def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):\n \"\"\"\n Forward propages through the network. If any params are passed then they are used instead of stored params.\n :param x: Input image batch.\n :param num_step: The current inner loop step number\n :param params: If params are None then internal parameters are used. If params are a dictionary with keys the\n same as the layer names then they will be used instead.\n :param training: Whether this is training (True) or eval time.\n :param backup_running_statistics: Whether to backup the running statistics in their backup store. 
Which is\n then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics)\n :return: Logits of shape b, num_output_classes.\n \"\"\"\n param_dict = dict()\n\n if params is not None:\n params = {key: value[0] for key, value in params.items()}\n param_dict = extract_top_level_dict(current_dict=params)\n\n for name, param in self.layer_dict.named_parameters():\n path_bits = name.split(\".\")\n layer_name = path_bits[0]\n if layer_name not in param_dict:\n param_dict[layer_name] = None\n\n # print(x.shape) # 5, 1, 360, 1\n out = torch.squeeze(x, dim=-1)\n # print('input shape is: ' + str(out.shape))\n\n for i in range(self.num_layers):\n out = self.layer_dict['conv{}'.format(i)](out, params=param_dict['conv{}'.format(i)])\n out = F.relu(out)\n \n # Average pooling by \"pixels\".\n out = torch.mean(out, dim=1)\n out = torch.squeeze(out, dim=-2)\n # Or keep all coefficients until the end.\n # out = out.view(out.size(0), -1)\n \n out = self.layer_dict['linear'](out, param_dict['linear'])\n\n return out\n\n def zero_grad(self, params=None):\n if params is None:\n for param in self.parameters():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n else:\n for name, param in params.items():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n params[name].grad = None\n\n def restore_backup_stats(self):\n \"\"\"\n Reset stored batch statistics from the stored backup.\n \"\"\"\n for i in range(self.num_layers):\n self.layer_dict['conv{}'.format(i)].restore_backup_stats()\n\n\n\nclass MetaLinearLayer(nn.Module):\n def __init__(self, num_in_filters, num_out_filters, use_bias, activation):\n \"\"\"\n A MetaLinear layer. Applies the same functionality of a standard linearlayer with the added functionality of\n being able to receive a parameter dictionary at the forward pass which allows the convolution to use external\n weights instead of the internal ones stored in the linear layer. Useful for inner loop optimization in the meta\n learning setting.\n :param input_shape: The shape of the input data, in the form (b, f)\n :param num_filters: Number of output filters\n :param use_bias: Whether to use biases or not.\n \"\"\"\n super(MetaLinearLayer, self).__init__()\n self.use_bias = use_bias\n self.weights = nn.Parameter(torch.ones(num_out_filters, num_in_filters))\n nn.init.xavier_uniform_(self.weights)\n if self.use_bias:\n self.bias = nn.Parameter(torch.zeros(num_out_filters))\n self.activation = activation\n\n def forward(self, x, params=None):\n \"\"\"\n Forward propagates by applying a linear function (Wx + b). If params are none then internal params are used.\n Otherwise passed params will be used to execute the function.\n :param x: Input data batch, in the form (b, f)\n :param params: A dictionary containing 'weights' and 'bias'. 
If params are none then internal params are used.\n Otherwise the external are used.\n :return: The result of the linear function.\n \"\"\"\n if params is not None:\n params = extract_top_level_dict(current_dict=params)\n if self.use_bias:\n (weight, bias) = params[\"weights\"], params[\"bias\"]\n else:\n (weight) = params[\"weights\"]\n bias = None\n else:\n pass\n\n if self.use_bias:\n weight, bias = self.weights, self.bias\n else:\n weight = self.weights\n bias = None\n \n out = F.linear(input=x, weight=weight, bias=bias)\n if self.activation:\n out = F.relu(out)\n return out\n \n def restore_backup_stats(self):\n pass\n\n\n\n### New architectures ###\nclass MLP(nn.Module):\n def __init__(self, num_output_classes, args, device, meta_classifier=True):\n \"\"\"\n Builds a multilayer MLP network. It also provides functionality for passing external parameters to be\n used at inference time. Enables inner loop optimization readily.\n \n Parameters:\n num_output_classes -- The number of output classes of the network.\n args -- A named tuple containing the system's hyperparameters.\n device -- The device to run this on.\n meta_classifier -- A flag indicating whether the system's meta-learning (inner-loop) functionalities should\n be enabled.\n \"\"\"\n super(MLP, self).__init__()\n self.device = device\n self.args = args\n self.num_features = args.num_features\n self.num_layers = args.num_stages ## number of hidden layers\n self.num_output_classes = num_output_classes\n self.meta_classifier = meta_classifier\n self.build_network()\n print(\"meta network params\")\n for name, param in self.named_parameters():\n print(name, param.shape)\n\n def build_network(self):\n \"\"\"\n Builds the network before inference is required by creating some dummy inputs with the same input as the\n self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and\n sets output shapes for each layer.\n \"\"\"\n self.layer_dict = nn.ModuleDict()\n self.layer_dict['linear0'] = MetaLinearLayer(360, self.num_features, use_bias=True, activation=True)\n for i in range(1, self.num_layers):\n self.layer_dict['linear{}'.format(i)] = MetaLinearLayer(self.num_features, self.num_features, use_bias=True, activation=True)\n self.layer_dict['linear'] = MetaLinearLayer(self.num_features, self.num_output_classes, use_bias=True, activation=False)\n \n\n def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):\n \"\"\"\n Forward propages through the network. If any params are passed then they are used instead of stored params.\n :param x: Input image batch.\n :param num_step: The current inner loop step number\n :param params: If params are None then internal parameters are used. If params are a dictionary with keys the\n same as the layer names then they will be used instead.\n :param training: Whether this is training (True) or eval time.\n :param backup_running_statistics: Whether to backup the running statistics in their backup store. 
Which is\n then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics)\n :return: Logits of shape b, num_output_classes.\n \"\"\"\n param_dict = dict()\n\n if params is not None:\n params = {key: value[0] for key, value in params.items()}\n param_dict = extract_top_level_dict(current_dict=params)\n\n for name, param in self.layer_dict.named_parameters():\n path_bits = name.split(\".\")\n layer_name = path_bits[0]\n if layer_name not in param_dict:\n param_dict[layer_name] = None\n\n # print(x.shape) 5, 1, 360, 1\n out = torch.squeeze(x, dim=-1)\n out = torch.squeeze(out, dim=1)\n # print('input shape is: ' + str(out.shape))\n\n for i in range(self.num_layers):\n out = self.layer_dict['linear{}'.format(i)](out, params=param_dict['linear{}'.format(i)])\n out = self.layer_dict['linear'](out, param_dict['linear'])\n\n return out\n\n def zero_grad(self, params=None):\n if params is None:\n for param in self.parameters():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n else:\n for name, param in params.items():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n params[name].grad = None\n\n def restore_backup_stats(self):\n \"\"\"\n Reset stored batch statistics from the stored backup.\n \"\"\"\n for i in range(self.num_layers):\n self.layer_dict['linear{}'.format(i)].restore_backup_stats()\n\n\n\ndef compute_normalized_adjacency(adj_matrix, threshold=True):\n norm_adj = torch.from_numpy(adj_matrix)\n if threshold:\n # We keep the 1% highest connections (threshold value of 0.945 for the considered brain graph).\n mask = (norm_adj > 0.945).type(torch.float)\n norm_adj = norm_adj * mask\n # Add the identity matrix to the adjacency matrix.\n norm_adj = torch.add(norm_adj, torch.eye(norm_adj.shape[0]))\n # Compute the degree matrix by summing the columns.\n degree = torch.sum(norm_adj, dim=1)\n degree = torch.pow(degree, -1/2)\n degree = torch.diag(degree)\n # Compute a normalized adjacency matrix.\n norm_adj = torch.matmul(torch.matmul(degree, norm_adj), degree)\n return norm_adj.type(torch.float)\n\n\ndef propagation(features, graph, k=1):\n \"\"\"\n Return the features propagated k times on the graph.\n \n Params:\n features -- tensor of size (batch, num_features).\n graph -- adjacency matrix of size (num_features, num_features).\n k -- number of times the features are propagated, integer.\n \"\"\"\n for _ in range(k):\n features = torch.matmul(features, graph)\n return features\n\n\nclass GNN(nn.Module):\n def __init__(self, num_output_classes, args, device, meta_classifier=True):\n \"\"\"\n Builds a multilayer GNN network. It also provides functionality for passing external parameters to be\n used at inference time. 
Enables inner loop optimization readily.\n \n Parameters:\n num_output_classes -- The number of output classes of the network.\n args -- A named tuple containing the system's hyperparameters.\n device -- The device to run this on.\n meta_classifier -- A flag indicating whether the system's meta-learning (inner-loop) functionalities should\n be enabled.\n \"\"\"\n super(GNN, self).__init__()\n self.device = device\n self.args = args\n self.num_features = args.num_features\n self.num_layers = args.num_stages\n self.num_output_classes = num_output_classes\n\n self.meta_classifier = meta_classifier\n\n self.build_network()\n print(\"meta network params\")\n for name, param in self.named_parameters():\n print(name, param.shape)\n \n # Load the structural connectivity matrix.\n # The connections are computed between the 360 regions defined in glasser. They are established from \n # structural MRI (T1, DWI). Self-connections are set to zero.\n # DWI: Tractography looking at the strength of the white fibers between regions, averaged on 56 subjects. \n connectivity_matrix_path = args.connectivity_matrix_path\n connectivity = sio.loadmat(connectivity_matrix_path)['SC_avg56'] \n self.graph = compute_normalized_adjacency(connectivity).cuda()\n \n # Number of diffusion step.\n self.k = 1\n \n \n def build_network(self):\n \"\"\"\n Builds the network before inference is required by creating some dummy inputs with the same input as the\n self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and\n sets output shapes for each layer.\n \"\"\"\n self.layer_dict = nn.ModuleDict()\n self.layer_dict['linear0'] = MetaLinearLayer(360, self.num_features, use_bias=True, activation=True)\n for i in range(1, self.num_layers):\n self.layer_dict['linear{}'.format(i)] = MetaLinearLayer(self.num_features, self.num_features, use_bias=True, activation=True)\n self.layer_dict['linear'] = MetaLinearLayer(self.num_features, self.num_output_classes, use_bias=True, activation=False)\n \n\n def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):\n \"\"\"\n Forward propages through the network. If any params are passed then they are used instead of stored params.\n :param x: Input image batch.\n :param num_step: The current inner loop step number\n :param params: If params are None then internal parameters are used. If params are a dictionary with keys the\n same as the layer names then they will be used instead.\n :param training: Whether this is training (True) or eval time.\n :param backup_running_statistics: Whether to backup the running statistics in their backup store. 
Which is\n then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics)\n :return: Logits of shape b, num_output_classes.\n \"\"\"\n param_dict = dict()\n\n if params is not None:\n params = {key: value[0] for key, value in params.items()}\n param_dict = extract_top_level_dict(current_dict=params)\n\n for name, param in self.layer_dict.named_parameters():\n path_bits = name.split(\".\")\n layer_name = path_bits[0]\n if layer_name not in param_dict:\n param_dict[layer_name] = None\n\n # Reshape the data.\n # print(x.shape) 5, 1, 360, 1\n out = torch.squeeze(x, dim=-1)\n out = torch.squeeze(out, dim=1)\n # print('input shape is: ' + str(out.shape))\n \n # Propagate the signal on the graph.\n out = propagation(out, self.graph, self.k)\n \n # Go through the MLP network.\n for i in range(self.num_layers):\n out = self.layer_dict['linear{}'.format(i)](out, params=param_dict['linear{}'.format(i)])\n\n out = self.layer_dict['linear'](out, param_dict['linear'])\n return out\n\n def zero_grad(self, params=None):\n if params is None:\n for param in self.parameters():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n else:\n for name, param in params.items():\n if param.requires_grad == True:\n if param.grad is not None:\n if torch.sum(param.grad) > 0:\n param.grad.zero_()\n params[name].grad = None\n\n def restore_backup_stats(self):\n \"\"\"\n Reset stored batch statistics from the stored backup.\n \"\"\"\n for i in range(self.num_layers):\n self.layer_dict['linear{}'.format(i)].restore_backup_stats()\n \n"
]
| [
[
"torch.mean",
"torch.ones",
"torch.empty",
"torch.zeros",
"torch.nn.ModuleDict",
"torch.nn.functional.conv1d",
"torch.sum",
"torch.from_numpy",
"torch.eye",
"scipy.io.loadmat",
"torch.matmul",
"torch.nn.functional.relu",
"torch.nn.init.xavier_uniform_",
"torch.diag",
"torch.nn.functional.linear",
"torch.pow",
"torch.squeeze"
]
]
|
cheinger/cudf | [
"f33130152eb325fe123be2f91ed8a9ac0dc28d0c"
]
| [
"python/cudf/cudf/core/index.py"
]
| [
"# Copyright (c) 2018-2022, NVIDIA CORPORATION.\n\nfrom __future__ import annotations\n\nimport math\nimport pickle\nimport warnings\nfrom functools import cached_property\nfrom numbers import Number\nfrom typing import (\n Any,\n Dict,\n List,\n MutableMapping,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nfrom pandas._config import get_option\n\nimport cudf\nfrom cudf._lib.datetime import extract_quarter, is_leap_year\nfrom cudf._lib.filling import sequence\nfrom cudf._lib.search import search_sorted\nfrom cudf.api.types import (\n _is_non_decimal_numeric_dtype,\n _is_scalar_or_zero_d_array,\n is_categorical_dtype,\n is_dtype_equal,\n is_interval_dtype,\n is_string_dtype,\n)\nfrom cudf.core._base_index import BaseIndex, _index_astype_docstring\nfrom cudf.core.column import (\n CategoricalColumn,\n ColumnBase,\n DatetimeColumn,\n IntervalColumn,\n NumericalColumn,\n StringColumn,\n TimeDeltaColumn,\n arange,\n column,\n)\nfrom cudf.core.column.column import as_column, concat_columns\nfrom cudf.core.column.string import StringMethods as StringMethods\nfrom cudf.core.dtypes import IntervalDtype\nfrom cudf.core.frame import Frame\nfrom cudf.core.mixins import BinaryOperand\nfrom cudf.core.single_column_frame import SingleColumnFrame\nfrom cudf.utils.docutils import copy_docstring, doc_apply\nfrom cudf.utils.dtypes import find_common_type\nfrom cudf.utils.utils import _cudf_nvtx_annotate, search_range\n\nT = TypeVar(\"T\", bound=\"Frame\")\n\n\ndef _lexsorted_equal_range(\n idx: Union[GenericIndex, cudf.MultiIndex],\n key_as_table: Frame,\n is_sorted: bool,\n) -> Tuple[int, int, Optional[ColumnBase]]:\n \"\"\"Get equal range for key in lexicographically sorted index. If index\n is not sorted when called, a sort will take place and `sort_inds` is\n returned. 
Otherwise `None` is returned in that position.\n \"\"\"\n if not is_sorted:\n sort_inds = idx._get_sorted_inds()\n sort_vals = idx._gather(sort_inds)\n else:\n sort_inds = None\n sort_vals = idx\n lower_bound = search_sorted(\n sort_vals, key_as_table, side=\"left\"\n ).element_indexing(0)\n upper_bound = search_sorted(\n sort_vals, key_as_table, side=\"right\"\n ).element_indexing(0)\n\n return lower_bound, upper_bound, sort_inds\n\n\ndef _index_from_data(data: MutableMapping, name: Any = None):\n \"\"\"Construct an index of the appropriate type from some data.\"\"\"\n\n if len(data) == 0:\n raise ValueError(\"Cannot construct Index from any empty Table\")\n if len(data) == 1:\n values = next(iter(data.values()))\n\n if isinstance(values, NumericalColumn):\n try:\n index_class_type: Type[\n Union[GenericIndex, cudf.MultiIndex]\n ] = _dtype_to_index[values.dtype.type]\n except KeyError:\n index_class_type = GenericIndex\n elif isinstance(values, DatetimeColumn):\n index_class_type = DatetimeIndex\n elif isinstance(values, TimeDeltaColumn):\n index_class_type = TimedeltaIndex\n elif isinstance(values, StringColumn):\n index_class_type = StringIndex\n elif isinstance(values, CategoricalColumn):\n index_class_type = CategoricalIndex\n elif isinstance(values, IntervalColumn):\n index_class_type = IntervalIndex\n else:\n index_class_type = cudf.MultiIndex\n return index_class_type._from_data(data, None, name)\n\n\ndef _index_from_columns(\n columns: List[cudf.core.column.ColumnBase], name: Any = None\n):\n \"\"\"Construct an index from ``columns``, with levels named 0, 1, 2...\"\"\"\n return _index_from_data(dict(zip(range(len(columns)), columns)), name=name)\n\n\nclass RangeIndex(BaseIndex, BinaryOperand):\n \"\"\"\n Immutable Index implementing a monotonic integer range.\n\n This is the default index type used by DataFrame and Series\n when no explicit index is provided by the user.\n\n Parameters\n ----------\n start : int (default: 0), or other range instance\n stop : int (default: 0)\n step : int (default: 1)\n name : object, optional\n Name to be stored in the index.\n dtype : numpy dtype\n Unused, accepted for homogeneity with other index types.\n copy : bool, default False\n Unused, accepted for homogeneity with other index types.\n\n Returns\n -------\n RangeIndex\n\n Examples\n --------\n >>> import cudf\n >>> cudf.RangeIndex(0, 10, 1, name=\"a\")\n RangeIndex(start=0, stop=10, step=1, name='a')\n\n >>> cudf.RangeIndex(range(1, 10, 1), name=\"a\")\n RangeIndex(start=1, stop=10, step=1, name='a')\n \"\"\"\n\n _VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS\n\n _range: range\n\n @_cudf_nvtx_annotate\n def __init__(\n self, start, stop=None, step=1, dtype=None, copy=False, name=None\n ):\n if step == 0:\n raise ValueError(\"Step must not be zero.\")\n\n if isinstance(start, range):\n therange = start\n start = therange.start\n stop = therange.stop\n step = therange.step\n if stop is None:\n start, stop = 0, start\n self._start = int(start)\n self._stop = int(stop)\n self._step = int(step) if step is not None else 1\n self._index = None\n self._name = name\n self._range = range(self._start, self._stop, self._step)\n # _end is the actual last element of RangeIndex,\n # whereas _stop is an upper bound.\n self._end = self._start + self._step * (len(self._range) - 1)\n\n def _copy_type_metadata(\n self, other: Frame, include_index: bool = True\n ) -> RangeIndex:\n # There is no metadata to be copied for RangeIndex since it does not\n # have an underlying column.\n return 
self\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def name(self):\n \"\"\"\n Returns the name of the Index.\n \"\"\"\n return self._name\n\n @name.setter # type: ignore\n @_cudf_nvtx_annotate\n def name(self, value):\n self._name = value\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def start(self):\n \"\"\"\n The value of the `start` parameter (0 if this was not supplied).\n \"\"\"\n return self._start\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def stop(self):\n \"\"\"\n The value of the stop parameter.\n \"\"\"\n return self._stop\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def step(self):\n \"\"\"\n The value of the step parameter.\n \"\"\"\n return self._step\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def _num_rows(self):\n return len(self)\n\n @cached_property\n @_cudf_nvtx_annotate\n def _values(self):\n if len(self) > 0:\n return column.arange(\n self._start, self._stop, self._step, dtype=self.dtype\n )\n else:\n return column.column_empty(0, masked=False, dtype=self.dtype)\n\n def is_numeric(self):\n return True\n\n def is_boolean(self):\n return False\n\n def is_integer(self):\n return True\n\n def is_floating(self):\n return False\n\n def is_object(self):\n return False\n\n def is_categorical(self):\n return False\n\n def is_interval(self):\n return False\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def _data(self):\n return cudf.core.column_accessor.ColumnAccessor(\n {self.name: self._values}\n )\n\n @_cudf_nvtx_annotate\n def __contains__(self, item):\n if not isinstance(\n item, tuple(np.sctypes[\"int\"] + np.sctypes[\"float\"] + [int, float])\n ):\n return False\n if not item % 1 == 0:\n return False\n return item in range(self._start, self._stop, self._step)\n\n @_cudf_nvtx_annotate\n def copy(self, name=None, deep=False, dtype=None, names=None):\n \"\"\"\n Make a copy of this object.\n\n Parameters\n ----------\n name : object optional (default: None), name of index\n deep : Bool (default: False)\n Ignored for RangeIndex\n dtype : numpy dtype optional (default: None)\n Target dtype for underlying range data\n names : list-like optional (default: False)\n Kept compatibility with MultiIndex. 
Should not be used.\n\n Returns\n -------\n New RangeIndex instance with same range, casted to new dtype\n \"\"\"\n\n dtype = self.dtype if dtype is None else dtype\n\n if not np.issubdtype(dtype, np.signedinteger):\n raise ValueError(f\"Expected Signed Integer Type, Got {dtype}\")\n\n name = self.name if name is None else name\n\n return RangeIndex(\n start=self._start, stop=self._stop, step=self._step, name=name\n )\n\n @_cudf_nvtx_annotate\n @doc_apply(_index_astype_docstring)\n def astype(self, dtype, copy: bool = True):\n if is_dtype_equal(dtype, np.int64):\n return self\n return self._as_int64().astype(dtype, copy=copy)\n\n @_cudf_nvtx_annotate\n def drop_duplicates(self, keep=\"first\"):\n return self\n\n @_cudf_nvtx_annotate\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(start={self._start}, stop={self._stop}\"\n f\", step={self._step}\"\n + (\n f\", name={pd.io.formats.printing.default_pprint(self.name)}\"\n if self.name is not None\n else \"\"\n )\n + \")\"\n )\n\n @_cudf_nvtx_annotate\n def __len__(self):\n return len(range(self._start, self._stop, self._step))\n\n @_cudf_nvtx_annotate\n def __getitem__(self, index):\n len_self = len(self)\n if isinstance(index, slice):\n sl_start, sl_stop, sl_step = index.indices(len_self)\n\n lo = self._start + sl_start * self._step\n hi = self._start + sl_stop * self._step\n st = self._step * sl_step\n return RangeIndex(start=lo, stop=hi, step=st, name=self._name)\n\n elif isinstance(index, Number):\n if index < 0:\n index = len_self + index\n if not (0 <= index < len_self):\n raise IndexError(\"out-of-bound\")\n index = min(index, len_self)\n index = self._start + index * self._step\n return index\n else:\n if _is_scalar_or_zero_d_array(index):\n index = np.min_scalar_type(index).type(index)\n index = column.as_column(index)\n\n return as_index(self._values[index], name=self.name)\n\n @_cudf_nvtx_annotate\n def equals(self, other):\n if isinstance(other, RangeIndex):\n if (self._start, self._stop, self._step) == (\n other._start,\n other._stop,\n other._step,\n ):\n return True\n return Int64Index._from_data(self._data).equals(other)\n\n @_cudf_nvtx_annotate\n def serialize(self):\n header = {}\n header[\"index_column\"] = {}\n\n # store metadata values of index separately\n # We don't need to store the GPU buffer for RangeIndexes\n # cuDF only needs to store start/stop and rehydrate\n # during de-serialization\n header[\"index_column\"][\"start\"] = self._start\n header[\"index_column\"][\"stop\"] = self._stop\n header[\"index_column\"][\"step\"] = self._step\n frames = []\n\n header[\"name\"] = pickle.dumps(self.name)\n header[\"dtype\"] = pickle.dumps(self.dtype)\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"frame_count\"] = 0\n return header, frames\n\n @classmethod\n @_cudf_nvtx_annotate\n def deserialize(cls, header, frames):\n h = header[\"index_column\"]\n name = pickle.loads(header[\"name\"])\n start = h[\"start\"]\n stop = h[\"stop\"]\n step = h.get(\"step\", 1)\n return RangeIndex(start=start, stop=stop, step=step, name=name)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def dtype(self):\n \"\"\"\n `dtype` of the range of values in RangeIndex.\n \"\"\"\n return cudf.dtype(np.int64)\n\n @_cudf_nvtx_annotate\n def find_label_range(self, first=None, last=None):\n \"\"\"Find subrange in the ``RangeIndex``, marked by their positions, that\n starts greater or equal to ``first`` and ends less or equal to ``last``\n\n The range returned is assumed to be monotonically increasing. 
In cases\n where there is no such range that suffice the constraint, an exception\n will be raised.\n\n Parameters\n ----------\n first, last : int, optional, Default None\n The \"start\" and \"stop\" values of the subrange. If None, will use\n ``self._start`` as first, ``self._stop`` as last.\n\n Returns\n -------\n begin, end : 2-tuple of int\n The starting index and the ending index.\n The `last` value occurs at ``end - 1`` position.\n \"\"\"\n\n first = self._start if first is None else first\n last = self._stop if last is None else last\n\n if self._step < 0:\n first = -first\n last = -last\n start = -self._start\n step = -self._step\n else:\n start = self._start\n step = self._step\n\n stop = start + len(self) * step\n begin = search_range(start, stop, first, step, side=\"left\")\n end = search_range(start, stop, last, step, side=\"right\")\n\n return begin, end\n\n @_cudf_nvtx_annotate\n def to_pandas(self):\n return pd.RangeIndex(\n start=self._start,\n stop=self._stop,\n step=self._step,\n dtype=self.dtype,\n name=self.name,\n )\n\n @property\n def is_unique(self):\n \"\"\"\n Return if the index has unique values.\n \"\"\"\n return True\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def is_monotonic_increasing(self):\n return self._step > 0 or len(self) <= 1\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def is_monotonic_decreasing(self):\n return self._step < 0 or len(self) <= 1\n\n @_cudf_nvtx_annotate\n def get_slice_bound(self, label, side, kind=None):\n \"\"\"\n Calculate slice bound that corresponds to given label.\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : int\n A valid value in the ``RangeIndex``\n side : {'left', 'right'}\n kind : Unused\n To keep consistency with other index types.\n\n Returns\n -------\n int\n Index of label.\n \"\"\"\n if side not in {\"left\", \"right\"}:\n raise ValueError(f\"Unrecognized side parameter: {side}\")\n\n if self._step < 0:\n label = -label\n start = -self._start\n step = -self._step\n else:\n start = self._start\n step = self._step\n\n stop = start + len(self) * step\n pos = search_range(start, stop, label, step, side=side)\n return pos\n\n @_cudf_nvtx_annotate\n def memory_usage(self, deep=False):\n if deep:\n warnings.warn(\n \"The deep parameter is ignored and is only included \"\n \"for pandas compatibility.\"\n )\n return 0\n\n def unique(self):\n # RangeIndex always has unique values\n return self\n\n @_cudf_nvtx_annotate\n def __mul__(self, other):\n # Multiplication by raw ints must return a RangeIndex to match pandas.\n if isinstance(other, cudf.Scalar) and other.dtype.kind in \"iu\":\n other = other.value\n elif (\n isinstance(other, (np.ndarray, cupy.ndarray))\n and other.ndim == 0\n and other.dtype.kind in \"iu\"\n ):\n other = other.item()\n if isinstance(other, (int, np.integer)):\n return RangeIndex(\n self.start * other, self.stop * other, self.step * other\n )\n return self._as_int64().__mul__(other)\n\n @_cudf_nvtx_annotate\n def __rmul__(self, other):\n # Multiplication is commutative.\n return self.__mul__(other)\n\n @_cudf_nvtx_annotate\n def _as_int64(self):\n # Convert self to an Int64Index. 
This method is used to perform ops\n # that are not defined directly on RangeIndex.\n return Int64Index._from_data(self._data)\n\n @_cudf_nvtx_annotate\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n return self._as_int64().__array_ufunc__(\n ufunc, method, *inputs, **kwargs\n )\n\n @_cudf_nvtx_annotate\n def __getattr__(self, key):\n # For methods that are not defined for RangeIndex we attempt to operate\n # on the corresponding integer index if possible.\n try:\n return getattr(self._as_int64(), key)\n except AttributeError:\n raise AttributeError(\n f\"'{type(self)}' object has no attribute {key}\"\n )\n\n @_cudf_nvtx_annotate\n def get_loc(self, key, method=None, tolerance=None):\n # Given an actual integer,\n idx = (key - self._start) / self._step\n idx_int_upper_bound = (self._stop - self._start) // self._step\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if using pad, \"\n \"backfill or nearest lookups\"\n )\n\n if idx > idx_int_upper_bound or idx < 0:\n raise KeyError(key)\n\n idx_int = (key - self._start) // self._step\n if idx_int != idx:\n raise KeyError(key)\n return idx_int\n\n if (method == \"ffill\" and idx < 0) or (\n method == \"bfill\" and idx > idx_int_upper_bound\n ):\n raise KeyError(key)\n\n round_method = {\n \"ffill\": math.floor,\n \"bfill\": math.ceil,\n \"nearest\": round,\n }[method]\n if tolerance is not None and (abs(idx) * self._step > tolerance):\n raise KeyError(key)\n return np.clip(round_method(idx), 0, idx_int_upper_bound, dtype=int)\n\n @_cudf_nvtx_annotate\n def _union(self, other, sort=None):\n if isinstance(other, RangeIndex):\n # Variable suffixes are of the\n # following notation: *_o -> other, *_s -> self,\n # and *_r -> result\n start_s, step_s = self.start, self.step\n end_s = self._end\n start_o, step_o = other.start, other.step\n end_o = other._end\n if self.step < 0:\n start_s, step_s, end_s = end_s, -step_s, start_s\n if other.step < 0:\n start_o, step_o, end_o = end_o, -step_o, start_o\n if len(self) == 1 and len(other) == 1:\n step_s = step_o = abs(self.start - other.start)\n elif len(self) == 1:\n step_s = step_o\n elif len(other) == 1:\n step_o = step_s\n\n # Determine minimum start value of the result.\n start_r = min(start_s, start_o)\n # Determine maximum end value of the result.\n end_r = max(end_s, end_o)\n result = None\n min_step = min(step_o, step_s)\n\n if ((start_s - start_o) % min_step) == 0:\n # Checking to determine other is a subset of self with\n # equal step size.\n if (\n step_o == step_s\n and (start_s - end_o) <= step_s\n and (start_o - end_s) <= step_s\n ):\n result = type(self)(start_r, end_r + step_s, step_s)\n # Checking if self is a subset of other with unequal\n # step sizes.\n elif (\n step_o % step_s == 0\n and (start_o + step_s >= start_s)\n and (end_o - step_s <= end_s)\n ):\n result = type(self)(start_r, end_r + step_s, step_s)\n # Checking if other is a subset of self with unequal\n # step sizes.\n elif (\n step_s % step_o == 0\n and (start_s + step_o >= start_o)\n and (end_s - step_o <= end_o)\n ):\n result = type(self)(start_r, end_r + step_o, step_o)\n # Checking to determine when the steps are even but one of\n # the inputs spans across is near half or less then half\n # the other input. 
This case needs manipulation to step\n # size.\n elif (\n step_o == step_s\n and (step_s % 2 == 0)\n and (abs(start_s - start_o) <= step_s / 2)\n and (abs(end_s - end_o) <= step_s / 2)\n ):\n result = type(self)(start_r, end_r + step_s / 2, step_s / 2)\n if result is not None:\n if sort is None and not result.is_monotonic_increasing:\n return result.sort_values()\n else:\n return result\n\n # If all the above optimizations don't cater to the inputs,\n # we materialize RangeIndex's into `Int64Index` and\n # then perform `union`.\n return Int64Index(self._values)._union(other, sort=sort)\n\n @_cudf_nvtx_annotate\n def _intersection(self, other, sort=False):\n if not isinstance(other, RangeIndex):\n return super()._intersection(other, sort=sort)\n\n if not len(self) or not len(other):\n return RangeIndex(0)\n\n first = self._range[::-1] if self.step < 0 else self._range\n second = other._range[::-1] if other.step < 0 else other._range\n\n # check whether intervals intersect\n # deals with in- and decreasing ranges\n int_low = max(first.start, second.start)\n int_high = min(first.stop, second.stop)\n if int_high <= int_low:\n return RangeIndex(0)\n\n # Method hint: linear Diophantine equation\n # solve intersection problem\n # performance hint: for identical step sizes, could use\n # cheaper alternative\n gcd, s, _ = _extended_gcd(first.step, second.step)\n\n # check whether element sets intersect\n if (first.start - second.start) % gcd:\n return RangeIndex(0)\n\n # calculate parameters for the RangeIndex describing the\n # intersection disregarding the lower bounds\n tmp_start = (\n first.start + (second.start - first.start) * first.step // gcd * s\n )\n new_step = first.step * second.step // gcd\n no_steps = -(-(int_low - tmp_start) // abs(new_step))\n new_start = tmp_start + abs(new_step) * no_steps\n new_range = range(new_start, int_high, new_step)\n new_index = RangeIndex(new_range)\n\n if (self.step < 0 and other.step < 0) is not (new_index.step < 0):\n new_index = new_index[::-1]\n if sort is None:\n new_index = new_index.sort_values()\n\n return new_index\n\n @_cudf_nvtx_annotate\n def _gather(self, gather_map, nullify=False, check_bounds=True):\n gather_map = cudf.core.column.as_column(gather_map)\n return Int64Index._from_columns(\n [self._values.take(gather_map, nullify, check_bounds)], [self.name]\n )\n\n @_cudf_nvtx_annotate\n def _apply_boolean_mask(self, boolean_mask):\n return Int64Index._from_columns(\n [self._values.apply_boolean_mask(boolean_mask)], [self.name]\n )\n\n def _split(self, splits):\n return Int64Index._from_columns(\n [self._values.columns_split(splits)], [self.name]\n )\n\n def _binaryop(self, other, op: str):\n return self._as_int64()._binaryop(other, op=op)\n\n\n# Patch in all binops and unary ops, which bypass __getattr__ on the instance\n# and prevent the above overload from working.\nfor unaop in (\"__neg__\", \"__pos__\", \"__abs__\"):\n setattr(\n RangeIndex,\n unaop,\n lambda self, op=unaop: getattr(self._as_int64(), op)(),\n )\n\n\nclass GenericIndex(SingleColumnFrame, BaseIndex):\n \"\"\"\n An array of orderable values that represent the indices of another Column\n\n Attributes\n ----------\n _values: A Column object\n name: A string\n\n Parameters\n ----------\n data : Column\n The Column of data for this index\n name : str optional\n The name of the Index. If not provided, the Index adopts the value\n Column's name. 
Otherwise if this name is different from the value\n Column's, the data Column will be cloned to adopt this name.\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __init__(self, data, **kwargs):\n kwargs = _setdefault_name(data, **kwargs)\n\n # normalize the input\n if isinstance(data, cudf.Series):\n data = data._column\n elif isinstance(data, column.ColumnBase):\n data = data\n else:\n if isinstance(data, (list, tuple)):\n if len(data) == 0:\n data = np.asarray([], dtype=\"int64\")\n else:\n data = np.asarray(data)\n data = column.as_column(data)\n assert isinstance(data, (NumericalColumn, StringColumn))\n\n name = kwargs.get(\"name\")\n super().__init__({name: data})\n\n @_cudf_nvtx_annotate\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n ret = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)\n\n if ret is not None:\n return ret\n\n # Attempt to dispatch all other functions to cupy.\n cupy_func = getattr(cupy, ufunc.__name__)\n if cupy_func:\n if ufunc.nin == 2:\n other = inputs[self is inputs[0]]\n inputs = self._make_operands_for_binop(other)\n else:\n inputs = {\n name: (col, None, False, None)\n for name, col in self._data.items()\n }\n\n data = self._apply_cupy_ufunc_to_operands(\n ufunc, cupy_func, inputs, **kwargs\n )\n\n out = [_index_from_data(out) for out in data]\n\n # pandas returns numpy arrays when the outputs are boolean.\n for i, o in enumerate(out):\n # We explicitly _do not_ use isinstance here: we want only\n # boolean GenericIndexes, not dtype-specific subclasses.\n if type(o) is GenericIndex and o.dtype.kind == \"b\":\n out[i] = o.values\n\n return out[0] if ufunc.nout == 1 else tuple(out)\n\n return NotImplemented\n\n @_cudf_nvtx_annotate\n def _binaryop(\n self, other: T, op: str, fill_value: Any = None, *args, **kwargs,\n ) -> SingleColumnFrame:\n reflect = self._is_reflected_op(op)\n if reflect:\n op = op[:2] + op[3:]\n operands = self._make_operands_for_binop(other, fill_value, reflect)\n if operands is NotImplemented:\n return NotImplemented\n ret = _index_from_data(self._colwise_binop(operands, op))\n\n # pandas returns numpy arrays when the outputs are boolean. 
We\n # explicitly _do not_ use isinstance here: we want only boolean\n # GenericIndexes, not dtype-specific subclasses.\n if type(ret) is GenericIndex and ret.dtype.kind == \"b\":\n return ret.values\n return ret\n\n @_cudf_nvtx_annotate\n def _copy_type_metadata(\n self, other: Frame, include_index: bool = True\n ) -> GenericIndex:\n \"\"\"\n Copy type metadata from each column of `other` to the corresponding\n column of `self`.\n See `ColumnBase._with_type_metadata` for more information.\n \"\"\"\n for name, col, other_col in zip(\n self._data.keys(), self._data.values(), other._data.values()\n ):\n self._data.set_by_label(\n name, col._with_type_metadata(other_col.dtype), validate=False\n )\n return self\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def _values(self):\n return self._column\n\n @classmethod\n @_cudf_nvtx_annotate\n def _concat(cls, objs):\n if all(isinstance(obj, RangeIndex) for obj in objs):\n result = _concat_range_index(objs)\n else:\n data = concat_columns([o._values for o in objs])\n result = as_index(data)\n\n names = {obj.name for obj in objs}\n if len(names) == 1:\n [name] = names\n else:\n name = None\n\n result.name = name\n return result\n\n @_cudf_nvtx_annotate\n def memory_usage(self, deep=False):\n return sum(super().memory_usage(deep=deep).values())\n\n @_cudf_nvtx_annotate\n def equals(self, other, **kwargs):\n \"\"\"\n Determine if two Index objects contain the same elements.\n\n Returns\n -------\n out: bool\n True if “other” is an Index and it has the same elements\n as calling index; False otherwise.\n \"\"\"\n if not isinstance(other, BaseIndex):\n return False\n\n check_types = False\n\n self_is_categorical = isinstance(self, CategoricalIndex)\n other_is_categorical = isinstance(other, CategoricalIndex)\n if self_is_categorical and not other_is_categorical:\n other = other.astype(self.dtype)\n check_types = True\n elif other_is_categorical and not self_is_categorical:\n self = self.astype(other.dtype)\n check_types = True\n\n try:\n return super().equals(other, check_types=check_types)\n except TypeError:\n return False\n\n @_cudf_nvtx_annotate\n def copy(self, name=None, deep=False, dtype=None, names=None):\n \"\"\"\n Make a copy of this object.\n\n Parameters\n ----------\n name : object, default None\n Name of index, use original name when None\n deep : bool, default True\n Make a deep copy of the data.\n With ``deep=False`` the original data is used\n dtype : numpy dtype, default None\n Target datatype to cast into, use original dtype when None\n names : list-like, default False\n Kept compatibility with MultiIndex. 
Should not be used.\n\n Returns\n -------\n New index instance, casted to new dtype\n \"\"\"\n\n dtype = self.dtype if dtype is None else dtype\n name = self.name if name is None else name\n\n col = self._values.astype(dtype)\n return _index_from_data({name: col.copy(True) if deep else col})\n\n @_cudf_nvtx_annotate\n @doc_apply(_index_astype_docstring)\n def astype(self, dtype, copy: bool = True):\n return _index_from_data(super().astype({self.name: dtype}, copy))\n\n @_cudf_nvtx_annotate\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None, 'pad'/'fill', 'backfill'/'bfill', 'nearest'}, optional\n - default: exact matches only.\n - pad / ffill: find the PREVIOUS index value if no exact match.\n - backfill / bfill: use NEXT index value if no exact match.\n - nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index\n value.\n tolerance : int or float, optional\n Maximum distance from index value for inexact matches. The value\n of the index at the matching location must satisfy the equation\n ``abs(index[loc] - key) <= tolerance``.\n\n Returns\n -------\n int or slice or boolean mask\n - If result is unique, return integer index\n - If index is monotonic, loc is returned as a slice object\n - Otherwise, a boolean mask is returned\n\n Examples\n --------\n >>> unique_index = cudf.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n >>> monotonic_index = cudf.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n >>> non_monotonic_index = cudf.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True])\n >>> numeric_unique_index = cudf.Index([1, 2, 3])\n >>> numeric_unique_index.get_loc(3)\n 2\n \"\"\"\n if tolerance is not None:\n raise NotImplementedError(\n \"Parameter tolerance is unsupported yet.\"\n )\n if method not in {\n None,\n \"ffill\",\n \"bfill\",\n \"pad\",\n \"backfill\",\n \"nearest\",\n }:\n raise ValueError(\n f\"Invalid fill method. Expecting pad (ffill), backfill (bfill)\"\n f\" or nearest. 
Got {method}\"\n )\n\n is_sorted = (\n self.is_monotonic_increasing or self.is_monotonic_decreasing\n )\n\n if not is_sorted and method is not None:\n raise ValueError(\n \"index must be monotonic increasing or decreasing if `method`\"\n \"is specified.\"\n )\n\n key_as_table = cudf.core.frame.Frame(\n {\"None\": as_column(key, length=1)}\n )\n lower_bound, upper_bound, sort_inds = _lexsorted_equal_range(\n self, key_as_table, is_sorted\n )\n\n if lower_bound == upper_bound:\n # Key not found, apply method\n if method in (\"pad\", \"ffill\"):\n if lower_bound == 0:\n raise KeyError(key)\n return lower_bound - 1\n elif method in (\"backfill\", \"bfill\"):\n if lower_bound == self._data.nrows:\n raise KeyError(key)\n return lower_bound\n elif method == \"nearest\":\n if lower_bound == self._data.nrows:\n return lower_bound - 1\n elif lower_bound == 0:\n return 0\n lower_val = self._column.element_indexing(lower_bound - 1)\n upper_val = self._column.element_indexing(lower_bound)\n return (\n lower_bound - 1\n if abs(lower_val - key) < abs(upper_val - key)\n else lower_bound\n )\n else:\n raise KeyError(key)\n\n if lower_bound + 1 == upper_bound:\n # Search result is unique, return int.\n return (\n lower_bound\n if is_sorted\n else sort_inds.element_indexing(lower_bound)\n )\n\n if is_sorted:\n # In monotonic index, lex search result is continuous. A slice for\n # the range is returned.\n return slice(lower_bound, upper_bound)\n\n # Not sorted and not unique. Return a boolean mask\n mask = cupy.full(self._data.nrows, False)\n true_inds = sort_inds.slice(lower_bound, upper_bound).values\n mask[true_inds] = True\n return mask\n\n @_cudf_nvtx_annotate\n def __repr__(self):\n max_seq_items = get_option(\"max_seq_items\") or len(self)\n mr = 0\n if 2 * max_seq_items < len(self):\n mr = max_seq_items + 1\n\n if len(self) > mr and mr != 0:\n top = self[0:mr]\n bottom = self[-1 * mr :]\n\n preprocess = cudf.concat([top, bottom])\n else:\n preprocess = self\n\n # TODO: Change below usages accordingly to\n # utilize `Index.to_string` once it is implemented\n # related issue : https://github.com/pandas-dev/pandas/issues/35389\n if isinstance(preprocess, CategoricalIndex):\n if preprocess.categories.dtype.kind == \"f\":\n output = (\n preprocess.astype(\"str\")\n .to_pandas()\n .astype(\n dtype=pd.CategoricalDtype(\n categories=preprocess.dtype.categories.astype(\n \"str\"\n ).to_pandas(),\n ordered=preprocess.dtype.ordered,\n )\n )\n .__repr__()\n )\n break_idx = output.find(\"ordered=\")\n output = (\n output[:break_idx].replace(\"'\", \"\") + output[break_idx:]\n )\n else:\n output = preprocess.to_pandas().__repr__()\n\n output = output.replace(\"nan\", cudf._NA_REP)\n elif preprocess._values.nullable:\n output = self._clean_nulls_from_index().to_pandas().__repr__()\n\n if not isinstance(self, StringIndex):\n # We should remove all the single quotes\n # from the output due to the type-cast to\n # object dtype happening above.\n # Note : The replacing of single quotes has\n # to happen only incase of non-StringIndex types,\n # as we want to preserve single quotes incase\n # of StringIndex and it is valid to have them.\n output = output.replace(\"'\", \"\")\n else:\n output = preprocess.to_pandas().__repr__()\n\n # Fix and correct the class name of the output\n # string by finding first occurrence of \"(\" in the output\n index_class_split_index = output.find(\"(\")\n output = self.__class__.__name__ + output[index_class_split_index:]\n\n lines = output.split(\"\\n\")\n\n tmp_meta = lines[-1]\n 
dtype_index = tmp_meta.rfind(\" dtype=\")\n prior_to_dtype = tmp_meta[:dtype_index]\n lines = lines[:-1]\n lines.append(prior_to_dtype + \" dtype='%s'\" % self.dtype)\n if self.name is not None:\n lines[-1] = lines[-1] + \", name='%s'\" % self.name\n if \"length\" in tmp_meta:\n lines[-1] = lines[-1] + \", length=%d)\" % len(self)\n else:\n lines[-1] = lines[-1] + \")\"\n\n return \"\\n\".join(lines)\n\n @_cudf_nvtx_annotate\n def __getitem__(self, index):\n if type(self) == IntervalIndex:\n raise NotImplementedError(\n \"Getting a scalar from an IntervalIndex is not yet supported\"\n )\n res = self._values[index]\n if not isinstance(index, int):\n res = as_index(res)\n res.name = self.name\n return res\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def dtype(self):\n \"\"\"\n `dtype` of the underlying values in GenericIndex.\n \"\"\"\n return self._values.dtype\n\n @_cudf_nvtx_annotate\n def find_label_range(self, first, last):\n \"\"\"Find range that starts with *first* and ends with *last*,\n inclusively.\n\n Returns\n -------\n begin, end : 2-tuple of int\n The starting index and the ending index.\n The *last* value occurs at ``end - 1`` position.\n \"\"\"\n col = self._values\n begin, end = None, None\n if first is not None:\n begin = col.find_first_value(first, closest=True)\n if last is not None:\n end = col.find_last_value(last, closest=True)\n end += 1\n return begin, end\n\n @_cudf_nvtx_annotate\n def get_slice_bound(self, label, side, kind=None):\n return self._values.get_slice_bound(label, side, kind)\n\n def is_numeric(self):\n return False\n\n def is_boolean(self):\n return True\n\n def is_integer(self):\n return False\n\n def is_floating(self):\n return False\n\n def is_object(self):\n return False\n\n def is_categorical(self):\n return False\n\n def is_interval(self):\n return False\n\n @_cudf_nvtx_annotate\n def argsort(\n self,\n axis=0,\n kind=\"quicksort\",\n order=None,\n ascending=True,\n na_position=\"last\",\n ):\n \"\"\"Return the integer indices that would sort the Series values.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Has no effect but is accepted for compatibility with numpy.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable\n algorithms. 
Only quicksort is supported in cuDF.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n ascending : bool or list of bool, default True\n If True, sort values in ascending order, otherwise descending.\n na_position : {‘first’ or ‘last’}, default ‘last’\n Argument ‘first’ puts NaNs at the beginning, ‘last’ puts NaNs\n at the end.\n\n Returns\n -------\n cupy.ndarray: The indices sorted based on input.\n \"\"\" # noqa: E501\n return super().argsort(\n axis=axis,\n kind=kind,\n order=order,\n ascending=ascending,\n na_position=na_position,\n )\n\n\nclass NumericIndex(GenericIndex):\n \"\"\"Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Index\n \"\"\"\n\n # Subclasses must define the dtype they are associated with.\n _dtype: Union[None, Type[np.number]] = None\n\n @_cudf_nvtx_annotate\n def __init__(self, data=None, dtype=None, copy=False, name=None):\n\n dtype = type(self)._dtype\n if copy:\n data = column.as_column(data, dtype=dtype).copy()\n\n kwargs = _setdefault_name(data, name=name)\n\n data = column.as_column(data, dtype=dtype)\n\n super().__init__(data, **kwargs)\n\n def is_numeric(self):\n return True\n\n def is_boolean(self):\n return False\n\n def is_integer(self):\n return True\n\n def is_floating(self):\n return False\n\n def is_object(self):\n return False\n\n def is_categorical(self):\n return False\n\n def is_interval(self):\n return False\n\n\nclass Int8Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Int8Index is a special case of Index with purely\n integer(``int8``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Int8Index\n \"\"\"\n\n _dtype = np.int8\n\n\nclass Int16Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Int16Index is a special case of Index with purely\n integer(``int16``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Int16Index\n \"\"\"\n\n _dtype = np.int16\n\n\nclass Int32Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Int32Index is a special case of Index with purely\n integer(``int32``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Int32Index\n \"\"\"\n\n _dtype = np.int32\n\n\nclass Int64Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Int64Index is a special case of Index with purely\n integer(``int64``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n 
Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Int64Index\n \"\"\"\n\n _dtype = np.int64\n\n\nclass UInt8Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n UInt8Index is a special case of Index with purely\n integer(``uint64``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n UInt8Index\n \"\"\"\n\n _dtype = np.uint8\n\n\nclass UInt16Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n UInt16Index is a special case of Index with purely\n integer(``uint16``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n UInt16Index\n \"\"\"\n\n _dtype = np.uint16\n\n\nclass UInt32Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n UInt32Index is a special case of Index with purely\n integer(``uint32``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n UInt32Index\n \"\"\"\n\n _dtype = np.uint32\n\n\nclass UInt64Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n UInt64Index is a special case of Index with purely\n integer(``uint64``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n UInt64Index\n \"\"\"\n\n _dtype = np.uint64\n\n\nclass Float32Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Float32Index is a special case of Index with purely\n float(``float32``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Float32Index\n \"\"\"\n\n _dtype = np.float32\n\n def is_integer(self):\n return False\n\n def is_floating(self):\n return True\n\n\nclass Float64Index(NumericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of labels.\n The basic object storing row labels for all cuDF objects.\n Float64Index is a special case of Index with purely\n float(``float64``) labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype,\n but not used.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n Float64Index\n \"\"\"\n\n _dtype = np.float64\n\n def is_integer(self):\n return False\n\n def is_floating(self):\n return True\n\n\nclass DatetimeIndex(GenericIndex):\n \"\"\"\n Immutable , ordered and sliceable sequence of datetime64 data,\n represented internally as int64.\n\n Parameters\n ----------\n data : 
array-like (1-dimensional), optional\n Optional datetime-like data to construct index with.\n copy : bool\n Make a copy of input.\n freq : str, optional\n This is not yet supported\n tz : pytz.timezone or dateutil.tz.tzfile\n This is not yet supported\n ambiguous : ‘infer’, bool-ndarray, ‘NaT’, default ‘raise’\n This is not yet supported\n name : object\n Name to be stored in the index.\n dayfirst : bool, default False\n If True, parse dates in data with the day first order.\n This is not yet supported\n yearfirst : bool, default False\n If True parse dates in data with the year first order.\n This is not yet supported\n\n Returns\n -------\n DatetimeIndex\n\n Examples\n --------\n >>> import cudf\n >>> cudf.DatetimeIndex([1, 2, 3, 4], name=\"a\")\n DatetimeIndex(['1970-01-01 00:00:00.000000001',\n '1970-01-01 00:00:00.000000002',\n '1970-01-01 00:00:00.000000003',\n '1970-01-01 00:00:00.000000004'],\n dtype='datetime64[ns]', name='a')\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __init__(\n self,\n data=None,\n freq=None,\n tz=None,\n normalize=False,\n closed=None,\n ambiguous=\"raise\",\n dayfirst=False,\n yearfirst=False,\n dtype=None,\n copy=False,\n name=None,\n ):\n # we should be more strict on what we accept here but\n # we'd have to go and figure out all the semantics around\n # pandas dtindex creation first which. For now\n # just make sure we handle np.datetime64 arrays\n # and then just dispatch upstream\n if freq is not None:\n raise NotImplementedError(\"Freq is not yet supported\")\n if tz is not None:\n raise NotImplementedError(\"tz is not yet supported\")\n if normalize is not False:\n raise NotImplementedError(\"normalize == True is not yet supported\")\n if closed is not None:\n raise NotImplementedError(\"closed is not yet supported\")\n if ambiguous != \"raise\":\n raise NotImplementedError(\"ambiguous is not yet supported\")\n if dayfirst is not False:\n raise NotImplementedError(\"dayfirst == True is not yet supported\")\n if yearfirst is not False:\n raise NotImplementedError(\"yearfirst == True is not yet supported\")\n\n valid_dtypes = tuple(\n f\"datetime64[{res}]\" for res in (\"s\", \"ms\", \"us\", \"ns\")\n )\n if dtype is None:\n # nanosecond default matches pandas\n dtype = \"datetime64[ns]\"\n elif dtype not in valid_dtypes:\n raise TypeError(\"Invalid dtype\")\n\n if copy:\n data = column.as_column(data).copy()\n kwargs = _setdefault_name(data, name=name)\n if isinstance(data, np.ndarray) and data.dtype.kind == \"M\":\n data = column.as_column(data)\n elif isinstance(data, pd.DatetimeIndex):\n data = column.as_column(data.values)\n elif isinstance(data, (list, tuple)):\n data = column.as_column(np.array(data, dtype=dtype))\n super().__init__(data, **kwargs)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def year(self):\n \"\"\"\n The year of the datetime.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"Y\"))\n >>> datetime_index\n DatetimeIndex(['2000-12-31', '2001-12-31', '2002-12-31'], dtype='datetime64[ns]')\n >>> datetime_index.year\n Int16Index([2000, 2001, 2002], dtype='int16')\n \"\"\" # noqa: E501\n return self._get_dt_field(\"year\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def month(self):\n \"\"\"\n The month as January=1, December=12.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... 
periods=3, freq=\"M\"))\n >>> datetime_index\n DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31'], dtype='datetime64[ns]')\n >>> datetime_index.month\n Int16Index([1, 2, 3], dtype='int16')\n \"\"\" # noqa: E501\n return self._get_dt_field(\"month\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def day(self):\n \"\"\"\n The day of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"D\"))\n >>> datetime_index\n DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], dtype='datetime64[ns]')\n >>> datetime_index.day\n Int16Index([1, 2, 3], dtype='int16')\n \"\"\" # noqa: E501\n return self._get_dt_field(\"day\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def hour(self):\n \"\"\"\n The hours of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"h\"))\n >>> datetime_index\n DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 01:00:00',\n '2000-01-01 02:00:00'],\n dtype='datetime64[ns]')\n >>> datetime_index.hour\n Int16Index([0, 1, 2], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"hour\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def minute(self):\n \"\"\"\n The minutes of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"T\"))\n >>> datetime_index\n DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',\n '2000-01-01 00:02:00'],\n dtype='datetime64[ns]')\n >>> datetime_index.minute\n Int16Index([0, 1, 2], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"minute\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def second(self):\n \"\"\"\n The seconds of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"s\"))\n >>> datetime_index\n DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:00:01',\n '2000-01-01 00:00:02'],\n dtype='datetime64[ns]')\n >>> datetime_index.second\n Int16Index([0, 1, 2], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"second\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def weekday(self):\n \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2016-12-31\",\n ... \"2017-01-08\", freq=\"D\"))\n >>> datetime_index\n DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',\n '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',\n '2017-01-08'],\n dtype='datetime64[ns]')\n >>> datetime_index.weekday\n Int16Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"weekday\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def dayofweek(self):\n \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2016-12-31\",\n ... 
\"2017-01-08\", freq=\"D\"))\n >>> datetime_index\n DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',\n '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',\n '2017-01-08'],\n dtype='datetime64[ns]')\n >>> datetime_index.dayofweek\n Int16Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"weekday\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def dayofyear(self):\n \"\"\"\n The day of the year, from 1-365 in non-leap years and\n from 1-366 in leap years.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2016-12-31\",\n ... \"2017-01-08\", freq=\"D\"))\n >>> datetime_index\n DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',\n '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',\n '2017-01-08'],\n dtype='datetime64[ns]')\n >>> datetime_index.dayofyear\n Int16Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"day_of_year\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def day_of_year(self):\n \"\"\"\n The day of the year, from 1-365 in non-leap years and\n from 1-366 in leap years.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_index = cudf.Index(pd.date_range(\"2016-12-31\",\n ... \"2017-01-08\", freq=\"D\"))\n >>> datetime_index\n DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',\n '2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',\n '2017-01-08'],\n dtype='datetime64[ns]')\n >>> datetime_index.day_of_year\n Int16Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16')\n \"\"\"\n return self._get_dt_field(\"day_of_year\")\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def is_leap_year(self):\n \"\"\"\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day. Leap years are years which are\n multiples of four with the exception of years divisible by 100 but not\n by 400.\n\n Returns\n -------\n ndarray\n Booleans indicating if dates belong to a leap year.\n \"\"\"\n res = is_leap_year(self._values).fillna(False)\n return cupy.asarray(res)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def quarter(self):\n \"\"\"\n Integer indicator for which quarter of the year the date belongs in.\n\n There are 4 quarters in a year. With the first quarter being from\n January - March, second quarter being April - June, third quarter\n being July - September and fourth quarter being October - December.\n\n Returns\n -------\n Int8Index\n Integer indicating which quarter the date belongs to.\n\n Examples\n --------\n >>> import cudf\n >>> gIndex = cudf.DatetimeIndex([\"2020-05-31 08:00:00\",\n ... \"1999-12-31 18:40:00\"])\n >>> gIndex.quarter\n Int8Index([2, 4], dtype='int8')\n \"\"\"\n res = extract_quarter(self._values)\n return Int8Index(res, dtype=\"int8\")\n\n @_cudf_nvtx_annotate\n def isocalendar(self):\n \"\"\"\n Returns a DataFrame with the year, week, and day\n calculated according to the ISO 8601 standard.\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n Examples\n --------\n >>> gIndex = cudf.DatetimeIndex([\"2020-05-31 08:00:00\",\n ... 
\"1999-12-31 18:40:00\"])\n >>> gIndex.isocalendar()\n year week day\n 2020-05-31 08:00:00 2020 22 7\n 1999-12-31 18:40:00 1999 52 5\n \"\"\"\n return cudf.core.tools.datetimes._to_iso_calendar(self)\n\n @_cudf_nvtx_annotate\n def to_pandas(self):\n nanos = self._values.astype(\"datetime64[ns]\")\n return pd.DatetimeIndex(nanos.to_pandas(), name=self.name)\n\n @_cudf_nvtx_annotate\n def _get_dt_field(self, field):\n out_column = self._values.get_dt_field(field)\n # column.column_empty_like always returns a Column object\n # but we need a NumericalColumn for GenericIndex..\n # how should this be handled?\n out_column = column.build_column(\n data=out_column.base_data,\n dtype=out_column.dtype,\n mask=out_column.base_mask,\n offset=out_column.offset,\n )\n return as_index(out_column, name=self.name)\n\n def is_boolean(self):\n return False\n\n @_cudf_nvtx_annotate\n def ceil(self, freq):\n \"\"\"\n Perform ceil operation on the data to the specified freq.\n\n Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n DatetimeIndex\n Index of the same type for a DatetimeIndex\n\n Examples\n --------\n >>> import cudf\n >>> gIndex = cudf.DatetimeIndex([\n ... \"2020-05-31 08:05:42\",\n ... \"1999-12-31 18:40:30\",\n ... ])\n >>> gIndex.ceil(\"T\")\n DatetimeIndex(['2020-05-31 08:06:00', '1999-12-31 18:41:00'], dtype='datetime64[ns]')\n \"\"\" # noqa: E501\n out_column = self._values.ceil(freq)\n\n return self.__class__._from_data({self.name: out_column})\n\n @_cudf_nvtx_annotate\n def floor(self, freq):\n \"\"\"\n Perform floor operation on the data to the specified freq.\n\n Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n DatetimeIndex\n Index of the same type for a DatetimeIndex\n\n Examples\n --------\n >>> import cudf\n >>> gIndex = cudf.DatetimeIndex([\n ... \"2020-05-31 08:59:59\",\n ... \"1999-12-31 18:44:59\",\n ... ])\n >>> gIndex.floor(\"T\")\n DatetimeIndex(['2020-05-31 08:59:00', '1999-12-31 18:44:00'], dtype='datetime64[ns]')\n \"\"\" # noqa: E501\n out_column = self._values.floor(freq)\n\n return self.__class__._from_data({self.name: out_column})\n\n @_cudf_nvtx_annotate\n def round(self, freq):\n \"\"\"\n Perform round operation on the data to the specified freq.\n\n Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n DatetimeIndex\n Index containing rounded datetimes.\n\n Examples\n --------\n >>> import cudf\n >>> dt_idx = cudf.Index([\n ... \"2001-01-01 00:04:45\",\n ... \"2001-01-01 00:04:58\",\n ... \"2001-01-01 00:05:04\",\n ... 
], dtype=\"datetime64[ns]\")\n >>> dt_idx\n DatetimeIndex(['2001-01-01 00:04:45', '2001-01-01 00:04:58',\n '2001-01-01 00:05:04'],\n dtype='datetime64[ns]')\n >>> dt_idx.round('H')\n DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01'], dtype='datetime64[ns]')\n >>> dt_idx.round('T')\n DatetimeIndex(['2001-01-01 00:05:00', '2001-01-01 00:05:00', '2001-01-01 00:05:00'], dtype='datetime64[ns]')\n \"\"\" # noqa: E501\n out_column = self._values.round(freq)\n\n return self.__class__._from_data({self.name: out_column})\n\n\nclass TimedeltaIndex(GenericIndex):\n \"\"\"\n Immutable, ordered and sliceable sequence of timedelta64 data,\n represented internally as int64.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional datetime-like data to construct index with.\n unit : str, optional\n This is not yet supported\n copy : bool\n Make a copy of input.\n freq : str, optional\n This is not yet supported\n closed : str, optional\n This is not yet supported\n dtype : str or numpy.dtype, optional\n Data type for the output Index. If not specified, the\n default dtype will be ``timedelta64[ns]``.\n name : object\n Name to be stored in the index.\n\n Returns\n -------\n TimedeltaIndex\n\n Examples\n --------\n >>> import cudf\n >>> cudf.TimedeltaIndex([1132223, 2023232, 342234324, 4234324],\n ... dtype=\"timedelta64[ns]\")\n TimedeltaIndex(['0 days 00:00:00.001132223', '0 days 00:00:00.002023232',\n '0 days 00:00:00.342234324', '0 days 00:00:00.004234324'],\n dtype='timedelta64[ns]')\n >>> cudf.TimedeltaIndex([1, 2, 3, 4], dtype=\"timedelta64[s]\",\n ... name=\"delta-index\")\n TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03',\n '0 days 00:00:04'],\n dtype='timedelta64[s]', name='delta-index')\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __init__(\n self,\n data=None,\n unit=None,\n freq=None,\n closed=None,\n dtype=\"timedelta64[ns]\",\n copy=False,\n name=None,\n ):\n\n if freq is not None:\n raise NotImplementedError(\"freq is not yet supported\")\n\n if unit is not None:\n raise NotImplementedError(\n \"unit is not yet supported, alternatively \"\n \"dtype parameter is supported\"\n )\n\n if copy:\n data = column.as_column(data).copy()\n kwargs = _setdefault_name(data, name=name)\n if isinstance(data, np.ndarray) and data.dtype.kind == \"m\":\n data = column.as_column(data)\n elif isinstance(data, pd.TimedeltaIndex):\n data = column.as_column(data.values)\n elif isinstance(data, (list, tuple)):\n data = column.as_column(np.array(data, dtype=dtype))\n super().__init__(data, **kwargs)\n\n @_cudf_nvtx_annotate\n def to_pandas(self):\n return pd.TimedeltaIndex(\n self._values.to_pandas(),\n name=self.name,\n unit=self._values.time_unit,\n )\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def days(self):\n \"\"\"\n Number of days for each element.\n \"\"\"\n return as_index(arbitrary=self._values.days, name=self.name)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def seconds(self):\n \"\"\"\n Number of seconds (>= 0 and less than 1 day) for each element.\n \"\"\"\n return as_index(arbitrary=self._values.seconds, name=self.name)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def microseconds(self):\n \"\"\"\n Number of microseconds (>= 0 and less than 1 second) for each element.\n \"\"\"\n return as_index(arbitrary=self._values.microseconds, name=self.name)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def nanoseconds(self):\n \"\"\"\n Number of nanoseconds (>= 0 and less than 1 microsecond) for each\n element.\n \"\"\"\n 
return as_index(arbitrary=self._values.nanoseconds, name=self.name)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def components(self):\n \"\"\"\n Return a dataframe of the components (days, hours, minutes,\n seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.\n \"\"\"\n return self._values.components()\n\n @property\n def inferred_freq(self):\n \"\"\"\n Infers frequency of TimedeltaIndex.\n\n Notes\n -----\n This property is currently not supported.\n \"\"\"\n raise NotImplementedError(\"inferred_freq is not yet supported\")\n\n def is_boolean(self):\n return False\n\n\nclass CategoricalIndex(GenericIndex):\n \"\"\"\n A categorical of orderable values that represent the indices of another\n Column\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n The values of the categorical. If categories are given,\n values not in categories will be replaced with None/NaN.\n categories : list-like, optional\n The categories for the categorical. Items need to be unique.\n If the categories are not given here (and also not in dtype),\n they will be inferred from the data.\n ordered : bool, optional\n Whether or not this categorical is treated as an ordered categorical.\n If not given here or in dtype, the resulting categorical will be\n unordered.\n dtype : CategoricalDtype or “category”, optional\n If CategoricalDtype, cannot be used together with categories or\n ordered.\n copy : bool, default False\n Make a copy of input.\n name : object, optional\n Name to be stored in the index.\n\n Returns\n -------\n CategoricalIndex\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> cudf.CategoricalIndex(\n ... data=[1, 2, 3, 4], categories=[1, 2], ordered=False, name=\"a\")\n CategoricalIndex([1, 2, <NA>, <NA>], categories=[1, 2], ordered=False, dtype='category', name='a')\n\n >>> cudf.CategoricalIndex(\n ... 
data=[1, 2, 3, 4], dtype=pd.CategoricalDtype([1, 2, 3]), name=\"a\")\n CategoricalIndex([1, 2, 3, <NA>], categories=[1, 2, 3], ordered=False, dtype='category', name='a')\n \"\"\" # noqa: E501\n\n @_cudf_nvtx_annotate\n def __init__(\n self,\n data=None,\n categories=None,\n ordered=None,\n dtype=None,\n copy=False,\n name=None,\n ):\n if isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):\n if categories is not None or ordered is not None:\n raise ValueError(\n \"Cannot specify `categories` or \"\n \"`ordered` together with `dtype`.\"\n )\n if copy:\n data = column.as_column(data, dtype=dtype).copy(deep=True)\n kwargs = _setdefault_name(data, name=name)\n if isinstance(data, CategoricalColumn):\n data = data\n elif isinstance(data, pd.Series) and (\n is_categorical_dtype(data.dtype)\n ):\n codes_data = column.as_column(data.cat.codes.values)\n data = column.build_categorical_column(\n categories=data.cat.categories,\n codes=codes_data,\n ordered=data.cat.ordered,\n )\n elif isinstance(data, (pd.Categorical, pd.CategoricalIndex)):\n codes_data = column.as_column(data.codes)\n data = column.build_categorical_column(\n categories=data.categories,\n codes=codes_data,\n ordered=data.ordered,\n )\n else:\n data = column.as_column(\n data, dtype=\"category\" if dtype is None else dtype\n )\n # dtype has already been taken care\n dtype = None\n\n if categories is not None:\n data = data.set_categories(categories, ordered=ordered)\n elif isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):\n data = data.set_categories(dtype.categories, ordered=ordered)\n elif ordered is True and data.ordered is False:\n data = data.as_ordered()\n elif ordered is False and data.ordered is True:\n data = data.as_unordered()\n\n super().__init__(data, **kwargs)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def codes(self):\n \"\"\"\n The category codes of this categorical.\n \"\"\"\n return as_index(self._values.codes)\n\n @property # type: ignore\n @_cudf_nvtx_annotate\n def categories(self):\n \"\"\"\n The categories of this categorical.\n \"\"\"\n return as_index(self._values.categories)\n\n def is_boolean(self):\n return False\n\n def is_categorical(self):\n return True\n\n\n@_cudf_nvtx_annotate\ndef interval_range(\n start=None, end=None, periods=None, freq=None, name=None, closed=\"right\",\n) -> \"IntervalIndex\":\n \"\"\"\n Returns a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric, default None\n Left bound for generating intervals.\n end : numeric , default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate\n freq : numeric, default None\n The length of each interval. 
Must be consistent\n with the type of start and end\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {\"left\", \"right\", \"both\", \"neither\"}, default \"right\"\n Whether the intervals are closed on the left-side, right-side,\n both or neither.\n\n Returns\n -------\n IntervalIndex\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> cudf.interval_range(start=0,end=5)\n IntervalIndex([(0, 0], (1, 1], (2, 2], (3, 3], (4, 4], (5, 5]],\n ...closed='right',dtype='interval')\n >>> cudf.interval_range(start=0,end=10, freq=2,closed='left')\n IntervalIndex([[0, 2), [2, 4), [4, 6), [6, 8), [8, 10)],\n ...closed='left',dtype='interval')\n >>> cudf.interval_range(start=0,end=10, periods=3,closed='left')\n ...IntervalIndex([[0.0, 3.3333333333333335),\n [3.3333333333333335, 6.666666666666667),\n [6.666666666666667, 10.0)],\n closed='left',\n dtype='interval')\n \"\"\"\n if freq and periods and start and end:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n args = [\n cudf.Scalar(x) if x is not None else None\n for x in (start, end, freq, periods)\n ]\n if any(\n not _is_non_decimal_numeric_dtype(x.dtype) if x is not None else False\n for x in args\n ):\n raise ValueError(\"start, end, periods, freq must be numeric values.\")\n *rargs, periods = args\n common_dtype = find_common_type([x.dtype for x in rargs if x])\n start, end, freq = rargs\n periods = periods.astype(\"int64\") if periods is not None else None\n\n if periods and not freq:\n # if statement for mypy to pass\n if end is not None and start is not None:\n # divmod only supported on host side scalars\n quotient, remainder = divmod((end - start).value, periods.value)\n if remainder:\n freq_step = cudf.Scalar((end - start) / periods)\n else:\n freq_step = cudf.Scalar(quotient)\n if start.dtype != freq_step.dtype:\n start = start.astype(freq_step.dtype)\n bin_edges = sequence(\n size=periods + 1,\n init=start.device_value,\n step=freq_step.device_value,\n )\n left_col = bin_edges[:-1]\n right_col = bin_edges[1:]\n elif freq and periods:\n if end:\n start = end - (freq * periods)\n if start:\n end = freq * periods + start\n if end is not None and start is not None:\n left_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n end = end + 1\n start = start + freq\n right_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n elif freq and not periods:\n if end is not None and start is not None:\n end = end - freq + 1\n left_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n end = end + freq + 1\n start = start + freq\n right_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n elif start is not None and end is not None:\n # if statements for mypy to pass\n if freq:\n left_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n else:\n left_col = arange(start.value, end.value, dtype=common_dtype)\n start = start + 1\n end = end + 1\n if freq:\n right_col = arange(\n start.value, end.value, freq.value, dtype=common_dtype\n )\n else:\n right_col = arange(start.value, end.value, dtype=common_dtype)\n else:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, at least two must be specified\"\n )\n if len(right_col) == 0 or len(left_col) == 0:\n dtype = IntervalDtype(\"int64\", closed)\n data = column.column_empty_like_same_mask(left_col, dtype)\n return IntervalIndex(data, 
closed=closed)\n\n interval_col = column.build_interval_column(\n left_col, right_col, closed=closed\n )\n return IntervalIndex(interval_col)\n\n\nclass IntervalIndex(GenericIndex):\n \"\"\"\n Immutable index of intervals that are closed on the same side.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n Array-like containing Interval objects from which to build the\n IntervalIndex.\n closed : {\"left\", \"right\", \"both\", \"neither\"}, default \"right\"\n Whether the intervals are closed on the left-side, right-side,\n both or neither.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n copy : bool, default False\n Copy the input data.\n name : object, optional\n Name to be stored in the index.\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __init__(\n self, data, closed=None, dtype=None, copy=False, name=None,\n ):\n if copy:\n data = column.as_column(data, dtype=dtype).copy()\n kwargs = _setdefault_name(data, name=name)\n if isinstance(data, IntervalColumn):\n data = data\n elif isinstance(data, pd.Series) and (is_interval_dtype(data.dtype)):\n data = column.as_column(data, data.dtype)\n elif isinstance(data, (pd._libs.interval.Interval, pd.IntervalIndex)):\n data = column.as_column(data, dtype=dtype,)\n elif not data:\n dtype = IntervalDtype(\"int64\", closed)\n data = column.column_empty_like_same_mask(\n column.as_column(data), dtype\n )\n else:\n data = column.as_column(data)\n data.dtype.closed = closed\n\n self.closed = closed\n super().__init__(data, **kwargs)\n\n @_cudf_nvtx_annotate\n def from_breaks(breaks, closed=\"right\", name=None, copy=False, dtype=None):\n \"\"\"\n Construct an IntervalIndex from an array of splits.\n\n Parameters\n ----------\n breaks : array-like (1-dimensional)\n Left and right bounds for each interval.\n closed : {\"left\", \"right\", \"both\", \"neither\"}, default \"right\"\n Whether the intervals are closed on the left-side, right-side,\n both or neither.\n copy : bool, default False\n Copy the input data.\n name : object, optional\n Name to be stored in the index.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n\n Returns\n -------\n IntervalIndex\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> cudf.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval')\n \"\"\"\n if copy:\n breaks = column.as_column(breaks, dtype=dtype).copy()\n left_col = breaks[:-1:]\n right_col = breaks[+1::]\n\n interval_col = column.build_interval_column(\n left_col, right_col, closed=closed\n )\n\n return IntervalIndex(interval_col, name=name)\n\n def is_interval(self):\n return True\n\n def is_boolean(self):\n return False\n\n\nclass StringIndex(GenericIndex):\n \"\"\"String defined indices into another Column\n\n Attributes\n ----------\n _values: A StringColumn object or NDArray of strings\n name: A string\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __init__(self, values, copy=False, **kwargs):\n kwargs = _setdefault_name(values, **kwargs)\n if isinstance(values, StringColumn):\n values = values.copy(deep=copy)\n elif isinstance(values, StringIndex):\n values = values._values.copy(deep=copy)\n else:\n values = column.as_column(values, dtype=\"str\")\n if not is_string_dtype(values.dtype):\n raise ValueError(\n \"Couldn't create StringIndex from passed in object\"\n )\n\n super().__init__(values, **kwargs)\n\n @_cudf_nvtx_annotate\n def to_pandas(self):\n return pd.Index(\n self.to_numpy(na_value=None), 
name=self.name, dtype=\"object\"\n )\n\n @_cudf_nvtx_annotate\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}({self._values.values_host},\"\n f\" dtype='object'\"\n + (\n f\", name={pd.io.formats.printing.default_pprint(self.name)}\"\n if self.name is not None\n else \"\"\n )\n + \")\"\n )\n\n @copy_docstring(StringMethods) # type: ignore\n @property\n @_cudf_nvtx_annotate\n def str(self):\n return StringMethods(parent=self)\n\n def _clean_nulls_from_index(self):\n \"\"\"\n Convert all na values(if any) in Index object\n to `<NA>` as a preprocessing step to `__repr__` methods.\n \"\"\"\n if self._values.has_nulls():\n return self.fillna(cudf._NA_REP)\n else:\n return self\n\n def is_boolean(self):\n return False\n\n def is_object(self):\n return True\n\n\n@_cudf_nvtx_annotate\ndef as_index(arbitrary, nan_as_null=None, **kwargs) -> BaseIndex:\n \"\"\"Create an Index from an arbitrary object\n\n Currently supported inputs are:\n\n * ``Column``\n * ``Buffer``\n * ``Series``\n * ``Index``\n * numba device array\n * numpy array\n * pyarrow array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of Index\n - CategoricalIndex for Categorical input.\n - DatetimeIndex for Datetime input.\n - GenericIndex for all other inputs.\n \"\"\"\n kwargs = _setdefault_name(arbitrary, **kwargs)\n if isinstance(arbitrary, cudf.MultiIndex):\n return arbitrary\n elif isinstance(arbitrary, BaseIndex):\n if arbitrary.name == kwargs[\"name\"]:\n return arbitrary\n idx = arbitrary.copy(deep=False)\n idx.rename(kwargs[\"name\"], inplace=True)\n return idx\n elif isinstance(arbitrary, ColumnBase):\n return _index_from_data({kwargs.get(\"name\", None): arbitrary})\n elif isinstance(arbitrary, cudf.Series):\n return as_index(arbitrary._column, nan_as_null=nan_as_null, **kwargs)\n elif isinstance(arbitrary, (pd.RangeIndex, range)):\n return RangeIndex(\n start=arbitrary.start,\n stop=arbitrary.stop,\n step=arbitrary.step,\n **kwargs,\n )\n elif isinstance(arbitrary, pd.MultiIndex):\n return cudf.MultiIndex.from_pandas(arbitrary, nan_as_null=nan_as_null)\n elif isinstance(arbitrary, cudf.DataFrame):\n return cudf.MultiIndex.from_frame(arbitrary)\n return as_index(\n column.as_column(\n arbitrary, dtype=kwargs.get(\"dtype\", None), nan_as_null=nan_as_null\n ),\n **kwargs,\n )\n\n\n_dtype_to_index: Dict[Any, Type[NumericIndex]] = {\n np.int8: Int8Index,\n np.int16: Int16Index,\n np.int32: Int32Index,\n np.int64: Int64Index,\n np.uint8: UInt8Index,\n np.uint16: UInt16Index,\n np.uint32: UInt32Index,\n np.uint64: UInt64Index,\n np.float32: Float32Index,\n np.float64: Float64Index,\n}\n\n\ndef _setdefault_name(values, **kwargs):\n if kwargs.get(\"name\") is None:\n kwargs[\"name\"] = getattr(values, \"name\", None)\n return kwargs\n\n\nclass IndexMeta(type):\n \"\"\"Custom metaclass for Index that overrides instance/subclass tests.\"\"\"\n\n def __instancecheck__(self, instance):\n return isinstance(instance, BaseIndex)\n\n def __subclasscheck__(self, subclass):\n return issubclass(subclass, BaseIndex)\n\n\nclass Index(BaseIndex, metaclass=IndexMeta):\n \"\"\"The basic object storing row labels for all cuDF objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)/ DataFrame\n If it is a DataFrame, it will return a MultiIndex\n dtype : NumPy dtype (default: object)\n If dtype is None, we find the dtype that best fits the data.\n copy : bool\n Make a copy of input data.\n name : object\n Name to be stored in the index.\n tupleize_cols : bool (default: True)\n When True, attempt to 
create a MultiIndex if possible.\n tupleize_cols == False is not yet supported.\n nan_as_null : bool, Default True\n If ``None``/``True``, converts ``np.nan`` values to\n ``null`` values.\n If ``False``, leaves ``np.nan`` values as is.\n\n Returns\n -------\n Index\n cudf Index\n\n Warnings\n --------\n This class should not be subclassed. It is designed as a factory for\n different subclasses of :class:`BaseIndex` depending on the provided input.\n If you absolutely must, and if you're intimately familiar with the\n internals of cuDF, subclass :class:`BaseIndex` instead.\n\n Examples\n --------\n >>> import cudf\n >>> cudf.Index([1, 2, 3], dtype=\"uint64\", name=\"a\")\n UInt64Index([1, 2, 3], dtype='uint64', name='a')\n\n >>> cudf.Index(cudf.DataFrame({\"a\":[1, 2], \"b\":[2, 3]}))\n MultiIndex([(1, 2),\n (2, 3)],\n names=['a', 'b'])\n \"\"\"\n\n @_cudf_nvtx_annotate\n def __new__(\n cls,\n data=None,\n dtype=None,\n copy=False,\n name=None,\n tupleize_cols=True,\n nan_as_null=True,\n **kwargs,\n ):\n assert (\n cls is Index\n ), \"Index cannot be subclassed, extend BaseIndex instead.\"\n if tupleize_cols is not True:\n raise NotImplementedError(\n \"tupleize_cols != True is not yet supported\"\n )\n\n return as_index(\n data,\n copy=copy,\n dtype=dtype,\n name=name,\n nan_as_null=nan_as_null,\n **kwargs,\n )\n\n @classmethod\n @_cudf_nvtx_annotate\n def from_arrow(cls, obj):\n try:\n return cls(ColumnBase.from_arrow(obj))\n except TypeError:\n # Try interpreting object as a MultiIndex before failing.\n return cudf.MultiIndex.from_arrow(obj)\n\n\n@_cudf_nvtx_annotate\ndef _concat_range_index(indexes: List[RangeIndex]) -> BaseIndex:\n \"\"\"\n An internal Utility function to concat RangeIndex objects.\n \"\"\"\n start = step = next_ = None\n\n # Filter the empty indexes\n non_empty_indexes = [obj for obj in indexes if len(obj)]\n\n if not non_empty_indexes:\n # Here all \"indexes\" had 0 length, i.e. were empty.\n # In this case return an empty range index.\n return RangeIndex(0, 0)\n\n for obj in non_empty_indexes:\n if start is None:\n # This is set by the first non-empty index\n start = obj.start\n if step is None and len(obj) > 1:\n step = obj.step\n elif step is None:\n # First non-empty index had only one element\n if obj.start == start:\n result = as_index(concat_columns([x._values for x in indexes]))\n return result\n step = obj.start - start\n\n non_consecutive = (step != obj.step and len(obj) > 1) or (\n next_ is not None and obj.start != next_\n )\n if non_consecutive:\n result = as_index(concat_columns([x._values for x in indexes]))\n return result\n if step is not None:\n next_ = obj[-1] + step\n\n stop = non_empty_indexes[-1].stop if next_ is None else next_\n return RangeIndex(start, stop, step)\n\n\n@_cudf_nvtx_annotate\ndef _extended_gcd(a: int, b: int) -> Tuple[int, int, int]:\n \"\"\"\n Extended Euclidean algorithms to solve Bezout's identity:\n a*x + b*y = gcd(x, y)\n Finds one particular solution for x, y: s, t\n Returns: gcd, s, t\n \"\"\"\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = b, a\n while r:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n return old_r, old_s, old_t\n"
]
| [
[
"pandas.RangeIndex",
"numpy.asarray",
"numpy.issubdtype",
"pandas.io.formats.printing.default_pprint",
"pandas._config.get_option",
"numpy.min_scalar_type",
"numpy.array"
]
]
|
hllj/fsdl-text-recognizer-2021-labs | [
"c9b5fb64cf90ba79844e0b58d2ac776e1e4349c3"
]
| [
"lab1/text_recognizer/models/mlp.py"
]
| [
"from typing import Any, Dict\nimport argparse\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nFC1_DIM = 1024\nFC2_DIM = 128\nFC3_DIM = 128\n\n\nclass MLP(nn.Module):\n \"\"\"Simple MLP suitable for recognizing single characters.\"\"\"\n\n def __init__(\n self,\n data_config: Dict[str, Any],\n args: argparse.Namespace = None,\n ) -> None:\n super().__init__()\n self.args = vars(args) if args is not None else {}\n\n input_dim = np.prod(data_config[\"input_dims\"])\n num_classes = len(data_config[\"mapping\"])\n\n fc1_dim = self.args.get(\"fc1\", FC1_DIM)\n fc2_dim = self.args.get(\"fc2\", FC2_DIM)\n fc3_dim = self.args.get(\"fc3\", FC3_DIM)\n\n self.dropout = nn.Dropout(0.5)\n self.fc1 = nn.Linear(input_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.fc3 = nn.Linear(fc2_dim, fc3_dim)\n self.fc4 = nn.Linear(fc3_dim, num_classes)\n\n def forward(self, x):\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout(x)\n\n x = self.fc2(x)\n x = F.relu(x)\n x = self.dropout(x)\n\n x = self.fc3(x)\n x = F.relu(x)\n x = self.dropout(x)\n \n x = self.fc4(x)\n return x\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--fc1\", type=int, default=1024)\n parser.add_argument(\"--fc2\", type=int, default=128)\n parser.add_argument(\"--fc3\", type=int, default=128)\n return parser\n"
]
| [
[
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.functional.relu",
"numpy.prod",
"torch.flatten"
]
]
|
dgketchum/pymetric | [
"f08a7e4fd12196be59f424e4a8a11448d8e805a1"
]
| [
"code/support/et_common.py"
]
| [
"#--------------------------------\n# Name: et_common.py\n# Purpose: Common ET support functions\n#--------------------------------\n\nfrom calendar import monthrange\nfrom collections import defaultdict\nimport datetime as dt\nimport logging\nimport math\nimport os\nimport re\nimport sys\n\nimport drigo\nimport numpy as np\n# Used by soil water balance point_swb_func\nfrom osgeo import gdal, ogr, osr\n\ntry:\n import python_common as dripy\nexcept ModuleNotFoundError:\n import sys\n sys.path.append('/home/dgketchum/PycharmProjects/pymetric/code')\n from support import python_common as dripy\n\n\ndef landsat_folder_split(landsat_ws):\n \"\"\"Return the ID portion of a full Landsat scene folder\"\"\"\n return landsat_id_split(os.path.basename(landsat_ws))\n\n\n# def landsat_name_split(folder_name):\n# \"\"\"Split Landsat folder name into components (Landsat, path, row, year, month, year)\n#\n# \"\"\"\n# # DEADBEEF - Scenes will not be merged, so it is unnecessary to\n# # to return a row_start and row_end\n# landsat_pre_re = re.compile('^(LT4|LT5|LE7|LC8)\\d{3}\\d{3}\\d{7}')\n# landsat_c1_re = re.compile('^(LT04|LT05|LE07|LC08)\\d{3}_\\d{6}_\\d{8}')\n# if landsat_pre_re.match(folder_name):\n# landsat = folder_name[0: 3]\n# path = folder_name[3: 6]\n# row_start = folder_name[7: 10]\n# row_end = folder_name[10: 13]\n# year = folder_name[14: 18]\n# doy = folder_name[18: 21]\n# elif landsat_c1_re.match(folder_name):\n# landsat = folder_name[0: 3]\n# path = folder_name[3: 6]\n# row_start = folder_name[6: 9]\n# row_end = folder_name[6: 9]\n# year = folder_name[9:13]\n# month = folder_name[13:16]\n# day = folder_name[13:16]\n# # elif landsat_cloud_re.match(folder_name):\n# # landsat = folder_name[0: 3]\n# # path = folder_name[3: 6]\n# # row_start = folder_name[7: 10]\n# # row_end = folder_name[10: 13]\n# # year = folder_name[14: 18]\n# # doy = folder_name[18: 21]\n# else:\n# logging.error(\n# 'ERROR: Could not parse landsat folder {}'.format(folder_name))\n# return landsat, path, row_start, row_end, year, month, day\n\n\ndef landsat_id_split(landsat_id):\n \"\"\"Split Landsat ID into components (Landsat, path, row, year, DOY)\n\n Parameters\n ----------\n landsat_id : str\n\n Returns\n -------\n tuple of the Landsat ID components\n\n \"\"\"\n landsat_re = re.compile(\n '^(LT04|LT05|LE07|LC08)_(?:\\w{4})_(\\d{3})(\\d{3})_'\n '(\\d{4})(\\d{2})(\\d{2})_(?:\\d{8})_(?:\\d{2})_(?:\\w{2})$')\n if landsat_re.match(landsat_id):\n m_groups = landsat_re.match(landsat_id).groups()\n satellite, path, row, year, month, day = m_groups[0:6]\n else:\n logging.error(\n 'ERROR: Could not parse landsat folder {}'.format(landsat_id))\n landsat, path, row, year, month, day = None, None, None, None, None, None\n\n return satellite, path, row, year, month, day\n\n\ndef band_dict_to_array(data_dict, band_dict):\n \"\"\"\n \n Parameters\n ----------\n data_dict : dict\n band_dict: dict\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return np.array(\n [v for k, v in sorted(data_dict.items())\n if k in band_dict.keys()]).astype(np.float32)\n\n\ndef landsat_band_image_dict(ws, landsat_re):\n \"\"\"Return a dictionary of Landsat images and band strings\n\n Copied from landsat_prep_scene_func.py\n Consider moving into et_common.py or making part of image class\n\n Parameters\n ----------\n ws :\n landsat_re :\n\n Returns\n -------\n dict\n\n \"\"\"\n if not os.path.isdir(ws):\n return dict()\n output_dict = dict()\n for item in os.listdir(ws):\n if not os.path.isfile(os.path.join(ws, item)):\n continue\n landsat_match = 
landsat_re.match(item)\n if not landsat_match:\n continue\n band = landsat_match.group('band').replace('B', '')\n # Only keep first thermal band from Landsat 7: \"B6_VCID_1\" -> \"6\"\n band = band.replace('_VCID_1', '')\n output_dict[band] = os.path.join(ws, item)\n\n return output_dict\n\n\ndef doy_range_func(landsat_doy_list, year, min_days_in_month=10):\n \"\"\"Calculate DOY Range\n\n Parameters\n ----------\n landsat_doy_list : list\n year : int\n min_days_in_month : int, optional\n\n Returns\n -------\n list\n \n \"\"\"\n # logging.info('\\nDOY List: {}'.format(landsat_doy_list))\n year = int(year)\n doy_start = int(landsat_doy_list[0])\n doy_end = int(landsat_doy_list[-1])\n doy_zero_dt = dt.datetime(year, 1, 1) + dt.timedelta(-1)\n doy_start_dt = doy_zero_dt + dt.timedelta(doy_start)\n doy_end_dt = doy_zero_dt + dt.timedelta(doy_end)\n # First day of current start month and last day of current end month\n month_start_dt = dt.datetime(\n year, dripy.doy2month(year, doy_start), 1)\n month_end_dt = dt.datetime(\n year, dripy.doy2month(year, doy_end),\n monthrange(year, dripy.doy2month(year, doy_end))[-1])\n # Won't work for December because datetime doesn't accept month 13\n # month_end_dt = dt.datetime(year, month_end + 1, 1) + dt.timedelta(-1)\n # First day of next start month and last day of prior end month\n month_start_next_dt = dt.datetime(\n year, dripy.doy2month(year, doy_start)+1, 1)\n month_end_prev_dt = dt.datetime(\n year, dripy.doy2month(year, doy_end), 1) + dt.timedelta(-1)\n # Count of number of days between doy and inner month endpoints\n month_start_day_count = (month_start_next_dt - doy_start_dt).days\n month_end_day_count = (doy_end_dt - month_end_prev_dt).days\n # Check that there are enough days in start/end months\n if month_start_day_count < min_days_in_month:\n doy_start = (month_start_next_dt - (doy_zero_dt)).days\n doy_start_dt = doy_zero_dt + dt.timedelta(doy_start)\n logging.info(\n ('\\nFirst day set to DOY: {:>3d} ({})\\n since there are '\n 'only {} days of data in the previous month').format(\n doy_start, doy_start_dt, month_start_day_count))\n else:\n doy_start = (month_start_dt - (doy_zero_dt)).days\n doy_start_dt = doy_zero_dt + dt.timedelta(doy_start)\n logging.info(('\\nFirst day set to DOY: {:>3d} ({})').format(\n doy_start, doy_start_dt))\n if month_end_day_count < min_days_in_month:\n doy_end = (month_end_prev_dt - (doy_zero_dt)).days\n doy_end_dt = doy_zero_dt + dt.timedelta(doy_end)\n logging.info(\n ('Last day set to DOY: {:>3d} ({})\\n There are '\n 'only {} days of data in the next month').format(\n doy_end, doy_end_dt, month_end_day_count))\n else:\n doy_end = (month_end_dt - (doy_zero_dt)).days\n doy_end_dt = doy_zero_dt + dt.timedelta(doy_end)\n logging.info(('Last day set to DOY: {:>3d} ({})').format(\n doy_end, doy_end_dt))\n return range(doy_start, doy_end+1)\n\n\ndef read_refet_instantaneous_func(refet_file, year, doy, localtime=None,\n ref_type='ETR'):\n \"\"\"Read in instantaneous RefET data\n\n Parameters\n ----------\n refet_file : str\n year : int\n doy : int\n Day of year.\n localtime :\n ref_type: {'ETR' (default), 'ETO'}, optional\n Reference surface type.\n\n Returns\n -------\n tuple of floats: dew_point, wind_speed, ea, etr, & etr_24hr\n\n \"\"\"\n logging.debug(' RefET: {}'.format(refet_file))\n\n # Field names\n year_field = 'Yr'\n month_field = 'Mo'\n day_field = 'Day'\n doy_field = 'DoY'\n hrmn_field = 'HrMn'\n tmax_field = 'Tmax'\n tmin_field = 'Tmin'\n rs_field = 'Rs'\n wind_field = 'Wind'\n dewp_field = 'DewP'\n if 
ref_type.upper() == 'ETO':\n etr_field = 'ASCE_stPM_ETo'\n else:\n etr_field = 'ASCE_stPM_ETr'\n\n # Field properties\n field_dict = dict()\n field_dict[month_field] = ('i8', '{:>2d}', '{:>2s}')\n field_dict[day_field] = ('i8', '{:>3d}', '{:>3s}')\n field_dict[year_field] = ('i8', '{:>4d}', '{:>4s}')\n field_dict[doy_field] = ('i8', '{:>3d}', '{:>3s}')\n field_dict[hrmn_field] = ('i8', '{:>4d}', '{:>4s}')\n field_dict[tmax_field] = ('f8', '{:>5.2f}', '{:>5s}')\n field_dict[tmin_field] = ('f8', '{:>5.2f}', '{:>5s}')\n field_dict[rs_field] = ('f8', '{:>4.0f}', '{:>4s}')\n field_dict[wind_field] = ('f8', '{:>5.2f}', '{:>5s}')\n field_dict[dewp_field] = ('f8', '{:>5.2f}', '{:>5s}')\n field_dict[etr_field] = ('f8', '{:>5.2f}', '{:>5s}')\n\n # If localtime is not set, return daily means\n if localtime is None:\n daily_flag = True\n # If localtime is set, check that localtime value is valid\n elif not (0 <= localtime <= 24):\n logging.error((\n '\\nERROR: localtime must be between 0 and 24.\\n'\n 'ERROR: value {} is invalid').format(localtime))\n sys.exit()\n else:\n daily_flag = False\n\n # Read in RefET file\n with open(refet_file, 'r') as refet_f:\n refet_list = refet_f.readlines()\n refet_f.close\n\n # Get line number where data starts\n header_split_line = 'RESULTS (SI Units):'\n refet_strip_list = [line.strip() for line in refet_list]\n try:\n header_line = refet_strip_list.index(header_split_line.strip())\n data_line = header_line + 6\n except IndexError:\n logging.error(\n '\\nERROR: The line \"RESULTS (SI Units):\" could not be found in the RefET file'\n '\\nERROR: This line is used to determine where to read data from the RefET file'\n '\\nERROR: The units may not be metric or the file may be empty\\n')\n sys.exit()\n # Split RefET file into header and data sections\n refet_header_list = refet_strip_list[header_line+2:data_line]\n refet_data_list = refet_list[data_line:]\n del refet_list, refet_strip_list\n\n # Filter spaces and newline characters at beginning and end\n # refet_list = [line.strip() for line in refet_list]\n refet_header_list = [line.strip() for line in refet_header_list]\n\n # This splits on whitespace\n # refet_list = [re.findall(r'[^\\s]+', line) for line in refet_list]\n refet_header_list = [\n re.findall(r'[^\\s]+', line) for line in refet_header_list]\n\n # Get field names\n refet_field_list = list(map(list, zip(*refet_header_list)))\n\n # join with spaces, remove '-', remove leading/trailing whitespace\n # Last, to match genfromtxt, replace ' ' with '_'\n refet_field_name_list = [\n ' '.join(l[:3]).replace('-', '').strip().replace(' ', '_')\n for l in refet_field_list]\n refet_field_unit_list = [\n l[3].replace('-', '') for l in refet_field_list]\n refet_field_count = len(refet_field_list)\n logging.debug(\n ' Field name list:\\n {}'.format(refet_field_name_list))\n logging.debug(\n ' Field unit list:\\n {}'.format(refet_field_unit_list))\n\n # Check that date fields exist\n if year_field not in refet_field_name_list:\n logging.error(\n ('\\nERROR: Year field {} was not found in the '\n 'RefET file\\n').format(year_field))\n sys.exit()\n if (month_field in refet_field_name_list and\n day_field in refet_field_name_list):\n doy_flag = False\n elif doy_field in refet_field_name_list:\n doy_flag = True\n else:\n logging.error((\n '\\nERROR: Month field {} and Day field {} or DOY field '\n '{} were not found in the RefET file\\n').format(\n month_field, day_field, doy_field))\n sys.exit()\n refet_field_name_list = [\n f for f in refet_field_name_list if f in 
field_dict.keys()]\n dtype_list = ','.join([field_dict[f][0] for f in refet_field_name_list])\n\n # Read data as record array\n refet_data = np.genfromtxt(\n refet_data_list, names=refet_field_name_list,\n dtype=dtype_list)\n\n # Build doy_array if necessary\n year_array = refet_data[year_field].astype(np.int)\n if not doy_flag:\n month_array = refet_data[month_field].astype(np.int)\n day_array = refet_data[day_field].astype(np.int)\n dt_array = np.array([\n dt.datetime(int(year), int(month), int(day))\n for year, month, day in zip(year_array, month_array, day_array)])\n doy_array = np.array([d.timetuple().tm_yday for d in dt_array])\n del month_array, day_array\n del dt_array\n else:\n doy_array = refet_data[doy_field].astype(np.int)\n doy_mask = (doy_array == doy) & (year_array == year)\n\n # Check that there is data for year/doy\n if not np.any(doy_mask):\n logging.error(\n '\\nERROR: No data for Year {} and DOY {}\\n'.format(\n year, doy))\n sys.exit()\n\n # Print daily data\n refet_data_subset = refet_data[doy_mask]\n del refet_data, doy_mask\n logging.debug(' ' + ' '.join(\n field_dict[f][2].format(f) for f in refet_field_name_list))\n for row in refet_data_subset:\n # DEADBEEF - In a try/except since it crashes for NumPy 1.6.1\n # The default for ArcGIS 10.1 is NumPy 1.6.1\n try:\n logging.debug(' ' + ' '.join(\n field_dict[f][1].format(value)\n for f, value in zip(refet_field_name_list, row)))\n except:\n pass\n\n # Extract sub arrays for interpolating\n hrmn_array = refet_data_subset[hrmn_field].astype(np.float32)\n\n # If times go from 1,2,...22,23,0 in a day, interpolation will break\n if hrmn_array[-1] == 0:\n hrmn_array[-1] = 2400\n\n # Convert HHMM to a float HH.MM to match localtime\n hrmn_array *= 0.01\n tmin_array = refet_data_subset[tmin_field].astype(np.float32)\n tmax_array = refet_data_subset[tmax_field].astype(np.float32)\n # rs_array = refet_data_subset[rs_field].astype(np.float32)\n wind_array = refet_data_subset[wind_field].astype(np.float32)\n dewp_array = refet_data_subset[dewp_field].astype(np.float32)\n etr_array = refet_data_subset[etr_field].astype(np.float32)\n\n # Calculate vapor pressure\n ea_array = saturation_vapor_pressure_func(dewp_array)\n # Interpolate dewpoint from RefET data\n # Add 0.5 hours because RefET data is an average of\n # the previous hour\n\n # This may need to be set by the user or adjusted\n tair_inst = float(np.interp(\n [localtime + 0.5], hrmn_array, tmax_array)[0])\n dew_point = float(np.interp(\n [localtime + 0.5], hrmn_array, dewp_array)[0])\n ea_inst = float(np.interp(\n [localtime + 0.5], hrmn_array, ea_array)[0])\n wind_speed = float(np.interp(\n [localtime + 0.5], hrmn_array, wind_array)[0])\n etr_inst = float(np.interp(\n [localtime + 0.5], hrmn_array, etr_array)[0])\n\n # ETr 24hr (mm)\n etr_24hr = float(np.sum(etr_array))\n\n return dew_point, wind_speed, ea_inst, etr_inst, etr_24hr\n\n\ndef read_refet_daily_func(refet_list, year, doy_range, ref_type='ETR'):\n \"\"\"Read in daily RefET data\n\n Parameters\n ----------\n refet_list :\n year : int\n doy_range : list\n ref_type: {'ETR' (default), 'ETO'}, optional\n Reference surface type.\n\n Returns\n -------\n dict of DOY,ETr key/values\n\n \"\"\"\n # Initialize ETr dictionary\n # doy_etr_dict = dict([(doy, 0) for doy in range(1,367)])\n doy_etr_dict = defaultdict(float)\n\n if ref_type.upper() == 'ETO':\n etr_field = 'ETo'\n else:\n etr_field = 'ETr'\n\n # Remove header information, everything above RESULTS\n # This re checks that any whitespace character can separate the 
words\n refet_results_re = re.compile('RESULTS\\s+\\(SI\\s+Units\\):')\n for i, refet_line in enumerate(refet_list):\n if refet_results_re.match(refet_line):\n refet_list[0:i+2] = []\n refet_header_list = refet_list[0:4]\n logging.debug('\\n RefET Data:')\n for refet_header_line in refet_header_list[0:4]:\n # logging.debug(' {}'.format(refet_header_line))\n refet_split_line = re.findall(r'[^\\s]+', refet_header_line)\n logging.debug(' ' + ' '.join(\n ['{:<5}'.format(i) for i in refet_split_line]))\n break\n try:\n len(refet_header_list)\n except NameError:\n logging.error(\n '\\nERROR: The line \"RESULTS (SI Units):\" could not be found in the RefET file'\n '\\nERROR: This line is used to determine where to read data from the RefET file'\n '\\nERROR: The units may not be metric or the file may be empty\\n')\n sys.exit()\n\n # From header rows, determine index for necessary fields\n for refet_header_line in refet_header_list:\n refet_split_line = re.findall(r'[^\\s]+', refet_header_line)\n try:\n refet_yr_col = refet_split_line.index('Yr')\n refet_header_col_count = len(refet_split_line)\n except ValueError:\n pass\n try:\n refet_doy_col = refet_split_line.index('DoY')\n except ValueError:\n pass\n try:\n refet_etr_col = refet_split_line.index(etr_field)\n except ValueError:\n pass\n if not refet_yr_col or not refet_doy_col or not refet_etr_col:\n logging.error('\\nERROR: One of the necessary fields was not '\n 'found in the RefET file\\n')\n sys.exit()\n\n # Calculate daily refet\n for refet_line in refet_list:\n # re finds every character that is not a whitespace character\n # and splits on the whitespace\n refet_split_line = re.findall(r'[^\\s]+', refet_line)\n if refet_split_line[refet_yr_col] == str(year):\n if refet_header_col_count != len(refet_split_line):\n logging.info(' {}'.format(refet_line))\n logging.error('\\nERROR: The RefET file may be missing data\\n'\n 'ERROR: The # of columns in the header '\n 'does not equal the # of columns of data')\n sys.exit()\n doy = int(refet_split_line[refet_doy_col])\n doy_etr_dict[doy] += float(refet_split_line[refet_etr_col])\n\n if not set(doy_range).issubset(doy_etr_dict.keys()):\n logging.error(\n ('\\nERROR: The RefET file does not have ' +\n 'data for year {}').format(year))\n sys.exit()\n return doy_etr_dict\n\n\ndef read_nvet_daily_func(nvet_list, year, doy_range):\n \"\"\"Read in daily NVET data\n\n Parameters\n ----------\n nvet_list\n year : int\n doy_range\n\n Returns\n -------\n dict of DOY,ETr key/values\n\n \"\"\"\n # Initialize RefET dictionary\n doy_etr_dict = dict([(doy, 0) for doy in range(1, 367)])\n\n # Remove header information, everything above RESULTS\n # This re checks that any whitespace character can separate the words\n nvet_header_list = nvet_list[0:5]\n nvet_list[0:5] = []\n logging.info(' NVET Header:')\n for nvet_header_line in nvet_header_list[0:5]:\n logging.info(' {}'.format(nvet_header_line))\n for nvet_line in nvet_list[0:3]:\n logging.info(' {}'.format(nvet_line))\n nvet_list[0:5] = []\n\n # Column numbers are hardcoded here\n logging.warning('\\n NVET columns are hardcoded and need to be checked')\n nvet_yr_col = 2\n nvet_doy_col = 3\n nvet_etr_col = 14\n logging.warning(' Year Column: {:2d}'.format(nvet_yr_col+1))\n logging.warning(' DOY Column: {:2d}'.format(nvet_doy_col+1))\n logging.warning(' RefET Column: {:2d}'.format(nvet_etr_col+1))\n\n # Calculate daily refet\n for nvet_line in nvet_list:\n # re finds every character that is not a whitespace character\n # and splits on the whitespace\n 
nvet_split_line = re.findall(r'[^\\s]+', nvet_line)\n if nvet_split_line[nvet_yr_col] == year:\n doy = int(nvet_split_line[nvet_doy_col])\n etr = float(nvet_split_line[nvet_etr_col])\n doy_etr_dict[doy] = etr\n\n # ETr must be greater than 0 to be valid?\n doy_valid_etr_list = [doy for doy in doy_range if doy_etr_dict[doy] > 0]\n\n # Check that there is ETr data for each DOY in doy_range\n if len(doy_valid_etr_list) == 0:\n logging.error('\\nERROR: The CSV ETr file does not contain data '\n 'for the year {}\\n'.format(year))\n sys.exit()\n elif set(doy_range) - set(doy_valid_etr_list):\n logging.error(\n '\\nERROR: The CSV ETr appears to have missing data'\n '\\n The following days are missing:\\n {}'.format(sorted(list(\n map(int, list(set(doy_range)-set(doy_valid_etr_list)))))))\n sys.exit()\n return doy_etr_dict\n\n\ndef read_csv_etr_daily_func(csv_etr_list, year, doy_range):\n \"\"\"Read in daily ETr from a CSV file\n\n Parameters\n ----------\n csv_etr_list :\n year : int\n doy_range :\n\n Returns\n -------\n dict\n\n \"\"\"\n # Initialize RefET dictionary\n doy_etr_dict = dict([(doy, 0) for doy in range(1, 367)])\n\n # Remove header information, everything above RESULTS\n # This re checks that any whitespace character can separate the words\n header_line = csv_etr_list[0]\n data_list = csv_etr_list[1:]\n logging.info(' CSV ETr data:')\n logging.info(' {}'.format(header_line))\n\n # Print the first few lines as a check\n for data_line in data_list[0:3]:\n logging.info(' {}'.format(data_line))\n\n # Column names are hardcoded here\n year_field = 'YEAR'\n doy_field = 'DOY'\n etr_field = 'ETR'\n field_i_dict = dict()\n\n # Figure out column index for each field name\n split_line = [s.upper() for s in header_line.split(',')]\n for field in [year_field, doy_field, etr_field]:\n try:\n field_i_dict[field] = split_line.index(field.upper())\n logging.info(' {} Column: {:>2d}'.format(\n field, field_i_dict[field]+1))\n except ValueError:\n logging.error(\n ('\\nERROR: {} field does not exist in '\n 'CSV ETr file').format(field))\n sys.exit()\n\n # Calculate daily refet\n for data_line in data_list:\n # re finds every character that is not a whitespace character\n # and splits on the whitespace\n split_line = data_line.split(',')\n if int(split_line[field_i_dict[year_field]]) == int(year):\n doy = int(split_line[field_i_dict[doy_field]])\n etr = float(split_line[field_i_dict[etr_field]])\n doy_etr_dict[doy] = etr\n\n # ETr must be greater than 0 to be valid?\n doy_valid_etr_list = [doy for doy in doy_range if doy_etr_dict[doy] > 0]\n\n # Check that there is ETr data for each DOY in doy_range\n if len(doy_valid_etr_list) == 0:\n logging.error(('\\nERROR: The CSV ETr file does not contain data '\n 'for the year {}\\n').format(year))\n sys.exit()\n elif set(doy_range) - set(doy_valid_etr_list):\n logging.error(\n '\\nERROR: The CSV ETr appears to have missing data'\n '\\n The following days are missing:\\n {}'.format(sorted(list(\n map(int, list(set(doy_range) - set(doy_valid_etr_list)))))))\n sys.exit()\n return doy_etr_dict\n\n\ndef fixed_etr_data_func(etr, year, doy_range):\n \"\"\"Assign a fixed ETr value to all doys in doy_range\n\n Parameters\n ----------\n etr\n year\n doy_range\n\n Returns\n -------\n dict\n\n \"\"\"\n return dict([(doy, etr) for doy in range(1, 367) if doy in doy_range])\n\n\ndef u_star_station_func(wind_speed_height, station_roughness,\n wind_speed_mod):\n \"\"\"U* at the station [m/s]\n\n Parameters\n ----------\n wind_speed_height : float\n station_roughness : float\n 
wind_speed_mod : float\n\n Returns\n -------\n float\n\n \"\"\"\n return ((wind_speed_mod * 0.41) /\n math.log(wind_speed_height / station_roughness))\n\n\ndef u3_func(u_star_station, z3, station_roughness):\n \"\"\"U at blending height (200m) [m/s]\n\n Parameters\n ----------\n u_star_station : float\n z3 : float\n station_roughness : float\n\n Returns\n -------\n float\n\n \"\"\"\n return (u_star_station * math.log(z3 / station_roughness)) / 0.41\n\n\ndef saturation_vapor_pressure_func(temperature):\n \"\"\"Saturation vapor pressure [kPa] from temperature\n\n Parameters\n ----------\n temperature : array_like\n Air temperature [C].\n\n Returns\n -------\n es : ndarray\n\n Notes\n -----\n es = 0.6108 * exp(17.27 * temperature / (temperature + 237.3))\n\n \"\"\"\n es = np.array(temperature, copy=True, ndmin=1).astype(np.float64)\n es += 237.3\n np.reciprocal(es, out=es)\n es *= temperature\n es *= 17.27\n np.exp(es, out=es)\n es *= 0.6108\n return es.astype(np.float32)\n\n\ndef doy_fraction_func(doy):\n \"\"\"Fraction of the DOY in the year [radians]\n\n Parameters\n ----------\n doy : float\n\n Returns\n -------\n float\n\n \"\"\"\n return doy * (2 * math.pi / 365.)\n\n\ndef delta_func(doy):\n \"\"\"Earth declination [radians]\n\n Parameters\n ----------\n doy : array_like\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return 0.409 * np.sin(doy_fraction_func(doy) - 1.39)\n\n\ndef air_pressure_func(elevation):\n \"\"\"Air pressure [kPa]\n\n Parameters\n ----------\n elevation : array_like\n Elevation [m].\n\n Returns\n -------\n pair : ndarray\n\n Notes\n -----\n pair = 101.3 * (((293.15 - 0.0065 * elev) / 293.15) ** 5.26)\n\n \"\"\"\n pair = np.array(elevation, copy=True, ndmin=1).astype(np.float64)\n pair *= -0.0065\n pair += 293.15\n pair /= 293.15\n np.power(pair, 5.26, out=pair)\n pair *= 101.3\n return pair.astype(np.float32)\n\n\ndef precipitable_water_func(pair, ea):\n \"\"\"Precipitable water in the atmopshere\n\n Parameters\n ----------\n pair : array_like or float\n Air pressure [kPa].\n ea : array_like or float\n Vapor pressure [kPa].\n\n Returns\n -------\n array_like or float\n\n References\n ----------\n .. [1] Garrison, J. and Adler, G. (1990). Estimation of precipitable water\n over the United States for application to the division of solar\n radiation into its direct and diffuse components. Solar Energy, 44(4).\n https://doi.org/10.1016/0038-092X(90)90151-2\n .. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy\n Balance for Mapping Evapotranspiration with Internalized Calibration\n (METRIC)-Model. 
Journal of Irrigation and Drainage Engineering, 133(4).\n https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)\n\n \"\"\"\n return pair * 0.14 * ea + 2.1\n\n\ndef dr_func(doy):\n \"\"\"Inverse square of the Earth-Sun Distance\n\n Parameters\n ----------\n doy: array_like\n Day of year.\n\n Returns\n -------\n ndarray\n\n Notes\n -----\n This function returns 1 / d^2, not d, for direct use in radiance to\n TOA reflectance calculation.\n pi * L * d^2 / (ESUN * cos(theta)) -> pi * L / (ESUN * cos(theta) * d)\n\n \"\"\"\n return 1.0 + 0.033 * np.cos(doy_fraction_func(doy))\n\n\ndef ee_dr_func(doy):\n \"\"\"Earth-Sun Distance values used by Earth Engine\n\n Parameters\n ----------\n doy: array_like\n Day of year.\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return 0.033 * np.cos(doy_fraction_func(doy)) + 1.0\n\n\ndef seasonal_correction_func(doy):\n \"\"\"Seasonal correction for solar time [hour]\n\n Parameters\n ----------\n doy: array_like\n Day of year.\n\n Returns\n -------\n ndarray\n\n \"\"\"\n b = 2 * math.pi * (doy - 81.) / 364.\n return 0.1645 * np.sin(2 * b) - 0.1255 * np.cos(b) - 0.0250 * np.sin(b)\n\n\ndef solar_time_rad_func(lon, time, sc):\n \"\"\"Solar time [hours]\n\n Parameters\n ----------\n lon : array_like\n UTC hour [radians].\n time : array_like\n UTC hour [hours].\n sc : array_like\n seasonal correction [hours].\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return time + (lon * 24 / (2 * math.pi)) + sc - 12\n\n\n# def solar_time_func(lon, time, sc):\n# \"\"\"Solar time (seconds) with longitude in degrees\"\"\"\n# return time + (lon / 15.) + sc\n\n\n# def solar_time_deg_func(lon, time, sc):\n# \"\"\"Solar time (seconds) with longitude in degrees\"\"\"\n# return time + (lon / 15.) + sc\n\n\ndef omega_func(solar_time):\n \"\"\"Hour angle [radians]\n\n Parameters\n ----------\n solar_time : array_like\n UTC hour.\n\n Returns\n -------\n omega : ndarray\n\n \"\"\"\n omega = (2 * math.pi / 24.0) * solar_time\n\n # Need to adjust omega so that the values go from -pi to pi\n # Values outside this range are wrapped (i.e. -3*pi/2 -> pi/2)\n omega = wrap_func(omega, -math.pi, math.pi)\n\n return omega\n\n\ndef wrap_func(x, x_min, x_max):\n \"\"\"Wrap floating point values into range\n\n Parameters\n ----------\n x : array_like\n array of values to wrap.\n x_min : float\n Minimum value in output range.\n x_max : float\n Maximum value in output range.\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return np.mod((x - x_min), (x_max - x_min)) + x_min\n\n\ndef omega_sunset_func(lat, delta):\n \"\"\"Sunset hour angle [radians] (Eqn 59)\n\n Parameters\n ----------\n lat : array_like\n Latitude [radians].\n delta : array_like\n Earth declination [radians].\n\n Returns\n -------\n ndarray\n\n \"\"\"\n return np.arccos(-np.tan(lat) * np.tan(delta))\n\n\ndef ra_daily_func(lat, doy):\n \"\"\"Daily extraterrestrial radiation [MJ m-2 d-1]\n\n Parameters\n ----------\n lat : array_like\n Latitude [radians].\n doy : array_like\n Day of year.\n\n Returns\n -------\n ndarray\n\n Notes\n -----\n This function is only being called by et_numpy.rn_24_func().\n That function could be changed to use the refet.calcs._ra_daily() function\n instead, in which case this function could be removed.\n\n \"\"\"\n\n delta = delta_func(doy)\n omegas = omega_sunset_func(lat, delta)\n theta = (omegas * np.sin(lat) * np.sin(delta) +\n np.cos(lat) * np.cos(delta) * np.sin(omegas))\n\n return (24. 
/ math.pi) * 4.92 * dr_func(doy) * theta\n\n\ndef cos_theta_solar_func(sun_elevation):\n \"\"\"Cosine of theta at a point given sun elevation angle\"\"\"\n return math.sin(sun_elevation * math.pi / 180.)\n\n\ndef cos_theta_centroid_func(t, doy, dr, lon_center, lat_center):\n \"\"\"Cosine of theta at a point\n\n Parameters\n ----------\n t :\n doy :\n dr :\n lon_center :\n lat_center :\n\n Returns\n -------\n cos_theta : float\n\n \"\"\"\n # Solar time seasonal correction\n sc = seasonal_correction_func(doy)\n # Earth declination\n delta = delta_func(doy)\n # Latitude in radians\n solar_time = solar_time_rad_func(lon_center, t, sc)\n omega = omega_func(solar_time)\n # Cosine of theta for a flat earth\n cos_theta = ((math.sin(delta) * math.sin(lat_center)) +\n (math.cos(delta) * math.cos(lat_center) * math.cos(omega)))\n log_f = ' {:<18s} {}'\n logging.debug('\\n' + log_f.format(\n 'Latitude Center:', (lat_center * math.pi / 180)))\n logging.debug(log_f.format(\n 'Longitude Center:', (lon_center * math.pi / 180)))\n logging.debug(log_f.format('Delta:', delta))\n logging.debug(log_f.format('Sc [hour]:', sc))\n logging.debug(log_f.format('Sc [min]:', sc*60))\n logging.debug(log_f.format('Phi:', lat_center))\n logging.debug(log_f.format('SolarTime [hour]:', solar_time))\n logging.debug(log_f.format('SolarTime [min]:', solar_time*60))\n logging.debug(log_f.format('Omega: ', omega))\n logging.debug(log_f.format('cos_theta:', cos_theta))\n # return (env.mask_array * cos_theta).astype(np.float32)\n\n return cos_theta\n\n\ndef cell_value_set(test_raster, test_name, cold_xy, hot_xy, log_level='INFO'):\n \"\"\"Extract the raster values at the cold and hot calibration points\n\n X and Y coordinates need to be in the same spatial reference as the raster\n\n Parameters\n ----------\n test_raster : str\n File path of the raster to be sampled.\n test_name : str\n Display name of the raster (for logging).\n cold_xy : tuple\n x and y coordinate of the cold calibration point.\n hot_xy : tuple\n x and y coordinate of the cold calibration point.\n log_level : str\n Logging level (INFO, DEBUG).\n\n Returns\n -------\n tuple of the values at the calibration points\n\n \"\"\"\n cold_flt = drigo.raster_value_at_xy(test_raster, cold_xy)\n hot_flt = drigo.raster_value_at_xy(test_raster, hot_xy)\n log_str = ' {:<14s} {:14.8f} {:14.8f}'.format(\n test_name + ':', cold_flt, hot_flt)\n if log_level == 'DEBUG':\n logging.debug(log_str)\n else:\n logging.info(log_str)\n\n return cold_flt, hot_flt\n\n\n\ndef raster_swb_func(output_dt, output_osr, output_cs, output_extent,\n awc_path, etr_ws, etr_re, ppt_ws, ppt_re,\n spinup_days=30, min_spinup_days=5):\n \"\"\"Compute the daily soil water balance for a raster for a single date\n\n Parameters\n ----------\n output_dt : datetime\n Target date.\n output_osr : class:`osr.SpatialReference\n Spatial reference.\n output_cs : float\n Cellsize.\n output_extent :\n Extent\n awc_path : str\n File path of the available water content raster.\n etr_ws : str\n Directory path of the ETr workspace, which contains separate rasters\n for each year.\n etr_re (:class:`re`):\n Compiled regular expression object from the Python native 're' module\n that will match ETr rasters.\n ppt_ws : str\n Directory path of the precipitation workspace, which contains separate\n rasters for each year.\n ppt_re : :class:`re`\n Compiled regular expression object from the native Python re module\n that will match precipitaiton rasters.\n spinup_days : int, optional\n Number of days that should be used in the 
spinup of the model\n (the default is 30).\n min_spinup_days : int, optional\n Minimum number of days that are needed for spinup of the model\n (the default is 5).\n\n Returns\n -------\n array: :class:`numpy.array`: soil evaporation coeficient (Ke)\n\n Notes\n -----\n Calculations will be done in AWC spatial reference & cellsize.\n Final Ke will be projected to output spatial reference & cellsize.\n\n References\n ----------\n .. [1] Allen, R., Pereira, L., Smith, M., Raes, D., & Wright, J. (2005).\n FAO-56 dual crop coefficient method for estimating evaporation from soil\n and application extensions.\n Jourlnal of Irrigation and Drainage Engineering, 131(1).\n 10.1061/(ASCE)0733-9437(2005)131:1(2)\n .. [2] Allen, R. (2011). Skin layer evaporation to account for small\n precipitation events - an enhancement to the FAO-56 evaporation model.\n Agricultural Water Management, 99.\n https://doi.org/10.1016/j.agwat.2011.08.008\n\n \"\"\"\n # DEADBEEF - There is probably a better way to handle the daterange input.\n # Perhaps something like setting a minimum spinup and maximum spinup\n # days and allowing the code to take however many etr and ppt rasters\n # it can find within that range is good. Also, we should probably\n # add in a flag for dry vs wet starting point (when it comes to\n # total evaporative water [tew])\n # logging.debug('Daily Soil Water Balance')\n\n # Compute list of dates needed for spinup\n # date_range function doesn't return end date so add 1 day to end\n dt_list = sorted(dripy.date_range(\n output_dt - dt.timedelta(days=spinup_days),\n output_dt + dt.timedelta(days=1)))\n year_list = sorted(list(set([d.year for d in dt_list])))\n\n # Get all available ETr and PPT paths in date range\n if not os.path.isdir(etr_ws):\n logging.error(' ETr folder does not exist\\n {}'.format(\n etr_ws))\n sys.exit()\n if not os.path.isdir(ppt_ws):\n logging.info(' PPT folder does not exist\\n {}'.format(\n ppt_ws))\n sys.exit()\n\n # DEADBEEF - What specific errors should be caught here?\n etr_path_dict = dict()\n ppt_path_dict = dict()\n for etr_name in os.listdir(etr_ws):\n try:\n test_year = etr_re.match(etr_name).group('YYYY')\n except:\n continue\n if int(test_year) in year_list:\n etr_path_dict[str(test_year)] = os.path.join(etr_ws, etr_name)\n for ppt_name in os.listdir(ppt_ws):\n try:\n test_year = ppt_re.match(ppt_name).group('YYYY')\n except:\n continue\n if int(test_year) in year_list:\n ppt_path_dict[str(test_year)] = os.path.join(ppt_ws, ppt_name)\n if not etr_path_dict:\n logging.error(' No ETr rasters were found for the point SWB\\n')\n sys.exit()\n elif not ppt_path_dict:\n logging.error(' No PPT rasters were found for the point SWB\\n')\n sys.exit()\n\n # Get raster properties from one of the rasters\n # Project Landsat scene extent to ETr/PPT rasters\n logging.debug(' ETr: {}'.format(etr_path_dict[str(output_dt.year)]))\n etr_ds = gdal.Open(etr_path_dict[str(output_dt.year)], 0)\n etr_osr = drigo.raster_ds_osr(etr_ds)\n etr_cs = drigo.raster_ds_cellsize(etr_ds, x_only=True)\n etr_x, etr_y = drigo.raster_ds_origin(etr_ds)\n etr_extent = drigo.project_extent(\n output_extent, output_osr, etr_osr, cellsize=output_cs)\n etr_extent.buffer_extent(etr_cs * 2)\n etr_extent.adjust_to_snap('EXPAND', etr_x, etr_y, etr_cs)\n etr_ds = None\n\n logging.debug(' PPT: {}'.format(ppt_path_dict[str(output_dt.year)]))\n ppt_ds = gdal.Open(ppt_path_dict[str(output_dt.year)], 0)\n ppt_osr = drigo.raster_ds_osr(ppt_ds)\n ppt_cs = drigo.raster_ds_cellsize(ppt_ds, x_only=True)\n ppt_x, ppt_y = 
drigo.raster_ds_origin(ppt_ds)\n ppt_extent = drigo.project_extent(\n output_extent, output_osr, ppt_osr, cellsize=output_cs)\n ppt_extent.buffer_extent(ppt_cs * 2)\n ppt_extent.adjust_to_snap('EXPAND', ppt_x, ppt_y, ppt_cs)\n ppt_ds = None\n\n # Get AWC raster properties\n # Project Landsat scene extent to AWC raster\n logging.debug(' AWC: {}'.format(awc_path))\n awc_ds = gdal.Open(awc_path, 0)\n awc_osr = drigo.raster_ds_osr(awc_ds)\n awc_cs = drigo.raster_ds_cellsize(awc_ds, x_only=True)\n awc_x, awc_y = drigo.raster_ds_origin(awc_ds)\n awc_extent = drigo.project_extent(\n output_extent, output_osr, awc_osr, cellsize=output_cs)\n awc_extent.buffer_extent(awc_cs * 4)\n awc_extent.adjust_to_snap('EXPAND', awc_x, awc_y, awc_cs)\n awc_ds = None\n\n # SWB computations will be done in the AWC OSR, cellsize, and extent\n awc = drigo.raster_to_array(\n awc_path, band=1, mask_extent=awc_extent,\n return_nodata=False).astype(np.float32)\n # Clip/project AWC to Landsat scene\n # awc = clip_project_raster_func(\n # awc_path, 1, gdal.GRA_NearestNeighbour,\n # awc_osr, awc_cs, awc_extent,\n # output_osr, output_cs, output_extent)\n\n # Convert units from cm/cm to mm/m\n # awc *= 1000\n # Scale available water capacity by 1000\n # Scale field capacity and wilting point from percentage to decimal\n # fc *= 0.01\n # wp *= 0.01\n\n # Initialize Soil Water Balance parameters\n # Readily evaporable water (mm)\n rew = 54.4 * awc + 0.8\n # rew = (54.4 * awc / 1000) + 0.8\n\n # Total evaporable water (mm)\n tew = 166.0 * awc - 3.7\n # tew = (166.0 * awc / 1000) - 3.7\n\n # Total evaporable water (mm)\n # tew = (fc - 0.5 * wp) * (0.1 * 1000)\n\n # Difference of TEW and REW\n # tew_rew = tew - rew\n\n # Dry initial Depletion\n de = np.copy(tew)\n d_rew = np.copy(rew)\n # de = np.copy(tew)\n # d_rew = np.copy(rew)\n # Half Initial Depletion\n # de = 0.5 * tew\n # d_rew = 0.5 * rew\n # Wet Initial Depletion\n # de = 0\n # d_rew = 0\n\n # Coefficients for Skin Layer Retention Efficiency, Allen (2011)\n c0 = 0.8\n c1 = 2 * (1 - c0)\n # ETr ke max\n ke_max = 1.0\n # ETo Ke max\n # ke_max = 1.2\n\n # Spinup model up to test date, iteratively calculating Ke\n # Pass doy as band number to raster_value_at_xy\n for spinup_dt in dt_list:\n logging.debug(' {}'.format(spinup_dt.date().isoformat()))\n etr = clip_project_raster_func(\n etr_path_dict[str(spinup_dt.year)],\n # int(spinup_dt.strftime('%j')), gdal.GRA_NearestNeighbour,\n int(spinup_dt.strftime('%j')), gdal.GRA_Bilinear,\n etr_osr, etr_cs, etr_extent, awc_osr, awc_cs, awc_extent)\n ppt = clip_project_raster_func(\n ppt_path_dict[str(spinup_dt.year)],\n # int(spinup_dt.strftime('%j')), gdal.GRA_NearestNeighbour,\n int(spinup_dt.strftime('%j')), gdal.GRA_Bilinear,\n ppt_osr, ppt_cs, ppt_extent, awc_osr, awc_cs, awc_extent)\n ke, de, d_rew = daily_swb_func(\n etr, ppt, de, d_rew, rew, tew, c0, c1, ke_max)\n\n # Project to output spatial reference, cellsize, and extent\n ke = drigo.project_array(\n # ke, gdal.GRA_NearestNeighbour,\n ke, gdal.GRA_Bilinear,\n awc_osr, awc_cs, awc_extent,\n output_osr, output_cs, output_extent,\n output_nodata=None)\n\n return ke\n\n\ndef clip_project_raster_func(input_raster, band, resampling_type,\n input_osr, input_cs, input_extent,\n ouput_osr, output_cs, output_extent):\n \"\"\"Clip and then project an input raster\n \n Parameters\n ----------\n input_raster\n band\n resampling_type\n input_osr\n input_cs\n input_extent\n ouput_osr\n output_cs\n output_extent\n\n Returns\n -------\n ndarray\n\n Notes\n -----\n This function is only 
called by the raster_swb_func and could eventually\n be moved to the drigo module.\n\n \"\"\"\n # Read array from input raster using input extent\n input_array = drigo.raster_to_array(\n input_raster, band=band, mask_extent=input_extent,\n return_nodata=False).astype(np.float32)\n\n # Convert nan to a nodata value so a copy isn't made in project_array\n input_array[np.isnan(input_array)] = drigo.numpy_type_nodata(\n input_array.dtype)\n\n # Project and clip array to block\n output_array = drigo.project_array(\n input_array, resampling_type,\n input_osr, input_cs, input_extent,\n ouput_osr, output_cs, output_extent)\n return output_array\n\n\ndef point_swb_func(test_dt, test_xy, test_osr, awc_path,\n etr_ws, etr_re, ppt_ws, ppt_re,\n spinup_days=30, min_spinup_days=5):\n \"\"\"Compute the daily soil water balance at a point for a single date\n\n Parameters\n ----------\n test_dt : class:`datetime.datetime`\n Target date.\n test_xy : tuple\n Tuple of the x and y coordinates for which the soil water balance is\n to be calculated. Must be in the same projection as the test_osr.\n test_osr : class:`osr.SpatialReference\n Spatial reference of the text_xy point coordinates.\n awc_path : str\n Filepath of the available water content raster.\n etr_ws : str\n Directory path of the ETr workspace, which Contains separate rasters \n for each year.\n etr_re : class:`re`\n Compiled regular expression object from the Python native 're' module\n that will match ETr rasters.\n ppt_ws : str\n Directory path of the precipitation workspace, which contains separate\n rasters for each year.\n ppt_re : class:`re`\n Compiled regular expression object from the native Python re module\n that will match precipitaiton rasters.\n spinup_days : int, optional\n Number of days that should be used in the spinup of the model\n (the default is 30).\n min_spinup_days : int, optinal\n Minimum number of days that are needed for spinup of the model\n (the default is 5).\n\n Returns\n -------\n ke : float\n Soil evaporation coefficient.\n\n Notes\n -----\n Spinup SWB model for N spinup dates and calculate the Ke (soil evaporation\n coefficient) for the desired x/y coordinates.\n\n References\n ----------\n .. [1] Allen, R., Pereira, L., Smith, M., Raes, D., & Wright, J. (2005).\n FAO-56 dual crop coefficient method for estimating evaporation from soil\n and application extensions.\n Jourlnal of Irrigation and Drainage Engineering, 131(1).\n 10.1061/(ASCE)0733-9437(2005)131:1(2)\n .. [2] Allen, R. (2011). Skin layer evaporation to account for small\n precipitation events - an enhancement to the FAO-56 evaporation model.\n Agricultural Water Management, 99.\n https://doi.org/10.1016/j.agwat.2011.08.008\n\n \"\"\"\n # DEADBEEF - There is probably a better way to handle the date range input.\n # Perhaps something like setting a minimum spinup and maximum spinup\n # days and allowing the code to take however many etr and ppt rasters\n # it can find within that range is good. 
Also, we should probably\n # add in a flag for dry vs wet starting point (when it comes to\n # total evaporative water [tew])\n logging.debug('Daily Soil Water Balance')\n logging.debug(' Test Point: {} {}'.format(*test_xy))\n\n # Compute list of dates needed for spinup\n # date_range function doesn't return end date so add 1 day to end\n dt_list = sorted(dripy.date_range(\n test_dt - dt.timedelta(days=spinup_days),\n test_dt + dt.timedelta(days=1)))\n year_list = sorted(list(set([d.year for d in dt_list])))\n\n # Get all available ETr and PPT paths in date range\n etr_path_dict = dict()\n ppt_path_dict = dict()\n if not os.path.isdir(etr_ws):\n logging.error(' ETr folder does not exist\\n {}'.format(\n etr_ws))\n sys.exit()\n if not os.path.isdir(ppt_ws):\n logging.info(' PPT folder does not exist\\n {}'.format(\n ppt_ws))\n sys.exit()\n\n # DEADBEEF - What specific errors should be caught here?\n for etr_name in os.listdir(etr_ws):\n try:\n test_year = etr_re.match(etr_name).group('YYYY')\n except:\n continue\n if int(test_year) in year_list:\n etr_path_dict[str(test_year)] = os.path.join(etr_ws, etr_name)\n for ppt_name in os.listdir(ppt_ws):\n try:\n test_year = ppt_re.match(ppt_name).group('YYYY')\n except:\n continue\n if int(test_year) in year_list:\n ppt_path_dict[str(test_year)] = os.path.join(ppt_ws, ppt_name)\n if not etr_path_dict:\n logging.error(' No ETr rasters were found for the point SWB\\n')\n sys.exit()\n elif not ppt_path_dict:\n logging.error(' No PPT rasters were found for the point SWB\\n')\n sys.exit()\n\n # for year in year_list:\n # etr_year_ws = os.path.join(etr_ws, str(year))\n # if os.path.isdir(etr_year_ws):\n # for etr_name in os.listdir(etr_year_ws):\n # try:\n # test_dt = dt.datetime.strptime(\n # etr_re.match(etr_name).group('YYYYMMDD'), '%Y%m%d')\n # except:\n # continue\n # if test_dt in dt_list:\n # etr_path_dict[test_dt.date().isoformat()] = os.path.join(\n # etr_year_ws, etr_name)\n # else:\n # logging.info(' ETr year folder does not exist\\n {}'.format(\n # etr_year_ws))\n\n # ppt_year_ws = os.path.join(ppt_ws, str(year))\n # if os.path.isdir(ppt_year_ws):\n # for ppt_name in os.listdir(ppt_year_ws):\n # try:\n # test_dt = dt.datetime.strptime(\n # ppt_re.match(ppt_name).group('YYYYMMDD'), '%Y%m%d')\n # except:\n # continue\n # if test_dt in dt_list:\n # ppt_path_dict[test_dt.date().isoformat()] = os.path.join(\n # ppt_year_ws, ppt_name)\n # else:\n # logging.info(' PPT year folder does not exist\\n {}'.format(\n # ppt_year_ws))\n\n # DEADBEEF - Need a different way to check for spin up dates\n # # Check the number of available ETr/PPT images\n # etr_spinup_days = len(etr_path_dict.keys()) - 1\n # ppt_spinup_days = len(ppt_path_dict.keys()) - 1\n # if etr_spinup_days < spinup_days:\n # logging.warning(' Only {}/{} ETr spinup days available'.format(\n # etr_spinup_days, spinup_days))\n # if etr_spinup_days <= min_spinup_days:\n # logging.error(' Exiting')\n # exit()\n # if ppt_spinup_days < spinup_days:\n # logging.warning(' Only {}/{} PPT spinup days available'.format(\n # ppt_spinup_days, spinup_days))\n # if ppt_spinup_days <= min_spinup_days:\n # logging.error(' Exiting')\n # sys.exit()\n\n # Project input point to AWC coordinate system\n awc_pnt = ogr.Geometry(ogr.wkbPoint)\n awc_pnt.AddPoint(test_xy[0], test_xy[1])\n awc_pnt.Transform(osr.CoordinateTransformation(\n test_osr, drigo.raster_path_osr(awc_path)))\n logging.debug(' AWC Point: {} {}'.format(\n awc_pnt.GetX(), awc_pnt.GetY()))\n\n # Project input point to ETr coordinate system\n 
etr_pnt = ogr.Geometry(ogr.wkbPoint)\n etr_pnt.AddPoint(test_xy[0], test_xy[1])\n etr_pnt.Transform(osr.CoordinateTransformation(\n test_osr, drigo.raster_path_osr(list(etr_path_dict.values())[0])))\n logging.debug(' ETr Point: {} {}'.format(\n etr_pnt.GetX(), etr_pnt.GetY()))\n\n # Project input point to PPT coordinate system\n ppt_pnt = ogr.Geometry(ogr.wkbPoint)\n ppt_pnt.AddPoint(test_xy[0], test_xy[1])\n ppt_pnt.Transform(osr.CoordinateTransformation(\n test_osr, drigo.raster_path_osr(list(ppt_path_dict.values())[0])))\n logging.debug(' PPT Point: {} {}'.format(\n ppt_pnt.GetX(), ppt_pnt.GetY()))\n\n # Read in soil properties\n awc = drigo.raster_value_at_point(awc_path, awc_pnt)\n # Convert units from cm/cm to mm/m\n # awc *= 1000\n # fc = drigo.raster_value_at_point(fc_path, test_pnt)\n # wp = drigo.raster_value_at_point(wp_path, test_pnt)\n # Scale available water capacity by 1000\n # Scale field capacity and wilting point from percentage to decimal\n # fc *= 0.01\n # wp *= 0.01\n\n # Initialize Soil Water Balance parameters\n # Readily evaporable water (mm)\n rew = 54.4 * awc + 0.8\n # rew = (54.4 * awc / 1000) + 0.8\n # Total evaporable water (mm)\n tew = 166.0 * awc - 3.7\n # tew = (166.0 * awc / 1000) - 3.7\n # Total evaporable water (mm)\n # tew = (fc - 0.5 * wp) * (0.1 * 1000)\n # Difference of TEW and REW\n # tew_rew = tew - rew\n\n # Dry initial Depletion\n de = float(tew)\n d_rew = float(rew)\n # de = np.copy(tew)\n # d_rew = np.copy(rew)\n # Half Initial Depletion\n # de = 0.5 * tew\n # d_rew = 0.5 * rew\n # Wet Initial Depletion\n # de = 0\n # d_rew = 0\n\n # Coefficients for Skin Layer Retention Efficiency, Allen (2011)\n c0 = 0.8\n c1 = 2 * (1 - c0)\n # ETr ke max\n ke_max = 1.0\n # ETo Ke max\n # ke_max = 1.2\n\n logging.debug(' AWC: {}'.format(awc))\n # logging.debug(' FC: {}'.format(fc))\n # logging.debug(' WP: {}'.format(wp))\n logging.debug(' REW: {}'.format(rew))\n logging.debug(' TEW: {}'.format(tew))\n logging.debug(' de: {}'.format(de))\n logging.debug(\n '\\n {:>10s} {:>5s} {:>5s} {:>5s} {:>5s} {:>5s}'.format(\n *'DATE,ETR,PPT,KE,DE,D_REW'.split(',')))\n\n # Spinup model up to test date, iteratively calculating Ke\n # Pass doy as band number to raster_value_at_point\n for spinup_dt in dt_list:\n etr, ppt = 0., 0.\n try:\n etr = drigo.raster_value_at_point(\n etr_path_dict[str(spinup_dt.year)], etr_pnt,\n band=int(spinup_dt.strftime('%j')))\n except KeyError:\n logging.debug(\n ' ETr raster for date {} does not exist'.format(\n spinup_dt.date().isoformat()))\n try:\n ppt = drigo.raster_value_at_point(\n ppt_path_dict[str(spinup_dt.year)], ppt_pnt,\n band=int(spinup_dt.strftime('%j')))\n except KeyError:\n logging.debug(\n ' PPT raster for date {} does not exist'.format(\n spinup_dt.date().isoformat()))\n\n ke, de, d_rew = map(float, daily_swb_func(\n etr, ppt, de, d_rew, rew, tew, c0, c1, ke_max))\n logging.debug((\n ' {:>10s} {:>5.2f} {:>5.2f} {:>5.2f} {:>5.2f} {:>5.2f}').format(\n spinup_dt.date().isoformat(), etr, ppt, ke, de, d_rew))\n \n return ke\n\n\ndef array_swb_func(etr, ppt, awc):\n \"\"\"Iteratively compute the daily soil water balance through an array stack\n\n Parameters\n ----------\n etr : ndarray\n Daily reference ET [mm].\n ppt : ndarray\n Daily Precipitaiton [mm].\n awc : array_like\n Available water content [mm].\n\n Returns\n -------\n ke : ndarray\n Soil evaporation coefficient.\n\n Notes\n -----\n Script will assume the 0th axis of the input arrays is time.\n\n Spinup days are assumed to be in the data.\n\n References\n ----------\n .. 
[1] Allen, R., Pereira, L., Smith, M., Raes, D., & Wright, J. (2005).\n FAO-56 dual crop coefficient method for estimating evaporation from soil\n and application extensions.\n Jourlnal of Irrigation and Drainage Engineering, 131(1).\n 10.1061/(ASCE)0733-9437(2005)131:1(2)\n .. [2] Allen, R. (2011). Skin layer evaporation to account for small\n precipitation events - an enhancement to the FAO-56 evaporation model.\n Agricultural Water Management, 99.\n https://doi.org/10.1016/j.agwat.2011.08.008\n\n \"\"\"\n # logging.debug('Daily Soil Water Balance')\n ke = np.full(etr.shape, np.nan, np.float32)\n\n # Initialize Soil Water Balance parameters\n # Readily evaporable water (mm)\n rew = 54.4 * awc + 0.8\n # rew = (54.4 * awc / 1000) + 0.8\n\n # Total evaporable water (mm)\n tew = 166.0 * awc - 3.7\n # tew = (166.0 * awc / 1000) - 3.7\n\n # Total evaporable water (mm)\n # tew = (fc - 0.5 * wp) * (0.1 * 1000)\n\n # Difference of TEW and REW\n # tew_rew = tew - rew\n\n # Half Initial Depletion\n # de = 0.5 * tew\n # d_rew = 0.5 * rew\n\n # Dry initial Depletion\n de = np.copy(tew)\n d_rew = np.copy(rew)\n\n # Wet Initial Depletion\n # de = 0\n # d_rew = 0\n\n # Coefficients for Skin Layer Retention Efficiency, Allen (2011)\n c0 = 0.8\n c1 = 2 * (1 - c0)\n\n # ETr ke max\n ke_max = 1.0\n\n for i in range(etr.shape[0]):\n ke[i], de, d_rew = daily_swb_func(\n etr[i], ppt[i], de, d_rew, rew, tew, c0, c1, ke_max)\n return ke\n\n\ndef daily_swb_func(etr, ppt, de_prev, d_rew_prev, rew, tew,\n c0=0.8, c1=0.4, ke_max=1.0):\n \"\"\"Compute the daily soil water balance for a single time step\n\n Parameters\n ----------\n etr : array_like\n Daily reference ET [mm].\n ppt : array_like\n Precipitation [mm].\n de_prev : \n d_rew_prev : \n rew : \n tew : \n c0 :\n (the default is 0.8).\n c1 :\n (the default is 0.4).\n ke_max :\n (the default is 1.0).\n\n Returns\n -------\n tuple: numpy.arrays (ke, de, d_rew)\n\n References\n ----------\n .. [1] Allen, R., Pereira, L., Smith, M., Raes, D., & Wright, J. (2005).\n FAO-56 dual crop coefficient method for estimating evaporation from soil\n and application extensions.\n Jourlnal of Irrigation and Drainage Engineering, 131(1).\n https://10.1061/(ASCE)0733-9437(2005)131:1(2)\n .. [2] Allen, R. (2011). 
Skin layer evaporation to account for small\n precipitation events - an enhancement to the FAO-56 evaporation model.\n Agricultural Water Management, 99.\n https://doi.org/10.1016/j.agwat.2011.08.008\n\n \"\"\"\n # Stage 1 evaporation (Eqn 1)\n e1 = np.array(etr, copy=True, ndmin=1)\n e1 *= ke_max\n\n # Evaporation reduction coefficient (Eqn 5b)\n kr = np.clip((tew - de_prev) / (tew - rew), 0, 1)\n\n # Fraction of time interval residing in stage 1 (Eqn 10b)\n # Don't calculate \".min(1)\" here, wait until in Es calc\n ft = np.clip(np.nan_to_num((rew - d_rew_prev) / e1), 0, 1)\n\n # Total evaporation from the soil (Eqn 11)\n es = np.clip((1 - ft) * kr * e1 - d_rew_prev + rew, 0, e1)\n\n # Infiltration efficiency factor (Eqn 13)\n ceff = np.clip(c1 * ((tew - de_prev) / tew) + c0, 0, 1)\n\n # Depletion of the skin layer\n # With skin evap calculation (Eqn 12)\n # Modified to remove fb adjustment\n d_rew = np.copy(np.clip((d_rew_prev - (ceff * ppt) + es), 0, rew))\n # d_rew = np.clip((d_rew_prev - (ceff * ppt) + es), 0, rew)\n\n # Without skin evap (Eqn )\n # var d_rew = rew\n # Depth of evaporation of the TEW surface soil layer (Eqn 9)\n # Modified to remove fb adjustment\n de = np.copy(np.clip(de_prev - ppt + es, 0, tew))\n # de = np.clip(de_prev - ppt + es, 0, tew)\n\n # # Save current as previous for next iteration\n # de_prev = de\n # d_rew_prev = d_rew\n\n # Evaporation coefficient (Eqn )\n ke = np.clip(np.nan_to_num(es / etr), 0, 1)\n \n return ke, de, d_rew\n"
]
| [
[
"numpy.array",
"numpy.power",
"numpy.clip",
"numpy.isnan",
"numpy.nan_to_num",
"numpy.cos",
"numpy.genfromtxt",
"numpy.full",
"numpy.sin",
"numpy.copy",
"numpy.tan",
"numpy.any",
"numpy.interp",
"numpy.mod",
"numpy.reciprocal",
"numpy.exp",
"numpy.sum"
]
]
|
ohadoh-math/invertible-resnet | [
"4f05b9d1761c2d46cc05d9748ef3e690f8b9c0b2"
]
| [
"design-experiments-digest.py"
]
| [
"#!/usr/bin/env python3\n\n\nimport os\nimport json\nimport logging\nimport numpy\nfrom pathlib import Path\nfrom argparse import ArgumentParser\nfrom collections import defaultdict\nfrom matplotlib import pyplot\n\n\ndef main():\n arg_parser = ArgumentParser(description=\"Generate output digest from a design experiments directory.\")\n arg_parser.add_argument(\"-o\", \"--output\", type=Path, default=Path(\"/tmp/design-results.png\"), help=\"Output graph.\")\n arg_parser.add_argument(\"resultsdir\", type=Path, help=\"The results directory containing all the experiments.\")\n args = arg_parser.parse_args()\n\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n )\n\n logging.info(\"looking for parameters and accuracy files...\")\n param_files = [\n (Path(base_dir)/'params.json', Path(base_dir)/'accuracy.json')\n for base_dir, _dirs, files in os.walk(args.resultsdir)\n if {'params.json', 'accuracy.json'} <= set(files)\n ]\n\n logging.info(\"found %d experiments, collecting them\", len(param_files))\n experiments = defaultdict(lambda: defaultdict(list))\n for params_file, acc_file in param_files:\n params = json.loads(params_file.read_text())\n experiments[params['design']][params['coeff']].append(json.loads(acc_file.read_text())['accuracy'])\n\n logging.info(\"averaging\")\n experiments = {\n design: sorted(\n {\n coeff: sum(acc_list)/len(acc_list)\n for coeff, acc_list in params_info.items()\n }.items(),\n key=lambda param_pair: param_pair[0], # sort by coeff\n )\n for design, params_info in experiments.items()\n }\n\n logging.info(\"to numpy\")\n experiments = {\n design: (\n numpy.array([coeff for coeff, _ in samples]),\n numpy.array([sample for _, sample in samples]),\n )\n for design, samples in experiments.items()\n }\n\n logging.info(\"plotting!\")\n fig, axis = pyplot.subplots(nrows=1, ncols=1, figsize=(16, 12), dpi=80)\n axis.set_title(\"Total Loss\")\n for design, (coeff, samples) in experiments.items():\n logging.info(\" plotting %s\", design)\n axis.plot(coeff, samples, label=design)\n fig.legend(loc=\"upper right\")\n fig.savefig(args.output)\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.subplots"
]
]
|
cdchushig/imbalanced-learn | [
"f02e7c7c2c021c85823cace405ca2c58ad4ff147"
]
| [
"imblearn/under_sampling/_prototype_selection/tests/test_condensed_nearest_neighbour.py"
]
| [
"\"\"\"Test the module condensed nearest neighbour.\"\"\"\n# Authors: Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# License: MIT\n\nimport pytest\nimport numpy as np\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom imblearn.under_sampling import CondensedNearestNeighbour\n\nRND_SEED = 0\nX = np.array(\n [\n [2.59928271, 0.93323465],\n [0.25738379, 0.95564169],\n [1.42772181, 0.526027],\n [1.92365863, 0.82718767],\n [-0.10903849, -0.12085181],\n [-0.284881, -0.62730973],\n [0.57062627, 1.19528323],\n [0.03394306, 0.03986753],\n [0.78318102, 2.59153329],\n [0.35831463, 1.33483198],\n [-0.14313184, -1.0412815],\n [0.01936241, 0.17799828],\n [-1.25020462, -0.40402054],\n [-0.09816301, -0.74662486],\n [-0.01252787, 0.34102657],\n [0.52726792, -0.38735648],\n [0.2821046, -0.07862747],\n [0.05230552, 0.09043907],\n [0.15198585, 0.12512646],\n [0.70524765, 0.39816382],\n ]\n)\nY = np.array([1, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 2, 2, 2, 2, 1, 2, 1])\n\n\ndef test_cnn_init():\n cnn = CondensedNearestNeighbour(random_state=RND_SEED)\n\n assert cnn.n_seeds_S == 1\n assert cnn.n_jobs is None\n\n\ndef test_cnn_fit_resample():\n cnn = CondensedNearestNeighbour(random_state=RND_SEED)\n X_resampled, y_resampled = cnn.fit_resample(X, Y)\n\n X_gt = np.array(\n [\n [-0.10903849, -0.12085181],\n [0.01936241, 0.17799828],\n [0.05230552, 0.09043907],\n [-1.25020462, -0.40402054],\n [0.70524765, 0.39816382],\n [0.35831463, 1.33483198],\n [-0.284881, -0.62730973],\n [0.03394306, 0.03986753],\n [-0.01252787, 0.34102657],\n [0.15198585, 0.12512646],\n ]\n )\n y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])\n assert_array_equal(X_resampled, X_gt)\n assert_array_equal(y_resampled, y_gt)\n\n\ndef test_cnn_fit_resample_with_object():\n knn = KNeighborsClassifier(n_neighbors=1)\n cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=knn)\n X_resampled, y_resampled = cnn.fit_resample(X, Y)\n\n X_gt = np.array(\n [\n [-0.10903849, -0.12085181],\n [0.01936241, 0.17799828],\n [0.05230552, 0.09043907],\n [-1.25020462, -0.40402054],\n [0.70524765, 0.39816382],\n [0.35831463, 1.33483198],\n [-0.284881, -0.62730973],\n [0.03394306, 0.03986753],\n [-0.01252787, 0.34102657],\n [0.15198585, 0.12512646],\n ]\n )\n y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])\n assert_array_equal(X_resampled, X_gt)\n assert_array_equal(y_resampled, y_gt)\n\n cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=1)\n X_resampled, y_resampled = cnn.fit_resample(X, Y)\n assert_array_equal(X_resampled, X_gt)\n assert_array_equal(y_resampled, y_gt)\n\n\ndef test_cnn_fit_resample_with_wrong_object():\n knn = \"rnd\"\n cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=knn)\n with pytest.raises(ValueError, match=\"has to be a int or an \"):\n cnn.fit_resample(X, Y)\n"
]
| [
[
"numpy.array",
"sklearn.utils._testing.assert_array_equal",
"sklearn.neighbors.KNeighborsClassifier"
]
]
|
fuz-woo/jpype | [
"3ffb1e7a75402545c1d669f4bc5836b08b76b6ae"
]
| [
"test/jpypetest/test_conversionShort.py"
]
| [
"# *****************************************************************************\n# Copyright 2017 Karl Einar Nelson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# *****************************************************************************\nimport jpype\nimport sys\nimport logging\nimport time\nimport common\n\nif sys.version > '3':\n long = int\n\n\ndef haveNumpy():\n try:\n import numpy\n return True\n except ImportError:\n return False\n\n\nclass ConversionShortTestCase(common.JPypeTestCase):\n def setUp(self):\n common.JPypeTestCase.setUp(self)\n self.Test = jpype.JClass(\"jpype.types.MethodsTest\")()\n\n def testShortFromInt(self):\n self.assertEqual(self.Test.callShort(int(123)), 123)\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPInt(self):\n import numpy as np\n self.assertEqual(self.Test.callShort(np.int(123)), 123)\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPInt8(self):\n import numpy as np\n self.assertEqual(self.Test.callShort(np.int8(123)), 123)\n self.assertEqual(self.Test.callShort(np.uint8(123)), 123)\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPInt16(self):\n import numpy as np\n self.assertEqual(self.Test.callShort(np.int16(123)), 123)\n self.assertEqual(self.Test.callShort(np.uint16(123)), 123)\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPInt32(self):\n import numpy as np\n self.assertEqual(self.Test.callShort(np.int32(123)), 123)\n self.assertEqual(self.Test.callShort(np.uint32(123)), 123)\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPInt64(self):\n import numpy as np\n self.assertEqual(self.Test.callShort(np.int64(123)), 123)\n self.assertEqual(self.Test.callShort(np.uint64(123)), 123)\n\n def testShortFromFloat(self):\n with self.assertRaises(TypeError):\n self.Test.callShort(float(2))\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPFloat(self):\n import numpy as np\n with self.assertRaises(TypeError):\n self.Test.callShort(np.float(2))\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPFloat32(self):\n import numpy as np\n with self.assertRaises(TypeError):\n self.Test.callShort(np.float32(2))\n\n @common.unittest.skipUnless(haveNumpy(), \"numpy not available\")\n def testShortFromNPFloat64(self):\n import numpy as np\n with self.assertRaises(TypeError):\n self.Test.callShort(np.float64(2))\n\n def testShortRange(self):\n with self.assertRaises(OverflowError):\n self.Test.callShort(long(1e10))\n with self.assertRaises(OverflowError):\n self.Test.callShort(long(-1e10))\n"
]
| [
[
"numpy.uint32",
"numpy.uint8",
"numpy.int32",
"numpy.int8",
"numpy.int16",
"numpy.int",
"numpy.int64",
"numpy.uint16",
"numpy.uint64",
"numpy.float64",
"numpy.float32",
"numpy.float"
]
]
|
rgrumbine/seaice-concentration | [
"80449bb23028c8a211fd050ff694b52eb5fc6bde"
]
| [
"sorc/filter/version3.py"
]
| [
"import os\nimport sys\nimport numpy as np\nimport numpy.ma as ma\nimport netCDF4\nfrom netCDF4 import Dataset\n\n#----------------------------------------------\ndef oiv2(lat, lon):\n dlat = 0.25\n dlon = 0.25\n firstlat = -89.875\n firstlon = 0.125\n if (lon < 0):\n lon += 360.\n j = round( (lat - firstlat)/dlat )\n i = round( (lon - firstlon)/dlon )\n return (j,i)\n\ndef rg12th(lat, lon):\n dlat = -1./12.\n dlon = 1./12.\n firstlat = 90. - dlat/2.\n firstlon = dlon/2.\n if (lon < 0):\n lon += 360.\n j = int(round( (lat - firstlat)/dlat ))\n i = int(round( (lon - firstlon)/dlon ))\n return (j,i)\n\ndef delta(x,y):\n return (x-y)/(x+y)\n\n#----------------------------------------------\n#matchup :\n#longitude, latitude, quality, land, icec; \n# ice_land, ice_post, ice_distance; sst, ice_sst\nclass match:\n\n def __init__(self, satid=248, latitude = 95., longitude = 95., icec = 95., land = 95, quality = 95, ice_land = 95, ice_post = 95, ice_distance = 95., sst = 95., ice_sst = 95.):\n self.satid = satid\n self.latitude = latitude\n self.longitude = longitude\n self.icec = icec\n self.land = land\n self.quality = quality\n self.ice_land = 95\n self.ice_post = 95\n self.ice_distance = 95.\n self.sst = 95.\n self.ice_sst = 95.\n self.tb = np.zeros((7))\n #print(\"done with init\", flush=True)\n\n def show(self, fout = sys.stdout):\n print(satid, \"{:9.4f}\".format(self.longitude), \"{:8.4f}\".format(self.latitude), \n \"{:.2f}\".format(self.icec), \"{:.2f}\".format(self.land), self.quality, \n \"{:3d}\".format(self.ice_land), \"{:3d}\".format(self.ice_post), \n \"{:7.2f}\".format(self.ice_distance), \n \" \", \"{:.2f}\".format(self.sst), \"{:.2f}\".format(self.ice_sst), \n \" \", \"{:6.2f}\".format(self.tb[0]),\n \"{:6.2f}\".format(self.tb[1]),\n \"{:6.2f}\".format(self.tb[2]),\n \"{:6.2f}\".format(self.tb[3]),\n \"{:6.2f}\".format(self.tb[4]),\n \"{:6.2f}\".format(self.tb[5]),\n \"{:6.2f}\".format(self.tb[6]),\n file=fout)\n\n def add_tb(self, tb):\n for i in range (0,7):\n self.tb[i] = tb[i]\n\n def add_oiv2(self, sst, ice_sst):\n j,i = oiv2(self.latitude, self.longitude)\n self.sst = sst[j,i]\n self.ice_sst = ice_sst[j,i]\n\n def add_icefix(self, ice_land, ice_post, ice_distance):\n j,i = rg12th(self.latitude, self.longitude)\n self.ice_land = ice_land[j,i]\n self.ice_post = ice_post[j,i]\n self.ice_distance = ice_distance[j,i]\n\n def __getitem__(self, i):\n return(tb[i])\n\n###############################################################\n\ntb = np.zeros((7))\n\nicenc = Dataset('l2out.f248.51.nc', 'r', format='NETCDF4')\nnobs = len(icenc.dimensions[\"nobs\"])\nprint(\"nobs = \",nobs)\nlongitude = np.zeros((nobs)) \nlatitude = np.zeros((nobs)) \nicec = np.zeros((nobs)) \nquality = np.zeros((nobs), dtype='int') \nsatid = np.zeros((nobs), dtype='int') \nland = np.zeros((nobs)) \ndtg1 = np.zeros((nobs), dtype='int') \ndtg2 = np.zeros((nobs), dtype='int') \nt19v = np.zeros((nobs)) \nt19h = np.zeros((nobs)) \nt22v = np.zeros((nobs)) \nt37v = np.zeros((nobs)) \nt37h = np.zeros((nobs)) \nt85v = np.zeros((nobs)) \nt85h = np.zeros((nobs)) \n\nlongitude = icenc.variables[\"longitude\"][:] \nlatitude = icenc.variables[\"latitude\"][:] \nicec = icenc.variables[\"ice_concentration\"][:] \nquality = icenc.variables[\"quality\"][:] \nsatid = icenc.variables[\"satid\"][:] \nland = icenc.variables[\"land_flag\"][:] \ndtg1 = icenc.variables[\"dtg_yyyymmdd\"][:] \ndtg2 = icenc.variables[\"dtg_hhmm\"][:] \nt19v = icenc.variables[\"tb_19V\"][:] \nt19h = icenc.variables[\"tb_19H\"][:] \nt22v = 
icenc.variables[\"tb_22V\"][:] \nt37v = icenc.variables[\"tb_37V\"][:] \nt37h = icenc.variables[\"tb_37H\"][:] \nt85v = icenc.variables[\"tb_85V\"][:] \nt85h = icenc.variables[\"tb_85H\"][:] \n\nall = []\nnpts = nobs\nfor k in range(0,npts):\n tmp = match(satid = satid[k], longitude = longitude[k], latitude = latitude[k], \n quality = quality[k], land = land[k], icec = icec[k])\n all.append(tmp) \n tb[0] = t19v[k]\n tb[1] = t19h[k]\n tb[2] = t22v[k]\n tb[3] = t37v[k]\n tb[4] = t37h[k]\n tb[5] = t85v[k]\n tb[6] = t85h[k]\n all[k].add_tb(tb)\n\nprint(\"done reading in\",flush=True)\n# create logical masks:\n#unknown = ma.masked_array(ice_land > -1) # unknown points, which starts as all of them\nunknown = ma.masked_array(satid != 248) # unknown points, which is anything not F15\nknown = ma.logical_not(unknown)\nprint(\"unknown, known lens: \",len(unknown.nonzero()[0]), len(known.nonzero()[0]) , flush=True)\nnobs = len(unknown.nonzero()[0])\n\n#exit(0)\n\n#----------------------------------------------\n#read in skip file\n#read in land mask file\n#read in distance to land\n\nicefix = Dataset('seaice_fixed_fields.nc', 'r', format='NETCDF4')\nnlats = len(icefix.dimensions[\"nlats\"])\nnlons = len(icefix.dimensions[\"nlons\"])\n\nice_longitude = np.zeros((nlats, nlons),dtype=\"double\") \nice_latitude = np.zeros((nlats, nlons),dtype=\"double\") \nice_distance = np.zeros((nlats, nlons),dtype=\"float\") \n\nice_land = np.zeros((nlats, nlons))\nice_land = icefix.variables[\"land\"] [:,:] \n\nice_post = np.zeros((nlats, nlons))\nice_post = icefix.variables[\"posteriori\"][:,:] \n\nice_longitude = icefix.variables[\"longitude\"][:,:] \nice_latitude = icefix.variables[\"latitude\"] [:,:] \nice_distance = icefix.variables[\"distance_to_land\"][:,:] \nice_distance /= 1000. 
#Convert to km\n\nfor k in range(0,len(all)):\n all[k].add_icefix(ice_land, ice_post, ice_distance)\n\nprint(\"done adding in ice fixed\",flush=True)\n#exit(0)\n\n#--------------------------------------------------------\n# Use SST from qdoi v2, including its sea ice cover\n#sstgrid = Dataset('avhrr-only-v2.20180228.nc', 'r', format='NETCDF4')\nsstgrid = Dataset('avhrr-only.nc', 'r', format='NETCDF4')\nsst_nlats = len(sstgrid.dimensions[\"lat\"])\nsst_nlons = len(sstgrid.dimensions[\"lon\"])\n\nsst = np.zeros((sst_nlats, sst_nlons))\nice_sst = np.zeros((sst_nlats, sst_nlons))\n\nsst = sstgrid.variables[\"sst\"][0,0,:,:]\nice_sst = sstgrid.variables[\"ice\"][0,0,:,:]\n\nfor k in range(0,len(all)):\n all[k].add_oiv2(sst, ice_sst)\n\nprint(\"done adding in sst \",flush=True)\n#---------------------------------------------------------------------\n#------------- All collected now, print out : ----------\n#fout = open(\"all_tb\",\"w\")\n#for i in range(0,len(all)):\n# #if (all[i].ice_land != 157):\n# all[i].show()\n#fout.close()\n#--------------------------------------------------------\n\ndel ice_land\ndel icec\ndel sst\n\nice_land = np.zeros((npts))\nicec = np.zeros((npts))\nsst = np.zeros((npts))\nfor i in range(0,npts):\n ice_land[i] = all[i].ice_land\n icec[i] = all[i].ice_sst\n sst[i] = all[i].sst\n\n\n#include coast points as being land (sidelobe issues)\nicemask = ma.masked_array(icec > 0)\nlandmask = ma.masked_array(ice_land >= 157 )\nwatermask = ma.masked_array(ice_land < 100)\nicemask = ma.logical_and(icemask, unknown)\nlandmask = ma.logical_and(landmask, unknown)\nwatermask = ma.logical_and(watermask, unknown)\n\n#Distinguish between water and ice-covered water\nnot_ice = np.logical_not(icemask)\nwatermask = ma.logical_and(watermask, not_ice)\n\n# Get the indices of the 'true' points\niceindices = icemask.nonzero()\nmland = landmask.nonzero()\nwater = watermask.nonzero()\n\nnicepts = len(iceindices[0])\nnlandpts = len(mland[0])\nnwaterpts = len(water[0])\n#---------------------------------------------------------------------\n# All data read in and apportioned, \n# ice, land, water, and unknown pts. 
masks defined\n#\n#---------------------------------------------------------------------\nprint(\"n ice, land, water, nobs \",nicepts, nlandpts, nwaterpts, nobs)\nprint(\"p ice, land, water, nobs \",nicepts/float(nobs), nlandpts/float(nobs), \n nwaterpts/float(nobs), nobs, flush=True)\npwater = nwaterpts/float(nobs)\npland = nlandpts/float(nobs)\npice = nicepts/float(nobs)\n\n# Define utilities for doing the assessment:\n\ndef bayes(xvec, xcrit, label, unknown, fout = sys.stdout ):\n warm = ma.masked_array(xvec > xcrit)\n warm = ma.logical_and(warm, unknown)\n nwarm = len(warm.nonzero()[0]) \n lmask = np.logical_and(landmask, warm)\n imask = np.logical_and(icemask, warm)\n omask = np.logical_and(watermask, warm)\n pwarm = float(nwarm)/float(nobs)\n pover_land = len(lmask.nonzero()[0])/nlandpts\n pover_water = len(omask.nonzero()[0])/nwaterpts\n pover_ice = len(imask.nonzero()[0])/nicepts\n if (pwarm > 0):\n print(label, \"hot \", xcrit,\n \"{:5.3f}\".format(pover_ice * pice / pwarm) ,\n \"{:5.3f}\".format(pover_land * pland / pwarm) ,\n \"{:5.3f}\".format(pover_water * pwater / pwarm), nwarm, file = fout )\n\n cold = ma.masked_array(xvec < xcrit)\n cold = ma.logical_and(cold, unknown)\n ncold = len(cold.nonzero()[0]) \n lmask = np.logical_and(landmask, cold)\n imask = np.logical_and(icemask, cold)\n omask = np.logical_and(watermask, cold)\n pcold = float(ncold)/float(nobs)\n pover_land = len(lmask.nonzero()[0])/nlandpts\n pover_water = len(omask.nonzero()[0])/nwaterpts\n pover_ice = len(imask.nonzero()[0])/nicepts\n if (pcold > 0):\n print(label,\"cold \",xcrit,\n \"{:5.3f}\".format(pover_ice * pice / pcold) ,\n \"{:5.3f}\".format(pover_land * pland / pcold) ,\n \"{:5.3f}\".format(pover_water * pwater / pcold), ncold, file = fout )\n\ndef dr(x, y, label, unknown, fout = sys.stdout):\n ratio = delta(x,y)\n tc = np.linspace(ratio.min(), ratio.max(), num=100)\n for i in range(0,len(tc)):\n bayes(ratio, tc[i], label, unknown, fout)\n del ratio\n\n#----------------------------------------------------------------\nfout = open(\"round1\",\"w\")\nfor thot in range (75, 315):\n bayes(t19v, thot, \"t19v\", unknown, fout)\n bayes(t19h, thot, \"t19h\", unknown, fout)\n bayes(t22v, thot, \"t22v\", unknown, fout)\n bayes(t37v, thot, \"t37v\", unknown, fout)\n bayes(t37h, thot, \"t37h\", unknown, fout)\n bayes(t85v, thot, \"t85v\", unknown, fout)\n bayes(t85h, thot, \"t85h\", unknown, fout)\n\ndr(t19v, t19h, \"drt19vt19h\", unknown, fout)\ndr(t19v, t22v, \"drt19vt22v\", unknown, fout)\ndr(t19v, t37v, \"drt19vt37v\", unknown, fout)\ndr(t19v, t37h, \"drt19vt37h\", unknown, fout)\ndr(t19v, t85v, \"drt19vt85v\", unknown, fout)\ndr(t19v, t85h, \"drt19vt85h\", unknown, fout)\ndr(t19h, t22v, \"drt19ht22v\", unknown, fout)\ndr(t19h, t37v, \"drt19ht37v\", unknown, fout)\ndr(t19h, t37h, \"drt19ht37h\", unknown, fout)\ndr(t19h, t85v, \"drt19ht85v\", unknown, fout)\ndr(t19h, t85h, \"drt19ht85h\", unknown, fout)\ndr(t22v, t37v, \"drt22vt37v\", unknown, fout)\ndr(t22v, t37h, \"drt22vt37h\", unknown, fout)\ndr(t22v, t85v, \"drt22vt85v\", unknown, fout)\ndr(t22v, t85h, \"drt22vt85h\", unknown, fout)\ndr(t37v, t37h, \"drt37vt37h\", unknown, fout)\ndr(t37v, t85v, \"drt37vt85v\", unknown, fout)\ndr(t37v, t85h, \"drt37vt85h\", unknown, fout)\ndr(t37h, t85v, \"drt37ht85v\", unknown, fout)\ndr(t37h, t85h, \"drt37ht85h\", unknown, fout)\ndr(t85v, t85h, \"drt85vt85h\", unknown, fout)\nfout.close()\n\nexit(0)\n\n#----------------------------------------------------------------\n\n# Next step, pre-filter based on perfect land 
filters, perfect ice-free ocean filters\n# Exclude 22v because of F15\n#\n# Land points:\n#satellite-based land mask\n#Start with false everywhere and then 'or' in the trues:\nsland_mask = ma.masked_array(t19h > 3000)\nswater_mask = ma.masked_array(t19h > 3000)\nsland_mask = ma.logical_or(sland_mask, known)\nswater_mask = ma.logical_or(swater_mask, known)\n\n# tb filters, perfect not-ice, very good land:\n#hot side:\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t19v > 270))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t19h > 263))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t22v > 270))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t37v > 267))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t37h > 262))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t85v > 270))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t85h > 263))\n# Cold side:\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t19v < 176))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t22v < 185))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t37v < 195))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t85v < 184))\nsland_mask = ma.logical_or(sland_mask, ma.masked_array(t85h < 170))\n\n\n# dr filters, perfect not-ice, very good water:\ntmp = ma.masked_array(delta(t37v, t85v) < -0.08223462165004075)\nswater_mask = ma.logical_or(swater_mask, tmp)\ntmp = ma.masked_array(delta(t19v, t22v) < -0.05371907187832736)\nswater_mask = ma.logical_or(swater_mask, tmp)\ntmp = ma.masked_array(delta(t37h, t85h) < -0.17385640469464386)\nswater_mask = ma.logical_or(swater_mask, tmp)\ntmp = ma.masked_array(delta(t19v, t85v) < -0.14244067066847677)\nswater_mask = ma.logical_or(swater_mask, tmp)\n\n#very good not-ice, very good water\ntmp = ma.masked_array(t19v >= 176 )\ntmp = ma.logical_and(tmp, t19v < 191)\nswater_mask = ma.logical_or(swater_mask, tmp)\nswater_mask = ma.logical_or(swater_mask, t19h < 126)\n\ntmp = ma.masked_array(delta(t19v, t22v) < -0.045881619056065914) #extends above\nswater_mask = ma.logical_or(swater_mask, tmp)\ntmp = ma.masked_array(delta(t19v, t37v) < -0.05464559974092431)\nswater_mask = ma.logical_or(swater_mask, tmp)\ntmp = ma.masked_array(delta(t19v, t85v) < -0.13226512947467844) #extends above\nswater_mask = ma.logical_or(swater_mask, tmp)\n\n\n#satellite-based land mask\nsland_indices = sland_mask.nonzero()\nnsland_pts = len(sland_indices[0])\nprint(\"number of satellite-caught land pts: \",nsland_pts)\n\n# satellite-based water points: \nswater_indices = swater_mask.nonzero()\nnswater_pts = len(swater_indices[0])\nprint(\"number of satellite-caught water pts: \",nswater_pts)\n\n# ----------------- Prepare masks for second pass ------------------\nknown = ma.logical_or(swater_mask, sland_mask)\nunknown = ma.logical_not(known)\nunknown_indices = unknown.nonzero()\nnobs = len(unknown_indices[0])\nprint(\"nobs for round 2: \",nobs, flush=True)\n\nlandmask = ma.logical_and(landmask , unknown)\nicemask = ma.logical_and(icemask , unknown)\nwatermask = ma.logical_and(watermask , unknown)\nnicepts = len(icemask.nonzero()[0])\nnlandpts = len(landmask.nonzero()[0])\nnwaterpts = len(watermask.nonzero()[0])\npwater = nwaterpts/float(nobs)\npland = nlandpts/float(nobs)\npice = nicepts/float(nobs)\nprint(\"after first pass, nobs, ice, land, water \",nobs, nicepts, nlandpts, \n nwaterpts, pice, pland, pwater, flush=True)\n\n#----------------------------------------------------------------\n# Repeat original process check on the 
remaining points --\n# -- can we improve the identification of land and ice-free water?\n# -- next step: finding a filter for 'not-ice', maybe land or water,\n# but definitely not sea ice\n# -- or next: finding a filter for 'is sea ice'\n#-----------------------------------------------\n#fout2 = open(\"tb2\",\"w\")\n#for i in range(0,nobs):\n# all[unknown_indices[0][i] ].show(fout2)\n#fout2.close()\n#-----------------------------------------------\n\nfout = open(\"round2\",\"w\")\nfor thot in range (125, 275):\n bayes(t19v, thot, \"t19v\", unknown, fout)\n bayes(t19h, thot, \"t19h\", unknown, fout)\n bayes(t22v, thot, \"t22v\", unknown, fout)\n bayes(t37v, thot, \"t37v\", unknown, fout)\n bayes(t37h, thot, \"t37h\", unknown, fout)\n bayes(t85v, thot, \"t85v\", unknown, fout)\n bayes(t85h, thot, \"t85h\", unknown, fout)\n\ndr(t19v, t19h, \"drt19vt19h\", unknown, fout)\ndr(t19v, t22v, \"drt19vt22v\", unknown, fout)\ndr(t19v, t37v, \"drt19vt37v\", unknown, fout)\ndr(t19v, t37h, \"drt19vt37h\", unknown, fout)\ndr(t19v, t85v, \"drt19vt85v\", unknown, fout)\ndr(t19v, t85h, \"drt19vt85h\", unknown, fout)\ndr(t19h, t22v, \"drt19ht22v\", unknown, fout)\ndr(t19h, t37v, \"drt19ht37v\", unknown, fout)\ndr(t19h, t37h, \"drt19ht37h\", unknown, fout)\ndr(t19h, t85v, \"drt19ht85v\", unknown, fout)\ndr(t19h, t85h, \"drt19ht85h\", unknown, fout)\ndr(t22v, t37v, \"drt22vt37v\", unknown, fout)\ndr(t22v, t37h, \"drt22vt37h\", unknown, fout)\ndr(t22v, t85v, \"drt22vt85v\", unknown, fout)\ndr(t22v, t85h, \"drt22vt85h\", unknown, fout)\ndr(t37v, t37h, \"drt37vt37h\", unknown, fout)\ndr(t37v, t85v, \"drt37vt85v\", unknown, fout)\ndr(t37v, t85h, \"drt37vt85h\", unknown, fout)\ndr(t37h, t85v, \"drt37ht85v\", unknown, fout)\ndr(t37h, t85h, \"drt37ht85h\", unknown, fout)\ndr(t85v, t85h, \"drt85vt85h\", unknown, fout)\n\nfout.close() \n#\n#----------------------------------------------------------------\nfrom algorithms import *\n\n#tie points -- added to algorithms\n\nfout = open(\"unknown\",\"w\")\n\nfor k in range(0,len(unknown_indices[0])):\n i = unknown_indices[0][k]\n x = all[unknown_indices[0][k]]\n if (x.latitude > 0):\n CT = nasa(t19v[i], t19h[i], t37v[i], tiepts_nh)\n else:\n CT = nasa(t19v[i], t19h[i], t37v[i], tiepts_sh)\n CT=min(1.,CT)\n\n print(\"{:9.4f}\".format(x.longitude), \"{:8.4f}\".format(x.latitude),\n \"{:.2f}\".format(x.land), \n \"{:3d}\".format(x.ice_land), \"{:3d}\".format(x.ice_post),\n \"{:7.2f}\".format(x.ice_distance),\n \" \", \"{:.2f}\".format(x.sst), \"{:.2f}\".format(x.ice_sst),\n \" \", \"{:6.2f}\".format(x.tb[0]),\n \"{:6.2f}\".format(x.tb[1]),\n \"{:6.2f}\".format(x.tb[2]),\n \"{:6.2f}\".format(x.tb[3]),\n \"{:6.2f}\".format(x.tb[4]),\n \"{:6.2f}\".format(x.tb[5]),\n \"{:6.2f}\".format(x.tb[6]),\n CT, CT - x.ice_sst,\n file=fout)\n"
]
| [
[
"numpy.logical_not",
"numpy.ma.logical_or",
"numpy.ma.logical_not",
"numpy.ma.masked_array",
"numpy.ma.logical_and",
"numpy.logical_and",
"numpy.zeros"
]
]
|
whatbeg/DataScienceTools | [
"64398cf97cbe9a8d5bf15c10f8e48cdd34324492"
]
| [
"src/main/plot_log.py"
]
| [
"# ==================================\n# Author: whatbeg (Qiu Hu)\n# Created by: 2017. 5\n# Personal Site: http://whatbeg.com\n# ==================================\n\n\"\"\"Plot Log files on different forms\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef analyse_bigdl(files, title):\n \"\"\"\n analyse log files in files(list), and plot 3 figures for bigdl-form training or converge process.\n the newest (up to 2017. 5. 30) bigdl log is like:\n `2017-05-29 14:26:53 INFO DistriOptimizer$:280 - [Epoch 1 0/32561][Iteration 1][Wall Clock 0.0s] Train 256 in 0.318120388seconds. Throughput is 804.7268 records/second. Loss is 30565.014.`\n `2017-05-29 14:26:58 INFO DistriOptimizer$:568 - [Wall Clock 5.592995018s] Validate model...`\n `2017-05-29 14:26:59 INFO DistriOptimizer$:610 - Top1Accuracy is Accuracy(correct: 11752, count: 16281, accuracy: 0.7218229838462011)`\n `2017-05-29 14:26:59 INFO DistriOptimizer$:610 - Loss is (Loss: 250195.16, count: 2036, Average Loss: 122.885635)`\n\n 3 figures is following:\n Wall clock -- Top1 Accuracy\n Epoch -- Top1 Accuracy\n Wall clock -- Train Loss\n\n :param files: log file list\n :param title: figure title\n :return: None\n \"\"\"\n assert len(files) > 0\n for filename in files:\n wallclock, trainwallclock, top1acc, loss, throughput, testloss = ([], [], [], [], [], [])\n with open(filename, 'r') as f:\n for line in f.readlines():\n if line.count(\"Validate model...\"):\n wallclock.append(line.strip().split(' ')[8][:-2])\n elif line.count(\"Top1Accuracy is\"):\n top1acc.append(line.strip().split(' ')[13][:-1])\n elif line.count(\"records/second. Loss is\"):\n loss.append(line.strip().split(' ')[22][:-1])\n trainwallclock.append(line.strip().split(' ')[11][:-2])\n elif line.count(\"Throughput is\"):\n throughput.append(line.strip().split(' ')[18][:-1])\n elif line.count(\"Loss is (Loss:\"):\n testloss.append(line.strip().split(' ')[14][:-1])\n plt.figure(1)\n plt.title(title)\n plt.ylabel(\"Top1 Accuracy\")\n plt.xlabel(\"Wall Clock (s)\")\n plt.plot(wallclock, top1acc, label=filename[:-4])\n plt.legend(loc=\"lower right\")\n plt.grid()\n plt.figure(2)\n plt.title(title)\n plt.ylabel(\"Top1 Accuracy\")\n plt.xlabel(\"Epoch\")\n plt.plot(range(1, len(top1acc)+1), top1acc, label=filename[:-4])\n plt.legend(loc=\"lower right\")\n plt.grid()\n plt.figure(3)\n plt.title(title)\n plt.ylabel(\"Train Loss\")\n plt.xlabel(\"Wall Clock (s)\")\n plt.plot(trainwallclock, loss, label=filename[:-4])\n plt.legend(loc=\"upper right\")\n plt.grid()\n plt.show()\n\n\ndef analyse_pytorch(files, title):\n \"\"\"\n analyse pytorch like log.\n For example,\n `Train Epoch: 1 [20480/32561 (62%)]\tLoss: 0.598629`\n `Test set: Average loss: 0.0024, Accuracy: 12302/16281 (75.6%)`\n\n :param files: log file list\n :param title: figures title\n :return: None\n \"\"\"\n assert len(files) > 0\n for filename in files:\n testloss, top1acc = [], []\n with open(filename, 'r') as f:\n for line in f.readlines():\n if line.count('Test set: Average loss'):\n testloss.append(float(line.strip().split(' ')[6][:-1]))\n top1acc.append(float(line.strip().split(' ')[9][1:-2]))\n\n plt.figure(1)\n plt.title(title)\n plt.ylabel(\"Top1 Accuracy (%)\")\n plt.xlabel(\"Epoch\")\n plt.plot(range(1, len(top1acc) + 1), top1acc, label=filename[:-4])\n plt.legend(loc=\"lower right\")\n plt.grid()\n plt.figure(2)\n plt.title(title)\n plt.ylabel(\"Test Loss\")\n plt.xlabel(\"Epoch\")\n plt.plot(range(1, len(testloss) + 1), testloss, label=filename[:-4])\n plt.legend(loc=\"upper right\")\n 
plt.grid()\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
unanan/face-parsing.PyTorch | [
"85e66d30014efdbe5a3440250a310e8281fd6040"
]
| [
"test.py"
]
| [
"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nfrom model import BiSeNet\n\nimport torch\nimport argparse\nimport os\nimport os.path as osp\nimport numpy as np\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport cv2\n\n\ndef vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'):\n # Colors for all 20 parts\n part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],\n [255, 0, 85], [255, 0, 170],\n [0, 255, 0], [85, 255, 0], [170, 255, 0],\n [0, 255, 85], [0, 255, 170],\n [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [0, 85, 255], [0, 170, 255],\n [255, 255, 0], [255, 255, 85], [255, 255, 170],\n [255, 0, 255], [255, 85, 255], [255, 170, 255],\n [0, 255, 255], [85, 255, 255], [170, 255, 255]]\n\n im = np.array(im)\n vis_im = im.copy().astype(np.uint8)\n vis_parsing_anno = parsing_anno.copy().astype(np.uint8)\n vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255\n\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(1, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]\n\n vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)\n # print(vis_parsing_anno_color.shape, vis_im.shape)\n vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)\n\n # Save result or not\n if save_im:\n cv2.imwrite(save_path[:-4] +'.png', vis_parsing_anno)\n cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])\n\n # return vis_im\n\n\ndef evaluate(respth='./res/test_res', dspth='./data', cp='model_final_diss.pth'):\n if not os.path.exists(respth):\n os.makedirs(respth)\n\n n_classes = 19\n net = BiSeNet(n_classes=n_classes)\n net.to(torch.device(\"cuda:0\"))\n net.load_state_dict(torch.load(cp))\n net.eval()\n\n to_tensor = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n with torch.no_grad():\n for image_path in os.listdir(dspth):\n img = cv2.imread(osp.join(dspth, image_path))\n image = cv2.resize(img, (512, 512), cv2.INTER_LINEAR)\n\n # img = Image.open(osp.join(dspth, image_path))\n # image = img.resize((512, 512), Image.BILINEAR)\n # image = np.array(image)[:, np.newaxis]\n # print(image.shape)\n img = to_tensor(image)\n img = torch.unsqueeze(img, 0)\n img = img.to(torch.device(\"cuda:0\"))\n out = net(img)[0]\n parsing = out.squeeze(0).cpu().numpy().argmax(0)\n # print(parsing)\n print(np.unique(parsing))\n\n vis_parsing_maps(image, parsing, stride=1, save_im=True, save_path=osp.join(respth, image_path))\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dspth\", type=str, default=\"/media/face_parsing/images\")\n parser.add_argument(\"--cp\", type=str, default=\"/media/face_parsing/models/face_parsing.pth\")\n parser.add_argument(\"--respth\", type=str, default=\"/media/face_parsing/results\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = arg_parser()\n evaluate(args.respth, args.dspth, args.cp)\n"
]
| [
[
"torch.load",
"numpy.unique",
"torch.unsqueeze",
"numpy.max",
"torch.no_grad",
"torch.device",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
]
|
chapochn/ORN-LN_circuit | [
"42e0ee5e81a8f711564f45e21d3b4e3ce3e02f69"
]
| [
"bin/functions/plotting.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Nikolai M Chapochnikov\n\"\"\"\n\n# #############################################################################\n# ################################# IMPORTS ###################################\n# #############################################################################\nimport numpy as np\nimport pandas as pd\nimport matplotlib\n# import itertools\nimport scipy.linalg as LA\nimport ast\nimport sklearn.decomposition as skd\nimport statsmodels.stats.multitest as smsm # for the multihypothesis testing\nimport matplotlib.pyplot as plt\nimport functions.general as FG\nfrom typing import Tuple\n\n# #############################################################################\n# ########################### GENERAL FUNCTIONS ##############################\n# #############################################################################\n\ndef set_plotting(plot_plots: bool):\n if plot_plots:\n plt.ion()\n else:\n # this weird construction is needed otherwise ioff gets hijacked\n # with\n # plt.ioff()\n f = plt.figure()\n plt.close(f)\n plt.ioff()\n return 0\n\n\ndef save_plot(f: plt.Figure, path: str, cond: bool, **kwargs):\n \"\"\"\n saves the figure only if cond is True, otherwise doesn't do anything\n can take any options that is then transmitted to savefig\n This is basically a simple wrapper for savefig\n \"\"\"\n if cond:\n f.savefig(path, **kwargs)\n plt.close(f)\n\n\n# function for setting back the default style\n# can be useful after messing up with different styles.\ndef set_default_plot_params():\n matplotlib.rcParams.update(matplotlib.rcParamsDefault)\n\n # to get the available styles: plt.style.available\n # to apply one of the available styles:\n # plt.style.use('seaborn-paper')\n\n\ndef unpack_vlim(vlim) -> Tuple[float, float]:\n \"\"\"\n unpack vlim\n Parameters\n ----------\n vlim\n\n Returns\n -------\n\n \"\"\"\n if type(vlim) is not list and vlim is not None:\n (vmin, vmax) = (-vlim, vlim)\n elif vlim is None:\n (vmin, vmax) = (vlim, vlim)\n elif len(vlim) == 2:\n (vmin, vmax) = vlim\n else:\n raise ValueError(f'there is a problem with vlim: {vlim}')\n return vmin, vmax\n\n\ndef set_aspect_ratio(ax, alpha):\n x0, x1 = ax.get_xlim()\n y0, y1 = ax.get_ylim()\n ax.set_aspect((x1-x0)/(y1-y0)/alpha)\n\n\ndef add_x_splits(ax, splits, n, **plot_args):\n for s in splits:\n ax.plot([s-0.5, s-0.5], [-0.5, n-0.5], **plot_args)\n\n\ndef add_y_splits(ax, splits, n, **plot_args):\n for s in splits:\n ax.plot([-0.5, n-0.5], [s-0.5, s-0.5], **plot_args)\n\n\ndef imshow_df(df: pd.DataFrame, ax=None, cb=True, title='', vlim=None,\n cmap=plt.cm.viridis, figsize=None, tight=False,\n splits_x=[], splits_y=[], rot=90, ha='center', lw=1,\n show_lab_x: bool = True, show_lab_y: bool = True,\n cb_frac: float = 0.043,\n cbtitle: str = '', show_values: bool = False):\n \"\"\"\n very general wrapper around imshow to plot dataframes with labels\n splits_x and split_y adds lines to separate different categories in the\n data\n \"\"\"\n if ax is None:\n f, ax = plt.subplots(1, 1, figsize=figsize)\n\n (vmin, vmax) = unpack_vlim(vlim)\n print(f'vmin and vmax in imshow_df: {vmin}, {vmax}')\n cmap.set_bad('black')\n cp = ax.imshow(df, cmap=cmap, vmin=vmin, vmax=vmax, interpolation='none')\n ax.set_title(title)\n\n plt.sca(ax)\n if show_lab_x is True:\n plt.xticks(np.arange(len(df.T)), list(df.columns), rotation=rot, ha=ha)\n ax.tick_params('x', bottom=False, pad=0)\n\n label = ''\n # for a multiindex creates a label from the mi names\n if df.columns.names[0] is not 
None:\n for i in range(len(df.columns.names)):\n label = label + ', ' + df.columns.names[i]\n label = label[2:]\n ax.set_xlabel(label)\n else:\n plt.xticks([], [])\n\n if show_lab_y is True:\n plt.yticks(np.arange(len(df)), list(df.index))\n ax.tick_params('y', left=False, pad=0)\n\n label = ''\n if df.index.names[0] is not None:\n for i in range(len(df.index.names)):\n label = label + ', ' + df.index.names[i]\n label = label[2:]\n ax.set_ylabel(label)\n else:\n plt.yticks([], [])\n\n add_x_splits(ax, splits_x, len(df.index), c='w', lw=lw)\n add_y_splits(ax, splits_y, len(df.columns), c='w', lw=lw)\n\n if cb is True:\n clb = plt.colorbar(cp, ax=ax, fraction=cb_frac, pad=0.04)\n clb.outline.set_linewidth(0.00)\n clb.ax.tick_params(size=2, direction='in', pad=1.5)\n clb.ax.set_title(cbtitle)\n else:\n clb = None\n\n if tight is True:\n plt.tight_layout()\n\n # removing the borders\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n # printing the actual values inside the squares.\n if show_values:\n for (j, i), label in np.ndenumerate(df):\n ax.text(i, j, int(label), ha='center', va='center',\n size=matplotlib.rcParams['font.size']*0.8**2)\n\n return (plt.gcf(), ax, clb)\n\n\ndef imshow_df2(df, ax, title='', vlim=None, cmap=plt.cm.viridis,\n splits_x=[], splits_y=[], rot=90, ha='center', lw=1,\n show_lab_x=True, show_lab_y=True, show_values=False,\n aspect='equal', splits_c='w', **kwargs):\n \"\"\"\n very general wrapper around imshow to plot dataframes with labels\n splits_x and split_y adds lines to separate different categories in the\n data\n \"\"\"\n\n (vmin, vmax) = unpack_vlim(vlim)\n print(f'vmin and vmax in imshow_df: {vmin}, {vmax}')\n# cmap.set_bad('black')\n cp = ax.imshow(df, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect,\n **kwargs)\n ax.set_title(title)\n\n plt.sca(ax)\n if show_lab_x is True:\n plt.xticks(np.arange(len(df.T)), list(df.columns), rotation=rot, ha=ha)\n ax.tick_params('x', bottom=False, pad=-1)\n\n label = ''\n # for a multiindex creates a label from the mi names\n if df.columns.names[0] is not None:\n for i in range(len(df.columns.names)):\n label = label + ', ' + df.columns.names[i]\n label = label[2:]\n ax.set_xlabel(label)\n else:\n plt.xticks([], [])\n\n if show_lab_y is True:\n plt.yticks(np.arange(len(df)), list(df.index))\n ax.tick_params('y', left=False, pad=-1)\n\n label = ''\n if df.index.names[0] is not None:\n for i in range(len(df.index.names)):\n label = label + ', ' + df.index.names[i]\n label = label[2:]\n ax.set_ylabel(label)\n else:\n plt.yticks([], [])\n\n add_x_splits(ax, splits_x, df.shape[0], c=splits_c, lw=lw)\n add_y_splits(ax, splits_y, df.shape[1], c=splits_c, lw=lw)\n\n # removing the borders\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n # printing the actual values inside the squares.\n if show_values:\n for (j, i), label in np.ndenumerate(df):\n ax.text(i, j, int(label), ha='center', va='center',\n size=matplotlib.rcParams['font.size']*0.8**2)\n\n return cp\n\n\ndef add_colorbar(cp, ax, cbtitle='', ticks=[], pad=None, extend='neither'):\n \"\"\"\n add a colorbar at the location specified by ax and for the plot referenced\n by cp. 
The pad is for the title position of the colorbar\n \"\"\"\n clb = plt.colorbar(cp, cax=ax, extend=extend)\n clb.outline.set_linewidth(0.00)\n clb.ax.tick_params(size=2, direction='in', pad=1.5)\n clb.ax.set_title(cbtitle, pad=pad)\n clb.set_ticks(ticks)\n return clb\n\n\ndef calc_fs_ax_df(df, pads, gw=None, gh=None, sq=None):\n \"\"\"\n calculates the size of the figure based on the requirement on the padding\n and of the size of the data\n \"\"\"\n l, r, b, t = pads\n h_n, w_n = df.shape\n if gw is not None:\n sq = gw/w_n\n gh = sq*h_n\n elif gh is not None:\n sq = gh/h_n\n gw = sq*w_n\n elif sq is not None:\n gh = sq*h_n\n gw = sq*w_n\n else:\n raise ValueError(f'gw or gh or sq needed, got {gw}, {gh}, {sq}')\n fs = (l + r + gw, b + t + gh)\n axes = [l/fs[0], b/fs[1], 1 - (l+r)/fs[0], 1 - (b+t)/fs[1]]\n return sq, fs, axes\n\n\ndef calc_fs_ax(pads, gw, gh):\n \"\"\"\n calculates the size of the figure based on the requirement on the padding\n and of the size of graph we want\n gw is the graph width\n gh is the graph height\n \"\"\"\n l, r, b, t = pads\n fs = (l + r + gw, b + t + gh)\n axes = [l/fs[0], b/fs[1], 1 - (l+r)/fs[0], 1 - (b+t)/fs[1]]\n return fs, axes\n\n\ndef calc_fs_ax_2plts(df1, df2, pads, d_h, sq, cb_dx, cb_w):\n \"\"\"\n if one want to plot 2 plots on top of each other\n pads are for the both plots together\n d_h is the distance between the 2 plots in inches\n sq is the size of the square in inches\n cb_dx is the distance from the plot to the colorbar\n cb_w is the width of the colorbar\n \"\"\"\n pads1 = pads.copy() # for the top plot, changing the bottom padding\n pads1[2] = pads[2] + d_h + sq*len(df2.index)\n\n pads2 = pads.copy() # for the bottom plot, changing the top padding\n pads2[3] = pads[3] + d_h + sq*len(df1.index)\n\n _, fs, axs1 = calc_fs_ax_df(df1, pads1, sq=sq)\n _, _, axs2 = calc_fs_ax_df(df2, pads2, sq=sq)\n # ax : [x, y, dx, dy]\n\n cb_x = axs1[0] + axs1[2] + cb_dx/fs[0] # x position of colorbar\n # corresponds to x + dx + bd_dx\n axs_cb = [cb_x, axs2[1], cb_w/fs[0], axs1[3] + axs2[3] + d_h/fs[1]]\n # [x_position, y_position,\n return fs, axs1, axs2, axs_cb\n\n\ndef calc_fs_ax_2plts_side(df1, df2, pads, d_x, sq, cb_dx, cb_w):\n \"\"\"\n if one want to plot 2 plots side to side of each other\n pads are for the both plots together\n d_x is the distance between the 2 plots in inches\n sq is the size of the square in inches\n cb_dx is the distance from the plot to the colorbar\n cb_w is the width of the colorbar\n \"\"\"\n pads1 = pads.copy() # for the left plot\n pads1[1] = pads[1] + d_x + sq*len(df2.columns)\n\n pads2 = pads.copy() # for the right plot\n pads2[0] = pads[0] + d_x + sq*len(df1.columns)\n\n _, fs, axs1 = calc_fs_ax_df(df1, pads1, sq=sq)\n _, _, axs2 = calc_fs_ax_df(df2, pads2, sq=sq)\n\n cb_x = axs2[0] + axs2[2] + cb_dx/fs[0]\n axs_cb = [cb_x, axs2[1], cb_w/fs[0], axs2[3]]\n\n return fs, axs1, axs2, axs_cb\n\n\ndef add_pca_line(x, y, scale1, scale2, ax):\n \"\"\"\n this function adds (usually to a scatter plot) a line which is the\n principal direction of the dataset in these 2 dimensions\n the scale is a parameter setting how extended shold be the line from\n the center of the cloud\n \"\"\"\n x1 = x - x.mean()\n x_l2 = LA.norm(x1)\n x2 = x1/x_l2\n\n y1 = y - y.mean()\n y_l2 = LA.norm(y1)\n y2 = y1/y_l2\n\n X = np.array([x2, y2]).T\n pca = skd.PCA(n_components=1)\n pca.fit(X)\n m = [x.mean(), y.mean()]\n v = pca.components_\n v = v * np.array([x_l2, y_l2])\n ax.plot([m[0] - scale1*v[0, 0], m[0] + scale2*v[0, 0]],\n [m[1] - scale1*v[0, 1], 
m[1] + scale2*v[0, 1]], '--', c='gray')\n\n\n# #### FUNCTION USED IN GRANT AND PRESENTATION PLOTS ########################\n# one idea would be that these 2 functions would be part of plotting class\n# that has as parameters different font sizes and other adjustments\n# so that they are fixed onces for all then can be references from\n# within the functino\n\n\ndef plot_scatter(ax, data1, data2, lblx, lbly, c1='k', c2='k',\n xticks=None, yticks=None, pvalue=None,\n pca_line_scale1=0.8, pca_line_scale2=0.8, show_cc=True,\n **kwargs):\n \"\"\"\n this function is used to plot article quality figure of a scatter plot\n we need to provide 2 datasets, the labels, the colors, and the ticks\n on the y axis, which was historically used for the vector describing\n activity\n \"\"\"\n # to make things more modulable, the adjustments should be done outside\n # of the functino\n # adj_l = 0.15\n # adj_r = 0.85\n # adj_b = 0.2\n # adj_t = 0.9\n\n corr_coef = np.corrcoef(data1, data2)[0, 1]\n ax.scatter(data1, data2, **kwargs)\n # here we are adding a line showing the pca directino of the data-set\n add_pca_line(data1, data2, pca_line_scale1, pca_line_scale2, ax)\n # ax.set_xlim(0, None) # in the csae of PCA it can go below 0\n # ax.set_ylim(0, None)\n # set_aspect_ratio(ax, 1)\n ax.set_xlabel(lblx, color=c1) # , fontsize=ft_s_lb)\n ax.set_ylabel(lbly, color=c2) # , fontsize=ft_s_lb)\n\n if yticks is not None:\n ax.set_yticks(yticks)\n if xticks is not None:\n ax.set_xticks(xticks)\n\n ax.tick_params('x', colors=c1, which='both', pad=1) # , labelsize=ft_s_tk)\n ax.tick_params('y', colors=c2, which='both', pad=1) # , labelsize=ft_s_tk)\n\n ax.spines['bottom'].set_edgecolor(c1)\n ax.spines['left'].set_edgecolor(c2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n # plt.subplots_adjust(left=adj_l, right=adj_r, bottom=adj_b, top=adj_t)\n if show_cc:\n ax.text(0.65, 0.11, r'$r$' + \" = %0.2f\" % corr_coef, transform=ax.transAxes)\n if pvalue is not None:\n ax.text(0.65, 0.03, \"pv = %0.0e\" % pvalue, transform=ax.transAxes)\n\n# =============================================================================\n# regr = skllin.LinearRegression()\n# regr.fit(data1.reshape(-1, 1), data2)\n# print(regr.coef_)\n# d1min = 0\n# d1max = np.max(data1)*2\n# ax.plot([d1min, d1max], regr.predict([[d1min], [d1max]]), '--', c='gray')\n# =============================================================================\n # ax.plot()\n\n\ndef plot_line_2yax(ax, data1, data2, lbl1, lbl2, cell_list, ct,\n c1='k', c2='k', m1=',', m2=',',\n LS='-', ttl='', rot_y=0):\n \"\"\"\n plot activity components and a connectivity with a line plot,\n where the 2 sides have a different scaling\n m1 and m2 are markers\n Now the function is tuned to for connectivity and activity, but if it will\n be used wider, it can definitely be generalized\n LS is an option related to the linetype, if we want lines or not between\n points\n Parameters\n ----------\n ct: x label\n\n \"\"\"\n ttl_y = 1.17\n\n # ideally one should scale the 2 datasets a bit better so that\n # mean1 = np.mean(data1)\n # mean2 = np.mean(data2)\n ln1 = ax.plot(data1, c=c1, label=lbl1, ls=LS, marker=m1)\n\n # ax.set_title('activity and connectivity', fontsize=ft_s_lb)\n ax.text(0.5, ttl_y, ttl,\n horizontalalignment='center', verticalalignment='center',\n transform=ax.transAxes)\n\n # ax.set_ylim(0, None) # in the case of PCA it can be below 0\n ax.set_xlabel(ct)\n ax.set_ylabel('activity', color=c1)\n\n ax.tick_params('y', colors=c1, which='both', pad=1, 
rotation=rot_y)\n # ax.set_yticks([0.0, 0.5, 1.0])\n ax.set_xticks(np.arange(len(cell_list)))\n ax.set_xticklabels(cell_list)\n ax.tick_params(axis='x', which='both', bottom=False, pad=-1,\n labelrotation=90)\n\n ax2 = ax.twinx()\n ln2 = ax2.plot(data2, c=c2, label=lbl2, ls=LS, marker=m2)\n ax2.set_ylim(0, None)\n ax2.set_ylabel('number of synapses', color=c2)\n ax2.tick_params('y', colors=c2, which='both', pad=1, rotation=rot_y)\n # y_max = data2.max() // 10 * 10\n # ytcks = [0, int(y_max/2), int(y_max)]\n # ax2.set_yticks(ytcks)\n\n # adjusting the borders\n ax2.spines['left'].set_edgecolor(c1)\n ax2.spines['right'].set_edgecolor(c2)\n ax.spines['top'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax.xaxis.grid()\n\n # legend, if one puts ax instead of ax, then the legend will not be on the\n # top layer\n lns = ln1 + ln2\n labs = [l.get_label() for l in lns]\n # ax2.legend(lns, labs, loc=4, prop={'size': ft_s_tk})\n # adding a legend if lbl1 and lbl2 are not None\n if (lbl1 is not None) and (lbl2 is not None):\n leg = ax2.legend(lns, labs, ncol=3, loc=10,\n bbox_to_anchor=(0., 1.01, 1., .1), frameon=False)\n leg.get_frame().set_linewidth(0.0)\n return ax, ax2, lns\n\n\ndef plot_double_series(data1, data2, col1, col2, ylab1, ylab2,\n ylim1=None, ylim2=None, figsize=(9, 2.5)):\n \"\"\"\n the weird order of ax and ax2 comes from the fact i want the corr points to\n be above the signif points\n used for plotting of p-values and mad diff of cdf\n \"\"\"\n\n f, ax = plt.subplots(1, 1, figsize=figsize)\n ax2 = ax.twinx()\n ax2.yaxis.set_label_position(\"left\")\n ax2.yaxis.tick_left()\n ax2.plot([-1, len(data1)], [0, 0], c='gray', lw=0.5)\n ax2.plot(data1, ls='None', marker='.', markersize=5)\n # ax.set_zorder(2)\n ax2.set_ylabel(ylab1, color=col1)\n ax2.spines['left'].set_edgecolor(col1)\n ax2.tick_params('y', colors=col1)\n ax2.set_xlim(-0.7, len(data1) - 1 + 0.7)\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.set_ylim(ylim1)\n\n ax.plot(data2, ls='None', marker=\"+\", c=col2, markersize=5)\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.tick_right()\n ax.set_ylabel(ylab2, color=col2)\n ax.spines['right'].set_edgecolor(col2)\n ax.tick_params('y', colors=col2)\n ax.spines['top'].set_visible(False)\n sign = -np.log10(0.05)\n ax.plot([-0.5, len(data2)-0.5], [sign, sign], c=col2, lw=0.5)\n ax.xaxis.grid()\n ax.set_ylim(ylim2)\n\n ax.set_xticklabels(data1.index, rotation=90)\n\n plt.tight_layout()\n return f, (ax2, ax)\n\n\ndef plot_double_series_unevenX(ax, x, data1, data2, col1, col2, ylab1, ylab2,\n ylim1=None, ylim2=None):\n \"\"\"\n the weird order of ax and ax2 comes from the fact i want the corr points to\n be above the signif points\n \"\"\"\n if not data1.index.equals(data2.index):\n raise ValueError('data1 and data2 should have the same index')\n\n ax2 = ax.twinx()\n ax2.yaxis.set_label_position(\"left\")\n ax2.yaxis.tick_left()\n ax2.plot([-1, np.max(x) + 1], [0, 0], c='gray', lw=0.5)\n ax2.plot(x, data1.values, ls='None', marker='.', markersize=5, c=col1)\n # ax.set_zorder(2)\n\n ax2.spines['left'].set_edgecolor(col1)\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n\n ax2.set_ylabel(ylab1, color=col1)\n ax2.tick_params('y', colors=col1)\n ax2.set_ylim(ylim1)\n\n ax2.set_xlim(-0.7, np.max(x) + 0.7)\n\n # ax\n ax.plot(x, data2.values, ls='None', marker=\"+\", c=col2, markersize=5)\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.tick_right()\n ax.set_ylabel(ylab2, color=col2)\n 
ax.spines['right'].set_edgecolor(col2)\n ax.tick_params('y', colors=col2)\n ax.spines['top'].set_visible(False)\n sign = -np.log10(0.05)\n ax.plot([-1, np.max(x) + 1], [sign, sign], c=col2, lw=0.5)\n ax.xaxis.grid()\n ax.set_ylim(ylim2)\n\n ax.set_xticks(x)\n ax.set_xticklabels(data1.index, rotation=90)\n ax.tick_params(axis='x', which='both', bottom=False, pad=-1)\n\n return ax2, ax\n\n\ndef plot_pdf_cdf(bins_pdf, pdf_true, pdf_mean, pdf_std,\n bins_cdf, cdf_true, cdf_mean, cdf_std):\n f, axx = plt.subplots(1, 3, figsize=(15, 4))\n ax = axx[0]\n ax.step(bins_pdf, pdf_mean, where='post', lw=3)\n ax.fill_between(bins_pdf, pdf_mean - pdf_std, pdf_mean + pdf_std,\n facecolor='grey', step='post')\n ax.step(bins_pdf, pdf_true, where='post')\n\n ax = axx[1]\n ax.step(bins_cdf, cdf_mean, where='post', lw=3)\n ax.fill_between(bins_cdf, cdf_mean - cdf_std, cdf_mean + cdf_std,\n facecolor='grey', step='post')\n ax.step(bins_cdf, cdf_true, where='post')\n\n ax = axx[2]\n ax.step(bins_cdf, cdf_true - cdf_mean, where='post', lw=3, c='C1')\n ax.fill_between(bins_cdf, - cdf_std, + cdf_std,\n facecolor='grey', step='post')\n # ax.step(bins_cdf, cdf_true, where='post')\n return f, axx\n\n\ndef pvalue2stars(v, init: str = \"\", sign: str = \"*\"):\n stars = np.full(len(v), init, dtype=object)\n for i in range(len(v)):\n if v[i]:\n stars[i] = sign\n return stars\n\n\n# this is actually a plotting function...\ndef add_sign_stars(ax, pvals, alpha, x, y, sign):\n \"\"\"\n computes the significance with a certain alpha and puts the stars\n at the position x and y\n sign is the mark used for the siginifiance indication, for example\n a \"*\" or \"**\" or whatever one wants.\n \"\"\"\n reject, _, _, _ = smsm.multipletests(pvals, method='fdr_bh', alpha=alpha)\n sign_stars = pvalue2stars(reject, sign=sign)\n for i in range(len(x)):\n ax.text(x[i], y, sign_stars[i], horizontalalignment='center')\n\n\n# #############################################################################\n# ########################## PLOTTING ACTIVITY DATA #########################\n# #############################################################################\n\n\n# not sure if this is used anywhere\ndef plot_conVSact_scatter(d1, d2, lab1='activity principal vector, ctr, norm',\n lab2='connectivity, ctr, norm'):\n \"\"\"\n\n Parameters\n ----------\n d1\n d2\n lab1\n lab2\n\n Returns\n -------\n\n \"\"\"\n # Making sure that the 2 datasets are alinged\n cell_srt = d1.index\n d2 = d2.loc[cell_srt]\n\n f, ax = plt.subplots(1, 1)\n d0 = pd.concat([d1, d1], axis=1)\n corr = d1.values @ d2.values\n corr_str = \"{:.2f}\".format(corr)\n\n v_max = np.max(np.abs(d0.values))*1.1\n ax.set_xlim(-v_max, v_max)\n ax.set_ylim(-v_max, v_max)\n ax.plot([-v_max, v_max], [0, 0], c='k')\n ax.plot([0, 0], [-v_max, v_max], c='k')\n ax.scatter(d1, d2)\n for i, txt in enumerate(list(d1.index)):\n ax.annotate(txt, (d1.iloc[i], d2.iloc[i]))\n ax.set_xlabel(lab1)\n ax.set_ylabel(lab2)\n ax.set_aspect('equal')\n ax.grid()\n\n plt.text(0.05, 0.95, 'corr = ' + corr_str, horizontalalignment='left',\n verticalalignment='top', transform=ax.transAxes,\n color='r')\n return None\n\n\ndef _update_annotations(ax, corr_txt, annotations, curve):\n (d1, d2) = curve.get_data()\n corr = d1.dot(d2)\n corr_str = \"{:.2f}\".format(corr)\n corr_txt.set_text('corr = ' + corr_str)\n for i, cell_label in enumerate(list(d1.index)):\n if annotations[i] != 0:\n annotations[i].remove()\n annotations[i] = ax.annotate(cell_label, (d1.iloc[i], d2.iloc[i]))\n\n\ndef plot_scatter_radio(df1, df2, 
title='', xlab='', ylab='', vlim=1):\n \"\"\"\n\n Parameters\n ----------\n df1\n df2\n title\n xlab\n ylab\n vlim\n\n Returns\n -------\n\n \"\"\"\n # Making sure that the 2 datasets are alinged\n vmin, vmax = unpack_vlim(vlim)\n (df1, df2) = FG.align_indices(df1, df2)\n\n df1_multi = isinstance(df1.columns, pd.core.index.MultiIndex)\n df2_multi = isinstance(df2.columns, pd.core.index.MultiIndex)\n\n def update_curve(label, df, df_multi, x):\n if df_multi:\n d_new = df[ast.literal_eval(label)]\n else:\n d_new = df[label]\n if x:\n curve.set_xdata(d_new)\n else:\n curve.set_ydata(d_new)\n plt.draw()\n _update_annotations(ax, corr_txt, annotations, curve)\n\n names1 = list(df1.columns)\n names2 = list(df2.columns)\n\n f, ax = plt.subplots(1, 1, figsize=(16, 16))\n plt.subplots_adjust(left=0.25, right=0.75)\n\n axcol = 'lightgoldenrodyellow'\n rax1 = plt.axes([-0.05, 0.05, 0.5, 0.9],\n facecolor=axcol, frameon=False)\n radio1 = matplotlib.widgets.RadioButtons(rax1, names1)\n for circle in radio1.circles: # adjusting radius. The default is 0.05\n circle.set_radius(0.01)\n\n rax2 = plt.axes([0.75, 0.05, 0.5, 0.9],\n facecolor=axcol, frameon=False)\n radio2 = matplotlib.widgets.RadioButtons(rax2, names2)\n for circle in radio2.circles: # adjusting radius. The default is 0.05\n circle.set_radius(0.01)\n\n d1 = df1[names1[0]]\n d2 = df2[names2[0]]\n if vmax is None:\n vmax = max(np.max(np.abs(df1.values)), np.max(np.abs(df2.values)))\n vmin = -vmax\n # d0 = pd.concat([d1, d1], axis=1)\n # v_max = np.max(np.abs(d0.values))*1.1\n ax.set_xlim(vmin, vmax)\n ax.set_ylim(vmin, vmax)\n ax.plot([vmin, vmax], [0, 0], c='k')\n ax.plot([0, 0], [vmin, vmax], c='k')\n ax.set_aspect('equal')\n ax.grid()\n ax.set_xlabel(xlab)\n ax.set_ylabel(ylab)\n ax.set_title(title)\n\n corr_txt = plt.text(0.05, 0.95, '', horizontalalignment='left',\n verticalalignment='top', transform=ax.transAxes,\n color='r')\n\n curve, = ax.plot(d1, d2, '.', markersize=10)\n curve.set_data(d1, d2) # this formulation keeps the pandas information\n annotations = [0]*len(d1.index)\n _update_annotations(ax, corr_txt, annotations, curve)\n\n radio1.on_clicked(lambda x: update_curve(x, df1, df1_multi, True))\n radio2.on_clicked(lambda x: update_curve(x, df2, df2_multi, False))\n\n return radio1, radio2\n\n\ndef plot_cov(data: pd.DataFrame, cell_sort=None, norm: bool = False,\n title: str = '') -> (plt.Figure, plt.Axes, plt.colorbar):\n \"\"\"\n plots the covariance of the input data\n\n\n Parameters\n ----------\n data\n input data\n cell_sort\n most probably in list or np.array format\n norm\n whether or not to divide by the number of samples\n title\n plot title\n\n Returns\n -------\n f\n reference to the figure\n ax\n reference to the axis\n clb\n reference to the colorbar\n \"\"\"\n\n cov = data.T.dot(data)\n if norm is True:\n cov /= len(data)\n if cell_sort is not None:\n cov = cov.loc[cell_sort, cell_sort]\n f, ax, clb = imshow_df(cov, vlim=np.max(cov.values),\n cmap=plt.cm.bwr, title=title, tight=True)\n return f, ax, clb\n\n"
]
| [
[
"matplotlib.pyplot.axes",
"numpy.max",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gcf",
"scipy.linalg.norm",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.close",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"pandas.concat",
"numpy.log10",
"matplotlib.rcParams.update",
"numpy.corrcoef",
"numpy.ndenumerate",
"matplotlib.pyplot.ion",
"numpy.array",
"matplotlib.widgets.RadioButtons",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xticks",
"numpy.abs",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.yticks"
]
]
|
ikim-quantum/er_noise | [
"1e577ff75f8d3141116e37e11d22b516a9e38a24"
]
| [
"pcc/spacetime_1d.py"
]
| [
"from D1 import width_pcc_dmera_1d, depth_pcc_dmera_1d, width_pcc_dmera_1d_nocompression\nimport numpy as np\n\n\nn=8\nD=5\nwidths = []\nwidths_nc = []\ndepths = []\nvols = []\n\nfor x in range(2**n):\n print(\"x={}/{}\".format(x+1, 2**n))\n supp = [x, (x+1)%(2**n)]\n width = width_pcc_dmera_1d(n,D, supp)\n width_nc = width_pcc_dmera_1d_nocompression(n, D, supp)\n depth = depth_pcc_dmera_1d(n,D, supp)\n widths.append(width)\n widths_nc.append(width_nc)\n depths.append(depth)\n vols.append(width*depth)\n\nprint(\"max width (no compression)={}\".format(max(widths_nc)))\nprint(\"average width (no compression)={}\".format(np.mean(widths_nc)))\nprint(\"Standard deviation={}\".format(np.std(widths_nc)))\n \nprint(\"max width={}\".format(max(widths)))\nprint(\"average width={}\".format(np.mean(widths)))\nprint(\"Standard deviation={}\".format(np.std(widths)))\n\nprint(\"max depth={}\".format(max(depths)))\nprint(\"average depth={}\".format(np.mean(depths)))\nprint(\"Standard deviation={}\".format(np.std(depths)))\n\nprint(\"max vol={}\".format(max(vols)))\nprint(\"average vol={}\".format(np.mean(vols)))\nprint(\"Standard deviation={}\".format(np.std(vols)))\n"
]
| [
[
"numpy.std",
"numpy.mean"
]
]
|
prachigaikwad201994/https-github.com-prachi12345-MIRNet | [
"c6812f5bc4ac87e4e63af21aa4e0db84597a17c8"
]
| [
"mirnet/model/dual_attention_unit/attention_blocks.py"
]
| [
"import tensorflow as tf\n\n\ndef spatial_attention_block(input_tensor):\n \"\"\"Spatial Attention Block\"\"\"\n average_pooling = tf.reduce_max(input_tensor, axis=-1)\n average_pooling = tf.expand_dims(average_pooling, axis=-1)\n max_pooling = tf.reduce_mean(input_tensor, axis=-1)\n max_pooling = tf.expand_dims(max_pooling, axis=-1)\n concatenated = tf.keras.layers.Concatenate(axis=-1)([average_pooling, max_pooling])\n feature_map = tf.keras.layers.Conv2D(1, kernel_size=(1, 1))(concatenated)\n feature_map = tf.nn.sigmoid(feature_map)\n return input_tensor * feature_map\n\n\ndef channel_attention_block(input_tensor):\n \"\"\"Channel Attention Block\"\"\"\n channels = list(input_tensor.shape)[-1]\n average_pooling = tf.keras.layers.GlobalAveragePooling2D()(input_tensor)\n feature_descriptor = tf.reshape(average_pooling, shape=(-1, 1, 1, channels))\n feature_activations = tf.keras.layers.ReLU()(\n tf.keras.layers.Conv2D(\n filters=channels // 8, kernel_size=(1, 1)\n )(feature_descriptor)\n )\n feature_activations = tf.nn.sigmoid(\n tf.keras.layers.Conv2D(\n filters=channels, kernel_size=(1, 1)\n )(feature_activations)\n )\n return input_tensor * feature_activations\n"
]
| [
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.reduce_max",
"tensorflow.nn.sigmoid",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.ReLU",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.expand_dims"
]
]
|
PepperBurst/DSP | [
"5a078c494a53bf22dc387a532acc5151b13d0fa3"
]
| [
"Scripts/lab3b/Main.py"
]
| [
"from aSurnameFilters import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n#Plot of all graphs\n#There will be a total of 6 plots\ndef plotlab3b(code, date):\n n = np.arange(-10, 10 + 1)\n x = 1*(n==0)\n y = [s1(x), s2(x), s2(s1(x)), s1(s2(x))]\n # we are using object oriented programming\n # in using the figure plots in\n # matplot matplotlib\n title = 'Figure 3312a\\n'\n title += 'Impulse response of s1(n)\\n'\n title += code + ' ' + date\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, y[0])\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('x(n)')\n axarr[1].set_ylabel('s1(n)')\n title = title.replace('3312a', '3312b', 1)\n title = title.replace('s1(n)', 's2(n)', 1)\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, y[1])\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('xa(n)')\n axarr[1].set_ylabel('inte(xa(n))')\n title = title.replace('3312b', '3312c', 1)\n title = title.replace('s2(n)', 's2(s1(n))')\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, y[2])\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('xb(n)')\n axarr[1].set_ylabel('diff(xb(n))')\n title = title.replace('3312c', '3312d', 1)\n title = title.replace('s2(s1(n))', 's1(s2(n))')\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, y[3])\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('xb(n)')\n axarr[1].set_ylabel('inte(xb(n))')\n n = np.arange(0, 128 + 1)\n x = 1*(n==0)\n title = title.replace('3312d', '351', 1)\n title = title.replace('s1(s2(n))', '\\ny(n) + 0.8y(n − 2) = 0.2x(n) + 0.4x(n − 1) + 0.2x(n − 2)')\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, fil351(x))\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('x(n)')\n axarr[1].set_ylabel('y(n)')\n n = np.arange(-10, 100 + 1)\n x = 1*(n==0)\n title = title.replace('351', '352', 1)\n title = title.replace('y(n) − 1.8 cos(π/16)y(n − 1) + 0.81y(n − 2) = x(n) + 1/2x(n − 1)',\n 'y(n) + 0.8y(n − 2) = 0.2x(n) + 0.4x(n − 1) + 0.2x(n − 2)')\n f, axarr = plt.subplots(2)\n f.suptitle(title)\n axarr[0].stem(n, x)\n axarr[1].stem(n, fil352(x))\n axarr[0].grid()\n axarr[1].grid()\n axarr[1].set_xlabel('n')\n axarr[0].set_ylabel('x(n)')\n axarr[1].set_ylabel('y(n)')\n plt.show()\n\nif __name__ == '__main__':\n# edit your code and date here\n code = 'RJTRAMOS'\n date = '1/16/2018'\n plotlab3b(code, date)\n path = 'music.wav'\n musicFilter(path)\n"
]
| [
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
shineyruan/incubator-mxnet | [
"5964d519fb1b178cd7d4e532eaf208605dd68095"
]
| [
"python/mxnet/numpy/multiarray.py"
]
| [
"#!/usr/bin/env python\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=too-many-lines, unused-argument\n\"\"\"numpy ndarray and util functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\ntry:\n from __builtin__ import slice as py_slice\nexcept ImportError:\n from builtins import slice as py_slice\n\nfrom array import array as native_array\nimport ctypes\nimport warnings\nimport numpy as _np\nfrom ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP\nfrom ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\\\n get_oshape_of_gather_nd_op\nfrom ..ndarray._internal import _set_np_ndarray_class\nfrom . import _op as _mx_np_op\nfrom ..base import check_call, _LIB, NDArrayHandle, c_array\nfrom ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types\nfrom ..context import Context\nfrom ..util import _sanity_check_params, set_module, wrap_np_unary_func, wrap_np_binary_func\nfrom ..context import current_context\nfrom ..ndarray import numpy as _mx_nd_np\nfrom ..ndarray.numpy import _internal as _npi\nfrom ..ndarray.ndarray import _storage_type\n\n__all__ = ['ndarray', 'empty', 'array', 'shape', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide',\n 'mod', 'remainder', 'power', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10',\n 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',\n 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative',\n 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append',\n 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',\n 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum',\n 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',\n 'hanning', 'hamming', 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'bitwise_xor', 'bitwise_or',\n 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',\n 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum',\n 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory', 'diff', 'resize', 'nan_to_num', 'where']\n\n# Return code for dispatching indexing function call\n_NDARRAY_UNSUPPORTED_INDEXING = -1\n_NDARRAY_BASIC_INDEXING = 0\n_NDARRAY_ADVANCED_INDEXING = 1\n\n\n# This function is copied from ndarray.py since pylint\n# keeps giving false alarm error of undefined-all-variable\ndef _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name\n \"\"\"Return a new handle with specified 
shape and context.\n\n Empty handle is only used to hold results.\n\n Returns\n -------\n handle\n A new empty `ndarray` handle.\n \"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayCreateEx(\n c_array_buf(mx_uint, native_array('I', shape)),\n mx_uint(len(shape)),\n ctypes.c_int(ctx.device_typeid),\n ctypes.c_int(ctx.device_id),\n ctypes.c_int(int(delay_alloc)),\n ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),\n ctypes.byref(hdl)))\n return hdl\n\n\ndef _reshape_view(a, *shape): # pylint: disable=redefined-outer-name\n \"\"\"Returns a **view** of this array with a new shape without altering any data.\n\n Parameters\n ----------\n shape : tuple of int, or n ints\n The new shape should not change the array size, namely\n ``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.\n Some dimensions of the shape can take special value -1, which\n infers the dimension of the output shape by using the remainder of the\n input dimensions keeping the size of the new array same as that of the input array.\n At most one dimension of shape can be -1.\n\n Returns\n -------\n ndarray\n An array with desired shape that shares data with this array.\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (list, tuple)):\n shape = shape[0]\n handle = NDArrayHandle()\n check_call(_LIB.MXNDArrayReshape64(a.handle,\n len(shape),\n c_array(ctypes.c_int64, shape),\n False,\n ctypes.byref(handle)))\n return ndarray(handle=handle, writable=a.writable)\n\n\n# Have to use 0 as default value for stype since pylint does not allow\n# importing _STORAGE_TYPE_DEFAULT from ndarray.py.\ndef _np_ndarray_cls(handle, writable=True, stype=0):\n if stype == -1:\n stype = _storage_type(handle)\n if stype != 0:\n raise ValueError('_np_ndarray_cls currently only supports default storage '\n 'type, while received stype = {}'.format(stype))\n return ndarray(handle, writable=writable)\n\n\n_set_np_ndarray_class(_np_ndarray_cls)\n\n_NUMPY_ARRAY_FUNCTION_DICT = {}\n_NUMPY_ARRAY_UFUNC_DICT = {}\n\n\n@set_module('mxnet.numpy') # pylint: disable=invalid-name\nclass ndarray(NDArray):\n \"\"\"\n ndarray(handle, writable=True):\n\n An array object represents a multidimensional, homogeneous array of fixed-size items.\n An associated data-type object describes the format of each element in the array\n (its byte-order, how many bytes it occupies in memory, whether it is an integer, a\n floating point number, or something else, etc.). Arrays should be constructed using\n `array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.\n\n Arrays should be constructed using `array`, `zeros` or `empty` (refer\n to the See Also section below). 
The parameters given here refer to\n a low-level method (`ndarray(...)`) for instantiating an array.\n\n For more information, refer to the `mxnet.numpy` module and examine the\n methods and attributes of an array.\n\n Parameters\n ----------\n handle: int\n The ndarray handle in backend (C++).\n writable: bool\n Indicates whether inplace-assignment is allowed for the array.\n\n Attributes\n ----------\n T : ndarray\n Transpose of the array.\n dtype : dtype object\n Describes the format of the elements in the array.\n size : int\n Number of elements in the array.\n ndim : int\n The array's number of dimensions.\n shape : tuple of ints\n Shape of the array.\n\n See Also\n --------\n array : Construct an array.\n zeros : Create an array, each element of which is zero.\n empty : Create an array, but leave its allocated memory unchanged (i.e.,\n it contains \"garbage\").\n \"\"\"\n\n @staticmethod\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument\n \"\"\"\n Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray\n to this function. The operators must comply with the ufunc definition in NumPy.\n The following code is adapted from CuPy.\n \"\"\"\n if 'out' in kwargs:\n # need to unfold tuple argument in kwargs\n out = kwargs['out']\n if len(out) != 1:\n raise ValueError('The `out` parameter must have exactly one ndarray')\n kwargs['out'] = out[0]\n\n if method == '__call__':\n if ufunc.signature is not None:\n # we don't support generalised-ufuncs (gufuncs)\n return NotImplemented\n name = ufunc.__name__\n mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)\n if mx_ufunc is None:\n raise ValueError('mxnet.numpy operator `{}` has not been registered in '\n 'the _NUMPY_ARRAY_UFUNC_LIST. Please make sure you are '\n 'using NumPy >= 1.15.0 and the operator implementation '\n 'is compatible with NumPy. Then add the operator name '\n 'to the list.'\n .format(name))\n return mx_ufunc(*inputs, **kwargs)\n else:\n return NotImplemented\n\n @staticmethod\n def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument\n \"\"\"\n Dispatch official NumPy operators that comply with the array function protocol to\n this function.\n \"\"\"\n mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)\n if mx_np_func is None:\n raise ValueError('mxnet.numpy operator `{}` has not been registered in '\n 'the _NUMPY_ARRAY_FUNCTION_LIST. Please make sure you are '\n 'using NumPy >= 1.17.0 and the operator '\n 'implementation is compatible with NumPy. Then add '\n 'the operator name to the list.'.format(func))\n # Note: this allows subclasses that don't override\n # __array_function__ to handle mxnet.numpy.ndarray objects\n if not all(issubclass(t, ndarray) for t in types):\n return NotImplemented\n return mx_np_func(*args, **kwargs)\n\n def _get_np_basic_indexing(self, key):\n \"\"\"\n This function indexes ``self`` with a tuple of `slice` objects only.\n \"\"\"\n key_nd = tuple(idx for idx in key if idx is not None)\n if len(key_nd) < self.ndim:\n raise RuntimeError(\n 'too few indices after normalization: expected `ndim` ({}) '\n 'but got {}. 
This is a bug, please report it!'\n ''.format(self.ndim, len(key_nd))\n )\n if len(key_nd) > self.ndim:\n raise IndexError(\n 'too many indices ({}) for array with {} dimensions'\n ''.format(len(key_nd), self.ndim)\n )\n\n none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name\n slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)\n new_axes = self._new_axes_after_basic_indexing(none_axes, key)\n\n # Check bounds for integer axes\n for ax in int_axes: # pylint: disable=invalid-name\n if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:\n raise IndexError(\n 'index {} is out of bounds for axis {} with size {}'\n ''.format(key_nd[ax], ax, self.shape[ax]))\n\n if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):\n # Create a shared-memory view by using low-level flat slicing\n flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(\n slc_key, self.shape\n )\n handle = NDArrayHandle()\n flat_self = self.reshape_view(-1)\n check_call(\n _LIB.MXNDArraySlice(\n flat_self.handle,\n mx_uint(flat_begin),\n mx_uint(flat_end),\n ctypes.byref(handle),\n )\n )\n sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)\n sliced = self.__class__(handle=handle, writable=self.writable)\n if 0 in sliced_shape:\n sliced = sliced.reshape(sliced_shape)\n else:\n sliced = sliced.reshape_view(sliced_shape)\n\n else:\n begin, end, step = self._basic_indexing_key_to_begin_end_step(\n slc_key, self.shape, keep_none=True\n )\n sliced = _npi.slice(self, begin, end, step)\n\n # Reshape to final shape due to integer and `None` entries in `key`.\n final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]\n for ax in new_axes: # pylint: disable=invalid-name\n final_shape.insert(ax, 1)\n\n if sliced.size == 0:\n return sliced.reshape(tuple(final_shape))\n else:\n return sliced.reshape_view(tuple(final_shape))\n\n def _get_np_advanced_indexing(self, key):\n idcs, new_axes = self._get_index_nd(key)\n if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck\n idcs = idcs.as_np_ndarray()\n else:\n idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])\n sliced = _npi.gather_nd(self, idcs)\n # Reshape due to `None` entries in `key`.\n if new_axes:\n final_shape = [sliced.shape[i] for i in range(sliced.ndim)]\n for ax in new_axes: # pylint: disable=invalid-name\n final_shape.insert(ax, 1)\n return sliced.reshape(tuple(final_shape))\n else:\n return sliced\n\n def _set_np_advanced_indexing(self, key, value):\n \"\"\"This function is called by __setitem__ when key is an advanced index.\"\"\"\n idcs, new_axes = self._get_index_nd(key)\n if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck\n idcs = idcs.as_np_ndarray()\n else:\n idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])\n vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)\n value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)\n self._scatter_set_nd(value_nd, idcs)\n\n # pylint: disable=too-many-return-statements\n def __getitem__(self, key):\n \"\"\"Return self[key].\n\n Returns a sliced view of this array if the elements fetched are contiguous in memory;\n otherwise, returns a newly created NDArray.\n This functions supports advanced indexing defined in the following reference with\n some restrictions. Boolean indexing is supported only for a single boolean ndarray\n as a key. 
Mixing boolean ndarray with other index types is not supported in ``advanced``\n indexing.\n\n For basic indexing, i.e., if ``key`` consists only of integers,\n ``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is\n returned that shares memory with this array if the accessed portion is\n contiguous in memory.\n Otherwise, a newly created ``ndarray`` is returned.\n\n This functions supports advanced indexing as defined in `the NumPy\n advanced indexing documentation\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.\n\n Parameters\n ----------\n key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types\n Indexing key.\n\n Examples\n --------\n The default is to give explicit indices for all axes:\n\n >>> x = np.arange(6).reshape(2, 3)\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> x[0, :2]\n array([0., 1.])\n >>> x[:, :-1]\n array([[0., 1.],\n [3., 4.]])\n\n If fewer indices are given, they are automatically supplemented by an\n appropriate number of ``slice(None)`` (\"``:``\") to the right. For\n instance, a single integer indexes along the first axis:\n\n >>> x[0]\n array([0., 1., 2.])\n >>> x[1:]\n array([[3., 4., 5.]])\n\n To omit a range of axes that should be kept as-is, an `Ellipsis`\n (\"``...``\") can be used:\n\n >>> x = np.arange(16).reshape(2, 2, 2, 2)\n >>> x[0, ..., 1]\n array([[1., 3.],\n [5., 7.]])\n >>> x[0, :, :, 1] # equivalent\n array([[1., 3.],\n [5., 7.]])\n\n New axes of length 1 can be created by inserting ``None``\n (`numpy.newaxis`) in the index:\n\n >>> x = np.arange(6).reshape(2, 3)\n >>> x[None, :, :]\n array([[[0., 1., 2.],\n [3., 4., 5.]]])\n >>> x[None, :, :].shape\n (1, 2, 3)\n\n If the indexed portion of the array is contiguous in memory, no data\n is copied. 
Instead, a shared-memory view of the original array is\n returned, and changes to that view affect the original array:\n\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> y = x[0] # contiguous\n >>> y\n array([[0., 1.],\n [2., 3.]])\n >>> y[:] = -1\n >>> x\n array([[[-1., -1.],\n [-1., -1.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> y = x[1, :1, :] # contiguous\n >>> y\n array([[4., 5.]])\n >>> y[:] = -1\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[-1., -1.],\n [ 6., 7.]]])\n >>> x = np.arange(0, 8).reshape(2, 2, 2)\n >>> y = x[:, :, 1] # not contiguous\n >>> y\n array([[1., 3.],\n [5., 7.]])\n >>> y[:] = -1\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n\n If the indexing key contains `list`, `numpy.ndarray` or `NDArray`\n objects, advanced indexing is triggered, which always returns a\n copy:\n\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> x[[0, 1]]\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> x[[0, 1], :] # equivalent\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> y = np.array([0, 1], dtype='int32')\n >>> x[1:, y]\n array([[[4., 5.],\n [6., 7.]]])\n >>> y = np.array([0, 1], dtype='int32')\n >>> x[1:, y]\n array([[[4., 5.],\n [6., 7.]]])\n\n Get negative elements in an ndarray through boolean array indexing\n >>> x = np.array([1., -1., -2., 3])\n >>> x[x < 0]\n array([-1., -2.])\n \"\"\"\n # handling possible boolean indexing first\n ndim = self.ndim\n shape = self.shape # pylint: disable=redefined-outer-name\n\n if isinstance(key, list):\n try:\n new_key = _np.array(key)\n if new_key.dtype == _np.bool_:\n key = new_key\n except Exception as err:\n raise TypeError('{}'.format(str(err)))\n if isinstance(key, _np.ndarray) and key.dtype == _np.bool_:\n key = array(key, dtype='bool', ctx=self.ctx)\n if isinstance(key, ndarray) and key.dtype == _np.bool_: # boolean indexing\n key_shape = key.shape\n key_ndim = len(key_shape)\n if ndim < key_ndim:\n raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'\n .format(key_ndim, ndim))\n for i in range(key_ndim):\n if key_shape[i] != shape[i]:\n raise IndexError('boolean index did not match indexed array along dimension {};'\n ' dimension is {} but corresponding boolean dimension is {}'\n .format(i, shape[i], key_shape[i]))\n remaining_dims = shape[key_ndim:]\n data = _reshape_view(self, -1, *remaining_dims)\n key = _reshape_view(key, -1)\n return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)\n\n if ndim == 0:\n if key != ():\n raise IndexError('scalar tensor can only accept `()` as index')\n # Handle simple cases for higher speed\n if isinstance(key, tuple) and len(key) == 0:\n return self\n if isinstance(key, tuple) and len(key) == ndim\\\n and all(isinstance(idx, integer_types) for idx in key):\n out = self\n for idx in key:\n out = out[idx]\n return out\n if isinstance(key, integer_types):\n if key > shape[0] - 1:\n raise IndexError(\n 'index {} is out of bounds for axis 0 with size {}'.format(\n key, shape[0]))\n return self._at(key)\n elif isinstance(key, py_slice):\n if key.step is None or key.step == 1:\n if key.start is not None or key.stop is not None:\n return self._slice(key.start, key.stop)\n else:\n return self\n elif key.step == 0:\n raise ValueError(\"slice step cannot be zero\")\n\n key = indexing_key_expand_implicit_axes(key, self.shape)\n indexing_dispatch_code = get_indexing_dispatch_code(key)\n if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:\n return self._get_np_basic_indexing(key)\n 
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:\n return self._get_np_advanced_indexing(key)\n else:\n raise RuntimeError\n\n def __setitem__(self, key, value):\n \"\"\"Sets ``self[key]`` to ``value``.\n\n This functions supports advanced indexing as defined in `the NumPy\n advanced indexing documentation\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,\n with the restriction that boolean array indexing is not supported.\n\n Parameters\n ----------\n key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types\n The indexing key.\n value : scalar or array-like object that can be broadcast to the shape of self[key]\n The value to set.\n\n Examples\n --------\n >>> x = np.zeros((2, 3))\n >>> x[:] = 1\n >>> x\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n >>> x[:, 1:2] = 2\n >>> x\n array([[ 1., 2., 1.],\n [ 1., 2., 1.]])\n >>> x[1:2, 1:] = 3\n >>> x\n array([[ 1., 2., 1.],\n [ 1., 3., 3.]])\n >>> x[1:, 0:2] = np.zeros((1, 2))\n >>> x\n array([[ 1., 2., 1.],\n [ 0., 0., 3.]])\n >>> x[1, 2] = 4\n >>> x\n array([[ 1., 2., 1.],\n [ 0., 0., 4.]])\n >>> x[[0], [1, 2]] = 5\n >>> x\n array([[ 1., 5., 5.],\n [ 0., 0., 4.]])\n >>> x[::-1, 0:2:2] = [6]\n >>> x\n array([[ 6., 5., 5.],\n [ 6., 0., 4.]])\n \"\"\"\n if isinstance(value, NDArray) and not isinstance(value, ndarray):\n raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')\n\n # handle basic and advanced indexing\n if self.ndim == 0:\n if not isinstance(key, tuple) or len(key) != 0:\n raise IndexError('scalar tensor can only accept `()` as index')\n if isinstance(value, numeric_types):\n self._full(value)\n elif isinstance(value, ndarray) and value.size == 1:\n if value.shape != self.shape:\n value = value.reshape(self.shape)\n value.copyto(self)\n elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:\n if isinstance(value, _np.generic) or value.shape != self.shape:\n value = value.reshape(self.shape)\n self._sync_copyfrom(value)\n else:\n raise ValueError('setting an array element with a sequence.')\n else:\n key = indexing_key_expand_implicit_axes(key, self.shape)\n slc_key = tuple(idx for idx in key if idx is not None)\n if len(slc_key) < self.ndim:\n raise RuntimeError(\n 'too few indices after normalization: expected `ndim` ({}) '\n 'but got {}. 
This is a bug, please report it!'\n ''.format(self.ndim, len(slc_key))\n )\n if len(slc_key) > self.ndim and self.ndim != 0:\n raise IndexError(\n 'too many indices ({}) for array with {} dimensions'\n ''.format(len(slc_key), self.ndim)\n )\n indexing_dispatch_code = get_indexing_dispatch_code(slc_key)\n if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:\n self._set_nd_basic_indexing(key, value) # function is inheritated from NDArray class\n elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:\n self._set_np_advanced_indexing(key, value)\n else:\n raise ValueError(\n 'Indexing NDArray with index {} of type {} is not supported'\n ''.format(key, type(key))\n )\n\n def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):\n \"\"\"Return a broadcast `ndarray` with same context and dtype as ``self``.\n For setting item, The returned `ndarray` is squeezed according to squeeze_axes since the\n value_nd is assigned to not yet expanded space in original array.\n `value`: numeric types or array like.\n `bcast_shape`: a shape tuple.\n `squeeze_axes`: a sequence of axes to squeeze in the value array.\n Note: mxnet.numpy.ndarray not support NDArray as assigned value.\n \"\"\"\n if isinstance(value, numeric_types):\n value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)\n elif isinstance(value, self.__class__):\n value_nd = value.as_in_ctx(self.ctx)\n if value_nd.dtype != self.dtype:\n value_nd = value_nd.astype(self.dtype)\n else:\n try:\n value_nd = array(value, ctx=self.ctx, dtype=self.dtype)\n except:\n raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '\n 'object {} of type {}'.format(value, type(value)))\n\n # For advanced indexing setitem, if there is None in indices, we need to squeeze the\n # assigned value_nd since None is also ignored in slicing the original array.\n if squeeze_axes and value_nd.ndim > len(bcast_shape):\n squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])\n value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))\n\n # handle the cases like the following\n # a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b\n # b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed\n if value_nd.ndim > len(bcast_shape):\n squeeze_axes = []\n for i in range(value_nd.ndim - len(bcast_shape)):\n if value_nd.shape[i] == 1:\n squeeze_axes.append(i)\n else:\n break\n if squeeze_axes:\n value_nd = value_nd.squeeze(squeeze_axes)\n\n if value_nd.shape != bcast_shape:\n if value_nd.size == 0:\n value_nd = value_nd.reshape(bcast_shape)\n else:\n value_nd = value_nd.broadcast_to(bcast_shape)\n return value_nd\n\n def __add__(self, other):\n \"\"\"x.__add__(y) <=> x + y\"\"\"\n return add(self, other)\n\n def __iadd__(self, other):\n \"\"\"x.__iadd__(y) <=> x += y\"\"\"\n if not self.writable:\n raise ValueError('trying to add to a readonly ndarray')\n return add(self, other, out=self)\n\n def __sub__(self, other):\n \"\"\"x.__sub__(y) <=> x - y\"\"\"\n return subtract(self, other)\n\n def __isub__(self, other):\n \"\"\"x.__isub__(y) <=> x -= y\"\"\"\n if not self.writable:\n raise ValueError('trying to subtract from a readonly ndarray')\n return subtract(self, other, out=self)\n\n def __rsub__(self, other):\n \"\"\"x.__rsub__(y) <=> y - x\"\"\"\n return subtract(other, self)\n\n def __mul__(self, other):\n \"\"\"x.__mul__(y) <=> x * y\"\"\"\n return multiply(self, other)\n\n def __neg__(self):\n return self.__mul__(-1.0)\n\n def __imul__(self, other):\n \"\"\"x.__imul__(y) <=> 
x *= y\"\"\"\n if not self.writable:\n raise ValueError('trying to add to a readonly ndarray')\n return multiply(self, other, out=self)\n\n def __rmul__(self, other):\n \"\"\"x.__rmul__(y) <=> y * x\"\"\"\n return self.__mul__(other)\n\n def __div__(self, other):\n \"\"\"x.__div__(y) <=> x / y\"\"\"\n return divide(self, other)\n\n def __rdiv__(self, other):\n \"\"\"x.__rdiv__(y) <=> y / x\"\"\"\n return divide(other, self)\n\n def __idiv__(self, other):\n \"\"\"x.__idiv__(y) <=> x /= y\"\"\"\n return divide(self, other, out=self)\n\n def __truediv__(self, other):\n \"\"\"x.__truediv__(y) <=> x / y\"\"\"\n return divide(self, other)\n\n def __rtruediv__(self, other):\n \"\"\"x.__rtruediv__(y) <=> y / x\"\"\"\n return divide(other, self)\n\n def __itruediv__(self, other):\n \"\"\"x.__itruediv__(y) <=> x /= y\"\"\"\n return divide(self, other, out=self)\n\n def __mod__(self, other):\n \"\"\"x.__mod__(y) <=> x % y\"\"\"\n return mod(self, other)\n\n def __rmod__(self, other):\n \"\"\"x.__rmod__(y) <=> y % x\"\"\"\n return mod(other, self)\n\n def __imod__(self, other):\n \"\"\"x.__imod__(y) <=> x %= y\"\"\"\n return mod(self, other, out=self)\n\n def __pow__(self, other):\n \"\"\"x.__pow__(y) <=> x ** y\"\"\"\n return power(self, other)\n\n def __rpow__(self, other):\n \"\"\"x.__rpow__(y) <=> y ** x\"\"\"\n return power(other, self)\n\n def __eq__(self, other):\n \"\"\"x.__eq__(y) <=> x == y\"\"\"\n return equal(self, other)\n\n def __hash__(self):\n raise NotImplementedError\n\n def __ne__(self, other):\n \"\"\"x.__ne__(y) <=> x != y\"\"\"\n return not_equal(self, other)\n\n def __gt__(self, other):\n \"\"\"x.__gt__(y) <=> x > y\"\"\"\n return greater(self, other)\n\n def __ge__(self, other):\n \"\"\"x.__ge__(y) <=> x >= y\"\"\"\n return greater_equal(self, other)\n\n def __lt__(self, other):\n \"\"\"x.__lt__(y) <=> x < y\"\"\"\n return less(self, other)\n\n def __le__(self, other):\n \"\"\"x.__le__(y) <=> x <= y\"\"\"\n return less_equal(self, other)\n\n def __bool__(self):\n num_elements = self.size\n if num_elements == 0:\n warnings.simplefilter('default')\n warnings.warn('The truth value of an empty array is ambiguous. 
Returning False, but in'\n ' future this will result in an error.', DeprecationWarning)\n return False\n elif num_elements == 1:\n return bool(self.item())\n else:\n raise ValueError(\"The truth value of an ndarray with multiple elements is ambiguous.\")\n\n __nonzero__ = __bool__\n\n def __float__(self):\n num_elements = self.size\n if num_elements != 1:\n raise TypeError('only size-1 arrays can be converted to Python scalars')\n return float(self.item())\n\n def __int__(self):\n num_elements = self.size\n if num_elements != 1:\n raise TypeError('only size-1 arrays can be converted to Python scalars')\n return int(self.item())\n\n def __len__(self):\n \"\"\"Number of elements along the first axis.\"\"\"\n shape = self.shape # pylint: disable=redefined-outer-name\n if len(shape) == 0:\n raise TypeError('len() of unsized object')\n return self.shape[0]\n\n def __reduce__(self):\n return ndarray, (None,), self.__getstate__()\n\n def item(self, *args):\n \"\"\"Copy an element of an array to a standard Python scalar and return it.\n\n Parameters\n ----------\n *args : Arguments (variable number and type)\n none: in this case, the method only works for arrays with one element (a.size == 1),\n which element is copied into a standard Python scalar object and returned.\n\n int_type: this argument is interpreted as a flat index into the array, specifying which\n element to copy and return.\n\n tuple of int_types: functions as does a single int_type argument, except that the\n argument is interpreted as an nd-index into the array.\n\n Returns\n -------\n z : Standard Python scalar object\n A copy of the specified element of the array as a suitable Python scalar.\n \"\"\"\n # TODO(junwu): no need to call asnumpy() on the whole array.\n return self.asnumpy().item(*args)\n\n def nonzero(self):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Refer to `numpy.nonzero` for full documentation.\n\n See Also\n --------\n numpy.nonzero : equivalent function\n \"\"\"\n return nonzero(self)\n\n @property\n # pylint: disable= invalid-name, undefined-variable\n def T(self):\n \"\"\"Same as self.transpose(). This always returns a copy of self.\"\"\"\n return self.transpose()\n # pylint: enable= invalid-name, undefined-variable\n\n def all(self, axis=None, out=None, keepdims=False):\n raise NotImplementedError\n\n def any(self, axis=None, out=None, keepdims=False):\n raise NotImplementedError\n\n def as_nd_ndarray(self):\n \"\"\"Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))\n return NDArray(handle=hdl, writable=self.writable)\n\n def as_np_ndarray(self):\n \"\"\"A convenience function for creating a numpy ndarray from the current ndarray\n with zero copy. For this class, it just returns itself since it's already a\n numpy ndarray.\"\"\"\n return self\n\n def __repr__(self):\n \"\"\"\n Returns a string representation of the array. The dtype of the ndarray will not\n be appended to the string if it is `float32`. 
The context of the ndarray will\n be appended for devices other than CPU.\n\n Examples\n --------\n >>> from mxnet import np, npx\n >>> a = np.random.uniform(size=(2, 3))\n >>> a\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]])\n >>> print(a)\n [[0.5488135 0.5928446 0.71518934]\n [0.84426576 0.60276335 0.8579456 ]]\n >>> a.dtype\n <class 'numpy.float32'>\n >>> b = a.astype(np.float64)\n >>> b\n array([[0.54881352, 0.59284461, 0.71518934],\n [0.84426576, 0.60276335, 0.85794562]], dtype=float64)\n >>> print(b)\n [[0.54881352 0.59284461 0.71518934]\n [0.84426576 0.60276335 0.85794562]]\n >>> b.dtype\n <class 'numpy.float64'>\n >>> c = a.copyto(npx.gpu(0))\n >>> c\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))\n >>> print(c)\n [[0.5488135 0.5928446 0.71518934]\n [0.84426576 0.60276335 0.8579456 ]] @gpu(0)\n >>> d = b.copyto(npx.gpu(0))\n >>> d\n array([[0.54881352, 0.59284461, 0.71518934],\n [0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))\n >>> print(d)\n [[0.54881352 0.59284461 0.71518934]\n [0.84426576 0.60276335 0.85794562]] @gpu(0)\n \"\"\"\n array_str = self.asnumpy().__repr__()\n dtype = self.dtype\n if 'dtype=' in array_str:\n if dtype == _np.float32:\n array_str = array_str[:array_str.rindex(',')] + ')'\n elif dtype not in (_np.float32, _np.bool_):\n array_str = array_str[:-1] + ', dtype={})'.format(dtype)\n\n context = self.ctx\n if context.device_type == 'cpu':\n return array_str\n return array_str[:-1] + ', ctx={})'.format(str(context))\n\n def __str__(self):\n \"\"\"Returns a string representation of the array.\"\"\"\n array_str = self.asnumpy().__str__()\n context = self.ctx\n if context.device_type == 'cpu' or self.ndim == 0:\n return array_str\n return '{array} @{ctx}'.format(array=array_str, ctx=context)\n\n def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ\n \"\"\"Attach a gradient buffer to this ndarray, so that `backward`\n can compute gradient with respect to it.\n\n Parameters\n ----------\n grad_req : {'write', 'add', 'null'}\n How gradient will be accumulated.\n - 'write': gradient will be overwritten on every backward.\n - 'add': gradient will be added to existing value on every backward.\n - 'null': do not compute gradient for this NDArray.\n \"\"\"\n grad = _mx_np_op.zeros_like(self) # pylint: disable=undefined-variable\n grad_req = _GRAD_REQ_MAP[grad_req]\n check_call(_LIB.MXAutogradMarkVariables(\n 1, ctypes.pointer(self.handle),\n ctypes.pointer(mx_uint(grad_req)),\n ctypes.pointer(grad.handle)))\n\n @property\n def grad(self):\n \"\"\"Returns gradient buffer attached to this ndarray.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))\n if hdl.value is None:\n return None\n return _np_ndarray_cls(hdl)\n\n def detach(self):\n \"\"\"Returns a new ndarray, detached from the current graph.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))\n return _np_ndarray_cls(hdl)\n\n def astype(self, dtype, **kwargs): # pylint: disable=arguments-differ,unused-argument\n \"\"\"\n Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n copy : bool, optional\n Default `True`. By default, astype always returns a newly\n allocated ndarray on the same context. 
If this is set to\n `False`, and the dtype requested is the same as the ndarray's\n dtype, the ndarray is returned instead of a copy.\n\n Returns\n -------\n arr_t : ndarray\n Unless `copy` is False and the other conditions for returning the input\n array are satisfied (see description for `copy` input parameter), `arr_t`\n is a new array of the same shape as the input array with `dtype`.\n \"\"\"\n _sanity_check_params('astype', ['order', 'casting', 'subok'], kwargs)\n copy = kwargs.get('copy', True)\n if not copy and _np.dtype(dtype) == self.dtype:\n return self\n\n res = empty(self.shape, dtype=dtype, ctx=self.ctx)\n self.copyto(res)\n return res\n\n def copyto(self, other):\n \"\"\"Copies the value of this array to another array.\n\n If ``other`` is a ``ndarray`` object, then ``other.shape`` and\n ``self.shape`` should be the same. This function copies the value from\n ``self`` to ``other``.\n\n If ``other`` is a context, a new ``np.ndarray`` will be first created on\n the target context, and the value of ``self`` is copied.\n\n Parameters\n ----------\n other : ndarray or Context\n The destination array or context.\n\n Returns\n -------\n out: ndarray\n The copied array. If ``other`` is an ``ndarray``, then the return value\n and ``other`` will point to the same ``ndarray``.\n\n Examples\n --------\n >>> x = np.ones((2, 3))\n >>> y = np.zeros((2, 3), ctx=npx.gpu(0))\n >>> z = x.copyto(y)\n >>> z is y\n True\n >>> y\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n \"\"\"\n if isinstance(other, ndarray):\n if other.handle is self.handle:\n warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)\n return False\n return _npi.copyto(self, out=other)\n elif isinstance(other, Context):\n hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))\n return _npi.copyto(self, out=hret)\n else:\n raise TypeError('copyto does not support type ' + str(type(other)))\n\n def asscalar(self):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')\n\n def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ\n \"\"\"Return indices of the maximum values along the given axis.\n Refer to `mxnet.numpy.argmax` for full documentation.\"\"\"\n return argmax(self, axis, out)\n\n def as_in_context(self, context):\n \"\"\"This function has been deprecated. Please refer to ``ndarray.as_in_ctx``.\"\"\"\n warnings.warn('ndarray.as_in_context has been renamed to'\n ' ndarray.as_in_ctx', DeprecationWarning)\n return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()\n\n def as_in_ctx(self, ctx):\n \"\"\"Returns an array on the target device with the same value as this array.\n\n If the target context is the same as ``self.context``, then ``self`` is\n returned. Otherwise, a copy is made.\n\n Parameters\n ----------\n context : Context\n The target context.\n\n Returns\n -------\n ndarray\n The target array.\n \"\"\"\n if self.ctx == ctx:\n return self\n return self.copyto(ctx)\n\n @property\n def ctx(self):\n \"\"\"Device context of the array.\n\n Examples\n --------\n >>> x = np.array([1, 2, 3, 4])\n >>> x.ctx\n cpu(0)\n >>> type(x.ctx)\n <class 'mxnet.context.Context'>\n >>> y = np.zeros((2, 3), npx.gpu(0))\n >>> y.ctx\n gpu(0)\n \"\"\"\n dev_typeid = ctypes.c_int()\n dev_id = ctypes.c_int()\n check_call(_LIB.MXNDArrayGetContext(\n self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))\n return Context(Context.devtype2str[dev_typeid.value], dev_id.value)\n\n @property\n def context(self):\n \"\"\"This function has been deprecated. 
Please refer to ``ndarray.ctx``.\"\"\"\n warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)\n return self.as_nd_ndarray().context\n\n def copy(self, order='C'): # pylint: disable=arguments-differ\n \"\"\"Return a coyp of the array, keeping the same context.\n\n Parameters\n ----------\n order : str\n The memory layout of the copy. Currently, only c-contiguous memory\n layout is supported.\n\n Examples\n --------\n >>> x = np.ones((2, 3))\n >>> y = x.copy()\n >>> y\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n \"\"\"\n if order != 'C':\n raise NotImplementedError('ndarray.copy only supports order=\\'C\\', while '\n 'received {}'.format(str(order)))\n return self.copyto(self.ctx)\n\n def dot(self, b, out=None):\n \"\"\"Dot product of two arrays.\n Refer to ``numpy.dot`` for full documentation.\"\"\"\n return _mx_np_op.dot(self, b, out=out)\n\n def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ\n \"\"\"Returns a copy of the array with a new shape.\n\n Notes\n -----\n Unlike the free function `numpy.reshape`, this method on `ndarray` allows\n the elements of the shape parameter to be passed in as separate arguments.\n For example, ``a.reshape(10, 11)`` is equivalent to\n ``a.reshape((10, 11))``.\n \"\"\"\n order = 'C'\n if len(kwargs) > 1:\n raise TypeError('function takes at most 1 keyword argument')\n if len(kwargs) == 1:\n if 'order' not in kwargs:\n raise TypeError('{} is an invalid keyword argument for this function'\n .format(kwargs.keys()[0]))\n order = kwargs.pop('order', 'C')\n if order != 'C':\n raise NotImplementedError('only supports C-order,'\n ' while received {}'.format(order))\n if len(args) == 0:\n raise TypeError('reshape() takes exactly 1 argument (0 given)')\n if len(args) == 1 and isinstance(args[0], tuple):\n return _mx_np_op.reshape(self, newshape=args[0], order=order)\n else:\n return _mx_np_op.reshape(self, newshape=args, order=order)\n\n def reshape_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reshape_like`.\n\n The arguments are the same as for :py:func:`reshape_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')\n\n def reshape_view(self, *shape, **kwargs): # pylint: disable=redefined-outer-name\n \"\"\"Returns a **view** of this array with a new shape without altering any data.\n Inheritated from NDArray.reshape.\n \"\"\"\n return super(ndarray, self).reshape(*shape, **kwargs)\n\n def zeros_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`zeros_like`.\n\n The arguments are the same as for :py:func:`zeros_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')\n\n def ones_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ones_like`.\n\n The arguments are the same as for :py:func:`ones_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')\n\n def broadcast_axes(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`broadcast_axes`.\n\n The arguments are the same as for :py:func:`broadcast_axes`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')\n\n def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ\n \"\"\"Repeat elements of an array.\"\"\"\n return _mx_np_op.repeat(self, repeats=repeats, axis=axis)\n\n def 
pad(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`pad`.\n\n The arguments are the same as for :py:func:`pad`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')\n\n def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ\n \"\"\"Return a copy of the array with axis1 and axis2 interchanged.\n Refer to `mxnet.numpy.swapaxes` for full documentation.\n \"\"\"\n return swapaxes(self, axis1, axis2)\n\n def split(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split`.\n\n The arguments are the same as for :py:func:`split`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute split')\n\n def split_v2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split_v2`.\n\n The arguments are the same as for :py:func:`split_v2`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')\n\n def slice(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice`.\n\n The arguments are the same as for :py:func:`slice`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')\n\n def slice_axis(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_axis`.\n\n The arguments are the same as for :py:func:`slice_axis`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')\n\n def slice_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_like`.\n\n The arguments are the same as for :py:func:`slice_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')\n\n def slice_assign_scalar(self, value, begin, end, step):\n \"\"\"\n Assign the scalar to a cropped subset of this ndarray. 
Value will broadcast to the shape of the cropped shape\n and will be cast to the same dtype of the ndarray.\n\n Parameters\n ----------\n value: numeric value\n Value and this ndarray should be of the same data type.\n The shape of rhs should be the same as the cropped shape of this ndarray.\n begin: tuple of begin indices\n end: tuple of end indices\n step: tuple of step lenghths\n\n Returns\n -------\n This ndarray.\n\n Examples\n --------\n >>> x = np.ones((2, 2, 2))\n >>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))\n >>> y\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n >>> x\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n \"\"\"\n return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)\n\n def slice_assign(self, rhs, begin, end, step):\n \"\"\"\n Assign the rhs to a cropped subset of this ndarray in place.\n Returns the view of this ndarray.\n\n Parameters\n ----------\n rhs: ndarray.\n rhs and this NDArray should be of the same data type, and on the same device.\n The shape of rhs should be the same as the cropped shape of this ndarray.\n begin: tuple of begin indices\n end: tuple of end indices\n step: tuple of step lenghths\n\n Returns\n -------\n out : ndarray\n This ndarray.\n\n Examples\n --------\n >>> x = np.ones((2, 2, 2))\n >>> assigned = np.zeros((1, 1, 2))\n >>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))\n >>> y\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n >>> x\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n \"\"\"\n return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)\n\n def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name\n \"\"\"Convenience fluent method for :py:func:`take`.\n\n The arguments are the same as for :py:func:`take`, with\n this array as data.\n \"\"\"\n return take(self, indices, axis, mode=mode)\n\n def one_hot(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`one_hot`.\n\n The arguments are the same as for :py:func:`one_hot`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')\n\n def pick(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`pick`.\n\n The arguments are the same as for :py:func:`pick`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')\n\n def sort(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sort`.\n\n The arguments are the same as for :py:func:`sort`, with\n this array as data.\n \"\"\"\n raise NotImplementedError\n\n def topk(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`topk`.\n\n The arguments are the same as for :py:func:`topk`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')\n\n def argsort(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argsort`.\n\n The arguments are the same as for :py:func:`argsort`, with\n this array as data.\n \"\"\"\n raise NotImplementedError\n\n def argmax_channel(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argmax_channel`.\n\n The arguments are the same as for :py:func:`argmax_channel`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')\n\n def argmin(self, axis=None, 
out=None): # pylint: disable=arguments-differ\n \"\"\"Return indices of the minium values along the given axis.\n Refer to `mxnet.numpy.argmin` for full documentation.\"\"\"\n return argmin(self, axis, out)\n\n def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ\n \"\"\"Return an array whose values are limited to [min, max].\n One of max or min must be given.\n \"\"\"\n return clip(self, min, max, out=out)\n\n def abs(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`abs`.\n\n The arguments are the same as for :py:func:`abs`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')\n\n def sign(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sign`.\n\n The arguments are the same as for :py:func:`sign`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')\n\n def flatten(self, order='C'): # pylint: disable=arguments-differ\n \"\"\"Return a copy of the array collapsed into one dimension.\"\"\"\n return self.reshape(-1, order=order)\n\n def shape_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`shape_array`.\n\n The arguments are the same as for :py:func:`shape_array`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')\n\n def size_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`size_array`.\n\n The arguments are the same as for :py:func:`size_array`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')\n\n def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument\n \"\"\"Convenience fluent method for :py:func:`expand_dims`.\n\n The arguments are the same as for :py:func:`expand_dims`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')\n\n def tile(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tile`.\n\n The arguments are the same as for :py:func:`tile`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tile')\n\n def transpose(self, *axes): # pylint: disable=arguments-differ\n \"\"\"Permute the dimensions of an array.\"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1:\n if isinstance(axes[0], (tuple, list)):\n axes = axes[0]\n elif axes[0] is None:\n axes = None\n return _mx_np_op.transpose(self, axes=axes)\n\n def flip(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`flip`.\n\n The arguments are the same as for :py:func:`flip`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')\n\n def depth_to_space(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`depth_to_space`.\n\n The arguments are the same as for :py:func:`depth_to_space`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')\n\n def space_to_depth(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`space_to_depth`.\n\n The arguments are the same as for :py:func:`space_to_depth`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')\n\n def diag(self, k=0, **kwargs):\n \"\"\"Convenience fluent method for 
:py:func:`diag`.\n\n The arguments are the same as for :py:func:`diag`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')\n\n def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the sum of the array elements over the given axis.\"\"\"\n return _mx_np_op.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n def nansum(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nansum`.\n\n The arguments are the same as for :py:func:`nansum`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')\n\n def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the product of the array elements over the given axis.\"\"\"\n return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n\n def nanprod(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nanprod`.\n\n The arguments are the same as for :py:func:`nanprod`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')\n\n def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Returns the average of the array elements along given axis.\"\"\"\n return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n # pylint: disable=too-many-arguments, arguments-differ\n def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n \"\"\"Returns the standard deviation of the array elements along given axis.\"\"\"\n return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n \"\"\"Returns the variance of the array elements, along given axis.\"\"\"\n return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)\n # pylint: enable=too-many-arguments, arguments-differ\n\n def cumsum(self, axis=None, dtype=None, out=None):\n \"\"\"Return the cumulative sum of the elements along the given axis.\"\"\"\n return _mx_np_op.cumsum(self, axis=axis, dtype=dtype, out=out)\n\n def tolist(self):\n return self.asnumpy().tolist()\n\n def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the maximum along a given axis.\"\"\"\n return _mx_np_op.max(self, axis=axis, keepdims=keepdims, out=out)\n\n def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Convenience fluent method for :py:func:`min`.\n\n The arguments are the same as for :py:func:`min`, with\n this array as data.\n \"\"\"\n return _mx_np_op.min(self, axis=axis, keepdims=keepdims, out=out)\n\n def norm(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`norm`.\n\n The arguments are the same as for :py:func:`norm`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')\n\n def round(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`round`.\n\n The arguments are the same as for :py:func:`round`, with\n this array as data.\n \"\"\"\n raise NotImplementedError\n\n def rint(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rint`.\n\n The arguments are the same as for :py:func:`rint`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has 
no attribute rint')\n\n def fix(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`fix`.\n\n The arguments are the same as for :py:func:`fix`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')\n\n def floor(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`floor`.\n\n The arguments are the same as for :py:func:`floor`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')\n\n def ceil(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ceil`.\n\n The arguments are the same as for :py:func:`ceil`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')\n\n def trunc(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`trunc`.\n\n The arguments are the same as for :py:func:`trunc`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')\n\n def sin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sin`.\n\n The arguments are the same as for :py:func:`sin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')\n\n def cos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cos`.\n\n The arguments are the same as for :py:func:`cos`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')\n\n def tan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tan`.\n\n The arguments are the same as for :py:func:`tan`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')\n\n def arcsin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsin`.\n\n The arguments are the same as for :py:func:`arcsin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')\n\n def arccos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccos`.\n\n The arguments are the same as for :py:func:`arccos`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')\n\n def arctan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctan`.\n\n The arguments are the same as for :py:func:`arctan`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')\n\n def degrees(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`degrees`.\n\n The arguments are the same as for :py:func:`degrees`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')\n\n def radians(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`radians`.\n\n The arguments are the same as for :py:func:`radians`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')\n\n def sinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sinh`.\n\n The arguments are the same as for :py:func:`sinh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')\n\n def cosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cosh`.\n\n The arguments are the same as for 
:py:func:`cosh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')\n\n def tanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tanh`.\n\n The arguments are the same as for :py:func:`tanh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')\n\n def arcsinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsinh`.\n\n The arguments are the same as for :py:func:`arcsinh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')\n\n def arccosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccosh`.\n\n The arguments are the same as for :py:func:`arccosh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')\n\n def arctanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctanh`.\n\n The arguments are the same as for :py:func:`arctanh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')\n\n def exp(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`exp`.\n\n The arguments are the same as for :py:func:`exp`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')\n\n def expm1(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`expm1`.\n\n The arguments are the same as for :py:func:`expm1`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')\n\n def log(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log`.\n\n The arguments are the same as for :py:func:`log`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log')\n\n def log10(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log10`.\n\n The arguments are the same as for :py:func:`log10`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')\n\n def log2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log2`.\n\n The arguments are the same as for :py:func:`log2`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')\n\n def log1p(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log1p`.\n\n The arguments are the same as for :py:func:`log1p`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')\n\n def sqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sqrt`.\n\n The arguments are the same as for :py:func:`sqrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')\n\n def rsqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rsqrt`.\n\n The arguments are the same as for :py:func:`rsqrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')\n\n def cbrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cbrt`.\n\n The arguments are the same as for :py:func:`cbrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cqrt')\n\n def rcbrt(self, *args, 
**kwargs):\n \"\"\"Convenience fluent method for :py:func:`rcbrt`.\n\n The arguments are the same as for :py:func:`rcbrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute rcqrt')\n\n def square(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`square`.\n\n The arguments are the same as for :py:func:`square`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute square')\n\n def reciprocal(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reciprocal`.\n\n The arguments are the same as for :py:func:`reciprocal`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')\n\n def relu(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`relu`.\n\n The arguments are the same as for :py:func:`relu`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')\n\n def sigmoid(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sigmoid`.\n\n The arguments are the same as for :py:func:`sigmoid`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')\n\n def softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmax`.\n\n The arguments are the same as for :py:func:`softmax`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')\n\n def log_softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log_softmax`.\n\n The arguments are the same as for :py:func:`log_softmax`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')\n\n def softmin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmin`.\n\n The arguments are the same as for :py:func:`softmin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')\n\n def squeeze(self, axis=None): # pylint: disable=arguments-differ\n \"\"\"Remove single-dimensional entries from the shape of a.\"\"\"\n return _mx_np_op.squeeze(self, axis=axis)\n\n def broadcast_to(self, shape): # pylint: disable=redefined-outer-name\n return _mx_np_op.broadcast_to(self, shape)\n\n def broadcast_like(self, other):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')\n\n def _full(self, value):\n \"\"\"\n Currently for internal use only. 
Implemented for __setitem__.\n Assign to self an array of self's same shape and type, filled with value.\n \"\"\"\n return _mx_nd_np.full(self.shape, value, ctx=self.ctx, dtype=self.dtype, out=self)\n\n # pylint: disable=redefined-outer-name\n def _scatter_set_nd(self, value_nd, indices):\n \"\"\"\n This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing\n \"\"\"\n return _npi.scatter_set_nd(\n lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self\n )\n # pylint: enable=redefined-outer-name\n\n @property\n def shape(self):\n return super(ndarray, self).shape\n\n @property\n def ndim(self):\n \"\"\"Number of array dimensions.\"\"\"\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\"Number of elements in the array.\"\"\"\n return super(ndarray, self).size\n\n @property\n def dtype(self):\n \"\"\"Data-type of the array's elements.\n\n Returns\n -------\n numpy.dtype\n This NDArray's data type.\n\n Examples\n --------\n >>> x = np.zeros((2,3))\n >>> x.dtype\n dtype('float32')\n >>> y = np.zeros((2,3), dtype='int32')\n >>> y.dtype\n dtype('int32')\n \"\"\"\n return _np.dtype(super(ndarray, self).dtype)\n\n def tostype(self, stype):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')\n\n\n@set_module('mxnet.numpy')\ndef empty(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, without initializing entries.\n\n Parameters\n ----------\n shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``.\n dtype : data-type, optional\n Desired output data-type for the array, e.g, `numpy.int8`. Default is\n `numpy.float32`. Note that this behavior is different from NumPy's `empty`\n function where `float64` is the default value, because `float32` is\n considered as the default data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data of the given shape, dtype, and order.\n\n Examples\n --------\n >>> np.empty([2, 2])\n array([[ 0.000000e+00, -2.524355e-29],\n [ nan, -8.592023e+09]]) # uninitialized\n\n >>> np.empty([2, 2], dtype=int)\n array([[8751743591039004782, 3196766424264760104],\n [7583328881310196768, 562950123910254]], dtype=int64) # uninitialized\n \"\"\"\n if order != 'C':\n raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'\n .format(str(order)))\n if ctx is None:\n ctx = current_context()\n if dtype is None:\n dtype = _np.float32\n if isinstance(shape, int):\n shape = (shape,)\n return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))\n\n\n@set_module('mxnet.numpy')\ndef array(object, dtype=None, ctx=None):\n \"\"\"\n Create an array.\n\n Parameters\n ----------\n object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array. Default is `float32`.\n ctx : device context, optional\n Device context on which the memory is allocated. 
Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n out : ndarray\n An array object satisfying the specified requirements.\n\n Examples\n --------\n >>> np.array([1, 2, 3])\n array([1., 2., 3.])\n\n >>> np.array([[1, 2], [3, 4]])\n array([[1., 2.],\n [3., 4.]])\n\n >>> np.array([[1, 0], [0, 1]], dtype=bool)\n array([[ True, False],\n [False, True]])\n \"\"\"\n if ctx is None:\n ctx = current_context()\n if isinstance(object, (ndarray, _np.ndarray)):\n dtype = object.dtype if dtype is None else dtype\n elif isinstance(object, NDArray):\n raise ValueError(\"If you're trying to create a mxnet.numpy.ndarray \"\n \"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.\")\n else:\n if dtype is None:\n dtype = object.dtype if hasattr(object, \"dtype\") else _np.float32\n try:\n object = _np.array(object, dtype=dtype)\n except Exception as e:\n # printing out the error raised by official NumPy's array function\n # for transparency on users' side\n raise TypeError('{}'.format(str(e)))\n ret = empty(object.shape, dtype=dtype, ctx=ctx)\n if len(object.shape) == 0:\n ret[()] = object\n else:\n ret[:] = object\n return ret\n\n\n@set_module('mxnet.numpy')\ndef shape(a):\n \"\"\"\n Return the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n shape : tuple of ints\n The elements of the shape tuple give the lengths of the\n corresponding array dimensions.\n\n See Also\n --------\n ndarray.shape : Equivalent array method.\n\n Examples\n --------\n >>> np.shape(np.eye(3))\n (3, 3)\n >>> np.shape([[1, 2]])\n (1, 2)\n >>> np.shape([0])\n (1,)\n >>> np.shape(0)\n ()\n \"\"\"\n return _mx_nd_np.shape(a)\n\n\n@set_module('mxnet.numpy')\ndef zeros(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with zeros.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type (default is `numpy.float32`). Note that this\n behavior is different from NumPy's `zeros` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of zeros with the given shape, dtype, and ctx.\n\n Examples\n --------\n >>> np.zeros(5)\n array([0., 0., 0., 0., 0.])\n\n >>> np.zeros((5,), dtype=int)\n array([0, 0, 0, 0, 0], dtype=int64)\n\n >>> np.zeros((2, 1))\n array([[0.],\n [0.]])\n \"\"\"\n return _mx_nd_np.zeros(shape, dtype, order, ctx)\n\n\n@set_module('mxnet.numpy')\ndef ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with ones.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `numpy.float32`. 
Note that this\n behavior is different from NumPy's `ones` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and ctx.\n\n Examples\n --------\n >>> np.ones(5)\n array([1., 1., 1., 1., 1.])\n\n >>> np.ones((5,), dtype=int)\n array([1, 1, 1, 1, 1], dtype=int64)\n\n >>> np.ones((2, 1))\n array([[1.],\n [1.]])\n\n >>> s = (2,2)\n >>> np.ones(s)\n array([[1., 1.],\n [1., 1.]])\n \"\"\"\n return _mx_nd_np.ones(shape, dtype, order, ctx)\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.numpy')\ndef full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array. The default, `None`, means\n `np.array(fill_value).dtype`.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n\n Notes\n -----\n This function differs from the original `numpy.full\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in\n the following way(s):\n\n - Has an additional `ctx` argument to specify the device\n - Has an additional `out` argument\n - Currently does not support `order` selection\n\n See Also\n --------\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), 10)\n array([[10., 10.],\n [10., 10.]])\n >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))\n array([[2, 2],\n [2, 2]], dtype=int32)\n \"\"\"\n return _mx_nd_np.full(shape, fill_value, order=order, ctx=ctx, dtype=dtype, out=out)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef identity(n, dtype=None, ctx=None):\n \"\"\"\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output. 
Defaults to ``numpy.float32``.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> np.identity(3)\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n return _mx_nd_np.identity(n, dtype, ctx)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef take(a, indices, axis=None, mode='raise', out=None):\n r\"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as \"fancy\"\n indexing (indexing arrays using arrays); however, it can be easier to use\n if you need elements along a given axis. A call such as\n ``np.take(arr, indices, axis=3)`` is equivalent to\n ``arr[:,:,:,indices,...]``.\n\n Explained without fancy indexing, this is equivalent to the following use\n of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of\n indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n ----------\n a : ndarray\n The source array.\n indices : ndarray\n The indices of the values to extract. Also allow scalars for indices.\n axis : int, optional\n The axis over which to select values. By default, the flattened\n input array is used.\n out : ndarray, optional\n If provided, the result will be placed in this array. It should\n be of the appropriate shape and dtype.\n mode : {'clip', 'wrap'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'clip' -- clip to the range (default)\n * 'wrap' -- wrap around\n\n 'clip' mode means that all indices that are too large are replaced\n by the index that addresses the last element along that axis. Note\n that this disables indexing with negative numbers.\n\n Returns\n -------\n out : ndarray\n The returned array has the same type as `a`.\n\n Notes\n -----\n\n This function differs from the original `numpy.take\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in\n the following way(s):\n\n - Only ndarray or scalar ndarray is accepted as valid input.\n\n Examples\n --------\n >>> a = np.array([4, 3, 5, 7, 6, 8])\n >>> indices = np.array([0, 1, 4])\n >>> np.take(a, indices)\n array([4., 3., 6.])\n\n In this example for `a` is an ndarray, \"fancy\" indexing can be used.\n\n >>> a[indices]\n array([4., 3., 6.])\n\n If `indices` is not one dimensional, the output also has these dimensions.\n\n >>> np.take(a, np.array([[0, 1], [2, 3]]))\n array([[4., 3.],\n [5., 7.]])\n \"\"\"\n return _mx_nd_np.take(a, indices, axis, mode, out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : ndarray\n Input array. 
Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. The default is None.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n Notes\n -----\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n This function differs from the original `numpy.unique\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in\n the following aspects:\n\n - Only support ndarray as input.\n - Object arrays or structured arrays are not supported.\n\n Examples\n --------\n >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))\n array([1., 2., 3.])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1., 2., 3.])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1., 0., 0.],\n [2., 3., 4.]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 5, 3, 2], dtype=int64)\n >>> a[indices]\n array([1., 2., 3., 4., 6.])\n\n Reconstruct the input array from the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1], dtype=int64)\n >>> u[indices]\n array([1., 2., 6., 4., 2., 3., 2.])\n \"\"\"\n return _mx_nd_np.unique(ar, return_index, return_inverse, return_counts, axis)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef add(x1, x2, out=None, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be added. 
If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n add : ndarray or scalar\n The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.add(1.0, 4.0)\n 5.0\n >>>\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.add(x1, x2)\n array([[ 0., 2., 4.],\n [ 3., 5., 7.],\n [ 6., 8., 10.]])\n \"\"\"\n return _mx_nd_np.add(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef subtract(x1, x2, out=None, **kwargs):\n \"\"\"\n Subtract arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be subtracted from each other. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape\n of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n subtract : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.subtract(1.0, 4.0)\n -3.0\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.subtract(x1, x2)\n array([[0., 0., 0.],\n [3., 3., 3.],\n [6., 6., 6.]])\n \"\"\"\n return _mx_nd_np.subtract(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef multiply(x1, x2, out=None, **kwargs):\n \"\"\"\n Multiply arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.multiply(2.0, 4.0)\n 8.0\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.multiply(x1, x2)\n array([[ 0., 1., 4.],\n [ 0., 4., 10.],\n [ 0., 7., 16.]])\n \"\"\"\n return _mx_nd_np.multiply(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef divide(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns a true division of the inputs, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n\n Examples\n --------\n >>> np.true_divide(x, 4)\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n \"\"\"\n return _mx_nd_np.divide(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef true_divide(x1, x2, out=None):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n Instead of the Python traditional 'floor division', this returns a true\n division. True division adjusts the output type to present the best\n answer, regardless of input types.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n\n Examples\n --------\n >>> x = np.arange(5)\n >>> np.true_divide(x, 4)\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n \"\"\"\n return _mx_nd_np.true_divide(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef mod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.mod(np.arange(7), 5)\n array([0., 1., 2., 3., 4., 0., 1.])\n \"\"\"\n return _mx_nd_np.mod(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef remainder(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.remainder(np.arange(7), 5)\n array([0., 1., 2., 3., 4., 0., 1.])\n \"\"\"\n return _mx_nd_np.remainder(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef power(x1, x2, out=None, **kwargs):\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> x1 = np.arange(6)\n >>> np.power(x1, 3)\n array([ 0., 1., 8., 27., 64., 125.])\n\n Raise the bases to different exponents.\n\n >>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])\n >>> np.power(x1, x2)\n array([ 0., 1., 8., 27., 16., 5.])\n\n The effect of broadcasting.\n\n >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])\n >>> x2\n array([[1., 2., 3., 3., 2., 1.],\n [1., 2., 3., 3., 2., 1.]])\n\n >>> np.power(x1, x2)\n array([[ 0., 1., 8., 27., 16., 5.],\n [ 0., 1., 8., 27., 16., 5.]])\n \"\"\"\n return _mx_nd_np.power(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef lcm(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the lowest common multiple of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing lowest common multiple. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The lowest common multiple of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The greatest common divisor\n\n Examples\n --------\n >>> np.lcm(12, 20)\n 60\n >>> np.lcm(np.arange(6, dtype=int), 20)\n array([ 0, 20, 20, 60, 20, 20], dtype=int64)\n \"\"\"\n return _mx_nd_np.lcm(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sin(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The sine of each element of x. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sin(np.pi/2.)\n 1.0\n >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)\n array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])\n \"\"\"\n return _mx_nd_np.sin(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cos(x, out=None, **kwargs):\n r\"\"\"\n Cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding cosine values. This is a scalar if x is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cos(np.array([0, np.pi/2, np.pi]))\n array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.cos(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.cos(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sinh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic sine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic sine values. 
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sinh(0)\n 0.0\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.sinh(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.sinh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cosh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic cosine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cosh(0)\n 1.0\n \"\"\"\n return _mx_nd_np.cosh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef tanh(x, out=None, **kwargs):\n \"\"\"\n Compute hyperbolic tangent element-wise.\n Equivalent to ``np.sinh(x)/np.cosh(x)``.\n\n Parameters\n ----------\n x : ndarray or scalar.\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n ----------\n y : ndarray or scalar\n The corresponding hyperbolic tangent values.\n\n Notes\n -----\n If `out` is provided, the function writes the result into it,\n and returns a reference to `out`. (See Examples)\n - input x does not support complex computation (like imaginary number)\n >>> np.tanh(np.pi*1j)\n TypeError: type <type 'complex'> not supported\n\n Examples\n --------\n >>> np.tanh(np.array([0, np.pi]))\n array([0. , 0.9962721])\n >>> np.tanh(np.pi)\n 0.99627207622075\n >>> # Example of providing the optional output parameter illustrating\n >>> # that what is returned is a reference to said parameter\n >>> out1 = np.array(1)\n >>> out2 = np.tanh(np.array(0.1), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.tanh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log10(x, out=None, **kwargs):\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The logarithm to the base 10 of `x`, element-wise. NaNs are\n returned where x is negative. 
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.log10(np.array([1e-15, -3.]))\n array([-15., nan])\n \"\"\"\n return _mx_nd_np.log10(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sqrt(x, out=None, **kwargs):\n \"\"\"\n Return the non-negative square-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose square-roots are required.\n out : ndarray, or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n An array of the same shape as `x`, containing the positive\n square-root of each element in `x`. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sqrt(np.array([1,4,9]))\n array([1., 2., 3.])\n >>> np.sqrt(np.array([4, -1, _np.inf]))\n array([ 2., nan, inf])\n \"\"\"\n return _mx_nd_np.sqrt(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cbrt(x, out=None, **kwargs):\n \"\"\"\n Return the cube-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray\n The values whose cube-roots are required.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n y : ndarray\n An array of the same shape as x, containing the cube cube-root of each element in x.\n If out was provided, y is a reference to it. This is a scalar if x is a scalar.\n\n Examples\n ----------\n >>> np.cbrt([1,8,27])\n array([ 1., 2., 3.])\n \"\"\"\n return _mx_nd_np.cbrt(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef abs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> x = np.array([-1.2, 1.2])\n >>> np.abs(x)\n array([1.2, 1.2])\n \"\"\"\n return _mx_nd_np.abs(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef absolute(x, out=None, **kwargs):\n \"\"\"\n Calculate the absolute value element-wise.\n np.abs is a shorthand for this function.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n\n Examples\n ----------\n >>> x = np.array([-1.2, 1.2])\n >>> np.absolute(x)\n array([ 1.2, 1.2])\n \"\"\"\n return _mx_nd_np.absolute(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef exp(x, out=None, **kwargs):\n r\"\"\"\n Calculate the exponential of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential of `x`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.exp(1)\n 2.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])\n \"\"\"\n return _mx_nd_np.exp(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef expm1(x, out=None, **kwargs):\n r\"\"\"\n Calculate `exp(x) - 1` for all elements in the array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential minus one: `out = exp(x) - 1`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.expm1(1)\n 1.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.expm1(x)\n array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])\n \"\"\"\n return _mx_nd_np.expm1(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arcsin(x, out=None, **kwargs):\n r\"\"\"\n Inverse sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.arcsin(1) # pi/2\n 1.5707963267948966\n >>> np.arcsin(-1) # -pi/2\n -1.5707963267948966\n >>> np.arcsin(0)\n 0.0\n\n Notes\n -----\n `arcsin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. 
The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *arcsin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n - Only support ndarray or scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n return _mx_nd_np.arcsin(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arccos(x, out=None, **kwargs):\n \"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = arccos(y).\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n Notes\n ----------\n arccos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, arccos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.arccos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n return _mx_nd_np.arccos(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arctan(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. 
The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `arctan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.arctan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n return _mx_nd_np.arctan(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sign(x, out=None, **kwargs):\n \"\"\"\n Returns an element-wise indication of the sign of a number.\n The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.\n\n Parameters\n ----------\n x : ndarray or a scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The sign of `x`.\n This is a scalar if `x` is a scalar.\n\n Note\n -------\n - Only supports real number as input elements.\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([-5., 4.5])\n >>> np.sign(a)\n array([-1., 1.])\n Scalars as input:\n >>> np.sign(4.0)\n 1.0\n >>> np.sign(0)\n 0\n Use ``out`` parameter:\n >>> b = np.zeros((2, ))\n >>> np.sign(a, out=b)\n array([-1., 1.])\n >>> b\n array([-1., 1.])\n \"\"\"\n return _mx_nd_np.sign(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log(x, out=None, **kwargs):\n \"\"\"\n Natural logarithm, element-wise.\n The natural logarithm `log` is the inverse of the exponential function,\n so that `log(exp(x)) = x`. The natural logarithm is logarithm in base\n `e`.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The natural logarithm of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and\n ``nan`` according to the input.\n This function differs from the original `numpy.log\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)\n >>> np.log(a)\n array([ 0., 1., 2., -inf], dtype=float64)\n >>> # Using the default float32 dtype leads to slightly different behavior\n >>> a = np.array([1, np.exp(1), np.exp(2), 0])\n >>> np.log(a)\n array([ 0., 0.99999994, 2., -inf])\n >>> np.log(1)\n 0.0\n \"\"\"\n return _mx_nd_np.log(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef rint(x, out=None, **kwargs):\n \"\"\"\n Round elements of the array to the nearest integer.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.rint\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.rint(a)\n array([-2., -2., -0., 0., 1., 2., 2.])\n \"\"\"\n return _mx_nd_np.rint(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log2(x, out=None, **kwargs):\n \"\"\"\n Base-2 logarithm of x.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The logarithm base two of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.log2\n <https://www.google.com/search?q=numpy+log2>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> x = np.array([0, 1, 2, 2**4])\n >>> np.log2(x)\n array([-inf, 0., 1., 4.])\n \"\"\"\n return _mx_nd_np.log2(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log1p(x, out=None, **kwargs):\n \"\"\"\n Return the natural logarithm of one plus the input array, element-wise.\n Calculates ``log(1 + x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n Natural logarithm of 1 + x, element-wise. 
This is a scalar\n if x is a scalar.\n\n Notes\n -----\n For real-valued input, `log1p` is accurate also for `x` so small\n that `1 + x == 1` in floating-point accuracy.\n Logarithm is a multivalued function: for each `x` there is an infinite\n number of `z` such that `exp(z) = 1 + x`. The convention is to return\n the `z` whose imaginary part lies in `[-pi, pi]`.\n For real-valued input data types, `log1p` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n cannot support complex-valued input.\n\n Examples\n --------\n >>> np.log1p(1e-99)\n 1e-99\n >>> a = np.array([3, 4, 5])\n >>> np.log1p(a)\n array([1.3862944, 1.609438 , 1.7917595])\n \"\"\"\n return _mx_nd_np.log1p(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef degrees(x, out=None, **kwargs):\n \"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding degree values; if `out` was supplied this is a\n reference to it.\n This is a scalar if `x` is a scalar.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> rad = np.arange(12.) * np.pi / 6\n >>> np.degrees(rad)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> # Use specified ``out`` ndarray:\n >>> out = np.zeros((rad.shape))\n >>> np.degrees(rad, out)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> out\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n \"\"\"\n return _mx_nd_np.degrees(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef rad2deg(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from radians to degrees.\n Parameters\n ----------\n x : ndarray or scalar\n Angles in radians.\n out : ndarray or None, optional\n A location into which the result is stored. 
If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in degrees.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"rad2deg(x)\" is \"x * 180 / pi\".\n\n This function differs from the original numpy.rad2deg in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.rad2deg(np.pi/2)\n 90.0\n \"\"\"\n return _mx_nd_np.rad2deg(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef radians(x, out=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array in degrees.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding radian values. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.radians\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in\n the following way(s):\n - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> deg = np.arange(12.) * 30.\n >>> np.radians(deg)\n array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,\n 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],\n dtype=float32)\n \"\"\"\n return _mx_nd_np.radians(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef deg2rad(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"deg2rad(x)\" is \"x * pi / 180\".\n\n This function differs from the original numpy.deg2rad in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.deg2rad(180)\n 3.1415927\n \"\"\"\n return _mx_nd_np.deg2rad(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef reciprocal(x, out=None, **kwargs):\n r\"\"\"\n Return the reciprocal of the argument, element-wise.\n Calculates ``1/x``.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose reciprocals are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.reciprocal(2.)\n 0.5\n >>> x = np.array([1, 2., 3.33])\n >>> np.reciprocal(x)\n array([1. , 0.5 , 0.3003003])\n\n Notes\n -----\n .. note::\n This function is not designed to work with integers.\n For integer arguments with absolute value larger than 1 the result is\n always zero because of the way Python handles integer division. 
For\n integer zero the result is an overflow.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.reciprocal\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n \"\"\"\n return _mx_nd_np.reciprocal(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef square(x, out=None, **kwargs):\n r\"\"\"\n Return the element-wise square of the input.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose squares are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.square(2.)\n 4.0\n >>> x = np.array([1, 2., -1])\n >>> np.square(x)\n array([1., 4., 1.])\n\n Notes\n -----\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.square\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n \"\"\"\n return _mx_nd_np.square(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef negative(x, out=None, **kwargs):\n r\"\"\"\n Numerical negative, element-wise.\n\n Parameters:\n ------------\n x : ndarray or scalar\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored.\n If provided, it must have a shape that the inputs broadcast to.\n If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length\n equal to the number of outputs.\n\n Returns:\n -------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.negative(1)\n -1\n \"\"\"\n return _mx_nd_np.negative(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef fix(x, out=None, **kwargs):\n \"\"\"\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values are returned as floats.\n\n Parameters:\n ----------\n x : ndarray\n An array of floats to be rounded\n out : ndarray, optional\n Output array\n\n Returns:\n -------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.ndarray of floats\n\n Examples\n ---------\n >>> np.fix(3.14)\n 3\n \"\"\"\n return _mx_nd_np.fix(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef tan(x, out=None, **kwargs):\n r\"\"\"\n Compute tangent element-wise.\n Equivalent to np.sin(x)/np.cos(x) element-wise.\n\n Parameters:\n ----------\n x : ndarray\n Input array.\n out : ndarray or none, optional\n A location into which the result is stored. If provided,\n it must have a shape that the inputs broadcast to. If not provided or None,\n a freshly-allocated array is returned. A tuple (possible only as a keyword argument)\n must have length equal to the number of outputs.\n\n Returns:\n -------\n y : ndarray\n The corresponding tangent values. 
This is a scalar if x is a scalar.\n\n Examples\n ---------\n >>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))\n array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])\n \"\"\"\n\n return _mx_nd_np.tan(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef ceil(x, out=None, **kwargs):\n r\"\"\"\n Return the ceiling of the input, element-wise.\n The ceil of the ndarray `x` is the smallest integer `i`, such that\n `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The ceiling of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.ceil(a)\n array([-1., -1., -0., 1., 2., 2., 2.])\n >>> # if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.ceil(np.array(3.5), a)\n array(4.)\n >>> a\n array(4.)\n \"\"\"\n return _mx_nd_np.ceil(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef floor(x, out=None, **kwargs):\n r\"\"\"\n Return the floor of the input, element-wise.\n The floor of the ndarray `x` is the largest integer `i`, such that\n `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The floor of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.floor(a)\n array([-2., -2., -1., 0., 1., 1., 2.])\n >>> # if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.floor(np.array(3.5), a)\n array(3.)\n >>> a\n array(3.)\n \"\"\"\n return _mx_nd_np.floor(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef trunc(x, out=None, **kwargs):\n r\"\"\"\n Return the truncated value of the input, element-wise.\n The truncated value of the scalar `x` is the nearest integer `i` which\n is closer to zero than `x` is. In short, the fractional part of the\n signed number `x` is discarded.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : ndarray or scalar\n The truncated value of each element in `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.trunc in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.trunc(a)\n array([-1., -1., -0., 0., 1., 1., 2.])\n \"\"\"\n return _mx_nd_np.trunc(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef logical_not(x, out=None, **kwargs):\n r\"\"\"\n Compute the truth value of NOT x element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Logical NOT is applied to the elements of `x`.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : bool or ndarray of bool\n Boolean result with the same shape as `x` of the NOT operation\n on elements of `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.logical_not in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> x = np.array([True, False, 0, 1])\n >>> np.logical_not(x)\n array([False, True, True, False])\n\n >>> x = np.arange(5)\n >>> np.logical_not(x<3)\n array([False, False, False, True, True])\n \"\"\"\n return _mx_nd_np.logical_not(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arcsinh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arcsinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arcsinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `arcsinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. DType of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arcsinh(a)\n array([1.8798635, 2.3124385])\n\n >>> np.arcsinh(0)\n 0.0\n \"\"\"\n return _mx_nd_np.arcsinh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arccosh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arccosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arccosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `arccosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arccosh(a)\n array([1.8309381, 2.2924316])\n\n >>> np.arccosh(1)\n 0.0\n \"\"\"\n return _mx_nd_np.arccosh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arctanh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arctanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `arctanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.arctanh(a)\n array([0., -0.54930615])\n\n >>> np.arctanh(0)\n 0.0\n \"\"\"\n return _mx_nd_np.arctanh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef tensordot(a, b, axes=2):\n r\"\"\"\n tensordot(a, b, axes=2)\n Compute tensor dot product along specified axes for arrays >= 1-D.\n Given two tensors (arrays of dimension greater than or equal to one),\n `a` and `b`, and an ndarray object containing two ndarray\n objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s\n elements (components) over the axes specified by ``a_axes`` and\n ``b_axes``. The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N``\n dimensions of `a` and the first ``N`` dimensions of `b` are summed\n over.\n\n Parameters\n ----------\n a, b : ndarray, len(shape) >= 1\n Tensors to \"dot\".\n axes : int or (2,) ndarray\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) ndarray\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. Both elements ndarray must be of the same length.\n\n See Also\n --------\n dot, einsum\n\n Notes\n -----\n Three common use cases are:\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n When `axes` is integer_like, the sequence for evaluation will be: first\n the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and\n Nth axis in `b` last.\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n\n Examples\n --------\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[ 4400., 4730.],\n [ 4532., 4874.],\n [ 4664., 5018.],\n [ 4796., 5162.],\n [ 4928., 5306.]])\n \"\"\"\n return _mx_nd_np.tensordot(a, b, axes)\n\n\n@set_module('mxnet.numpy')\ndef histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : ndarray\n Input data. The histogram is computed over the flattened array.\n bins : int or ndarray\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n range : (float, float)\n The lower and upper range of the bins. Required when `bins` is an integer.\n Values outside the range are ignored. 
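As an illustration, with ``bins=5`` and ``range=(0., 5.)``, a sample value of ``7.`` would simply\n be left out of every bin (illustrative). 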
The first element of the range must\n be less than or equal to the second.\n normed : bool, optional\n Not supported yet, coming soon.\n weights : array_like, optional\n Not supported yet, coming soon.\n density : bool, optional\n Not supported yet, coming soon.\n\n Examples\n --------\n >>> np.histogram(np.arange(4), bins=np.arange(5))\n [array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]\n \"\"\"\n return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed, weights=weights, density=density)\n\n\n@set_module('mxnet.numpy')\ndef eye(N, M=None, k=0, dtype=_np.float32, **kwargs):\n \"\"\"\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to N.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero,\n except for the k-th diagonal, whose values are equal to one.\n\n Examples\n --------\n >>> np.eye(2, dtype=int)\n array([[1, 0],\n [0, 1]], dtype=int64)\n >>> np.eye(3, k=1)\n array([[0., 1., 0.],\n [0., 0., 1.],\n [0., 0., 0.]])\n \"\"\"\n return _mx_nd_np.eye(N, M, k, dtype, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Return evenly spaced numbers over a specified interval.\n\n Returns num evenly spaced samples, calculated over the interval [start, stop].\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : real number\n The starting value of the sequence.\n stop : real number\n The end value of the sequence, unless endpoint is set to False. In\n that case, the sequence consists of all but the last of num + 1\n evenly spaced samples, so that stop is excluded. Note that the step\n size changes when endpoint is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, stop is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between samples.\n dtype : dtype, optional\n The type of the output array. If dtype is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start or\n stop are array-like. By default (0), the samples will be along a new\n axis inserted at the beginning. Use -1 to get an axis at the end.\n\n Returns\n -------\n samples : ndarray\n There are num equally spaced samples in the closed interval\n `[start, stop]` or the half-open interval `[start, stop)`\n (depending on whether endpoint is True or False).\n step : float, optional\n Only returned if retstep is True\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. 
, 2.25, 2.5 , 2.75, 3. ]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n Notes\n -----\n\n This function differs from the original `numpy.linspace\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in\n the following aspects:\n\n - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray\n - axis could only be 0\n - There could be an additional `ctx` argument to specify the device, e.g. the i-th\n GPU.\n \"\"\"\n return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, ctx)\n\n\n# pylint: disable=too-many-arguments\n@set_module('mxnet.numpy')\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):\n r\"\"\"Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Non-scalar `start` and `stop` are now supported.\n\n Parameters\n ----------\n start : int or float\n ``base ** start`` is the starting value of the sequence.\n stop : int or float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. By default (0), the samples will be along a\n new axis inserted at the beginning. Now, axis only support axis = 0.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ...\n >>> power(base, y).astype(dtype)\n ...\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.44347, 464.15887, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.82794, 316.22775, 562.3413 ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396843, 6.349604 , 8. 
])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)\n array([4, 5, 6, 8], dtype=int32)\n >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))\n array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))\n \"\"\"\n return _mx_nd_np.logspace(start, stop, num, endpoint, base, dtype, axis, ctx=ctx)\n# pylint: enable=too-many-arguments\n\n\n@set_module('mxnet.numpy')\ndef expand_dims(a, axis):\n \"\"\"Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded array shape.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1., 2.]])\n\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]\n >>> y\n array([[1.],\n [2.]])\n\n >>> y.shape\n (2, 1)\n\n Note that some examples may use None instead of np.newaxis. These are the same objects:\n\n >>> np.newaxis is None\n True\n \"\"\"\n return _npi.expand_dims(a, axis)\n\n\n@set_module('mxnet.numpy')\ndef tile(A, reps):\n r\"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : ndarray or scalar\n An input array or a scalar to repeat.\n reps : a single integer or tuple of integers\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0., 1., 2., 0., 1., 2.])\n >>> np.tile(a, (2, 2))\n array([[0., 1., 2., 0., 1., 2.],\n [0., 1., 2., 0., 1., 2.]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0., 1., 2., 0., 1., 2.]],\n [[0., 1., 2., 0., 1., 2.]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1., 2., 1., 2.],\n [3., 4., 3., 4.]])\n >>> np.tile(b, (2, 1))\n array([[1., 2.],\n [3., 4.],\n [1., 2.],\n [3., 4.]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.]])\n\n Scalar as input:\n\n >>> np.tile(2, 3)\n array([2, 2, 2]) # repeating integer `2`\n\n \"\"\"\n return _mx_nd_np.tile(A, reps)\n\n\n@set_module('mxnet.numpy')\ndef tril(m, k=0):\n r\"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : ndarray, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. 
`k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n >>> np.tril(a, -1)\n array([[ 0., 0., 0.],\n [ 4., 0., 0.],\n [ 7., 8., 0.],\n [10., 11., 12.]])\n \"\"\"\n return _mx_nd_np.tril(m, k)\n\n\n@set_module('mxnet.numpy')\ndef arange(start, stop=None, step=1, dtype=None, ctx=None):\n \"\"\"Return evenly spaced values within a given interval.\n\n Values are generated within the half-open interval ``[start, stop)``\n (in other words, the interval including `start` but excluding `stop`).\n For integer arguments the function is equivalent to the Python built-in\n `range` function, but returns an ndarray rather than a list.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default\n start value is 0.\n stop : number\n End of interval. The interval does not include this value, except\n in some cases where `step` is not an integer and floating point\n round-off affects the length of `out`.\n step : number, optional\n Spacing between values. For any output `out`, this is the distance\n between two adjacent values, ``out[i+1] - out[i]``. The default\n step size is 1. If `step` is specified as a position argument,\n `start` must also be given.\n dtype : dtype\n The type of the output array. The default is `float32`.\n\n Returns\n -------\n arange : ndarray\n Array of evenly spaced values.\n\n For floating point arguments, the length of the result is\n ``ceil((stop - start)/step)``. Because of floating point overflow,\n this rule may result in the last element of `out` being greater\n than `stop`.\n\n Examples\n --------\n >>> np.arange(3)\n array([0., 1., 2.])\n\n >>> np.arange(3.0)\n array([0., 1., 2.])\n\n >>> np.arange(3,7)\n array([3., 4., 5., 6.])\n\n >>> np.arange(3,7,2)\n array([3., 5.])\n \"\"\"\n return _mx_nd_np.arange(start, stop, step, dtype, ctx)\n\n\n@set_module('mxnet.numpy')\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. 
For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> np.split(x, [3, 5, 6, 8])\n [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]\n \"\"\"\n return _mx_nd_np.split(ary, indices_or_sections, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef vsplit(ary, indices_or_sections):\n r\"\"\"\n vsplit(ary, indices_or_sections)\n\n Split an array into multiple sub-arrays vertically (row-wise).\n\n ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split\n along the first axis regardless of the array dimension.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 0. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 0 the array is split. 
For example, ``[2, 3]`` would result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along axis 0, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Notes\n -----\n This function differs from the original `numpy.vsplit\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in\n the following aspects:\n\n - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,\n an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n\n >>> # With a higher dimensional array the split is still along the first axis.\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n return split(ary, indices_or_sections, 0)\n\n\n@set_module('mxnet.numpy')\ndef concatenate(seq, axis=0, out=None):\n \"\"\"Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n See Also\n --------\n split : Split array into a list of multiple sub-arrays of equal size.\n hsplit : Split array into multiple sub-arrays horizontally (column wise)\n vsplit : Split array into multiple sub-arrays vertically (row wise)\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n stack : Stack a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise)\n vstack : Stack arrays in sequence vertically (row wise)\n dstack : Stack arrays in sequence depth wise (along third dimension)\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concatenate((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n\n >>> np.concatenate((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n \"\"\"\n return _mx_nd_np.concatenate(seq, axis=axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef append(arr, values, axis=None): # pylint: disable=redefined-outer-name\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : ndarray\n Values are appended to a copy of this array.\n values : ndarray\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). 
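For instance, appending a ``(2, 3)`` block to a ``(4, 3)`` array along ``axis=0`` would be\n accepted, while a ``(2, 2)`` block would not (illustrative). 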
If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. Note that\n `append` does not occur in-place: a new array is allocated and\n filled. If `axis` is None, `out` is a flattened array.\n\n Examples\n --------\n >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))\n array([1., 2., 3., 4., 5., 6., 7., 8., 9.])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n \"\"\"\n return _mx_nd_np.append(arr, values, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef stack(arrays, axis=0, out=None):\n \"\"\"Join a sequence of arrays along a new axis.\n The axis parameter specifies the index of the new axis in the dimensions of the result.\n For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of array_like\n Each array must have the same shape.\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be correct,\n matching that of what stack would have returned if no out argument were specified.\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n split : Split array into a list of multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> arrays = [np.random.rand(3, 4) for _ in range(10)]\n >>> np.stack(arrays, axis=0).shape\n (10, 3, 4)\n\n >>> np.stack(arrays, axis=1).shape\n (3, 10, 4)\n\n >>> np.stack(arrays, axis=2).shape\n (3, 4, 10)\n\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.stack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> np.stack((a, b), axis=-1)\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _mx_nd_np.stack(arrays, axis=axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef vstack(arrays, out=None):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
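As a quick illustration, vertically stacking three ``(N,)`` vectors yields a ``(3, N)`` array. 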
The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n return _mx_nd_np.vstack(arrays)\n\n\n@set_module('mxnet.numpy')\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n --------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _mx_nd_np.column_stack(tup)\n\n\n@set_module('mxnet.numpy')\ndef dstack(arrays):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n \"\"\"\n return _npi.dstack(*arrays)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef maximum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise maximum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([2., 5., 4.])\n\n >>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[1. , 2. ],\n [0.5, 2. 
]])\n \"\"\"\n return _mx_nd_np.maximum(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef minimum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise minimum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([1., 3., 2.])\n\n >>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[0.5, 0. ],\n [0. , 1. ]])\n \"\"\"\n return _mx_nd_np.minimum(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef swapaxes(a, axis1, axis2):\n \"\"\"Interchange two axes of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis1 : int\n First axis.\n axis2 : int\n Second axis.\n\n Returns\n -------\n a_swapped : ndarray\n Swapped array. This is always a copy of the input array.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> np.swapaxes(x,0,1)\n array([[1.],\n [2.],\n [3.]])\n\n >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n\n [[4., 5.],\n [6., 7.]]])\n\n >>> np.swapaxes(x,0,2)\n array([[[0., 4.],\n [2., 6.]],\n\n [[1., 5.],\n [3., 7.]]])\n \"\"\"\n return _npi.swapaxes(a, dim1=axis1, dim2=axis2)\n\n\n@set_module('mxnet.numpy')\ndef clip(a, a_min, a_max, out=None):\n \"\"\"clip(a, a_min, a_max, out=None)\n\n Clip (limit) the values in an array.\n Given an interval, values outside the interval are clipped to\n the interval edges. For example, if an interval of ``[0, 1]``\n is specified, values smaller than 0 become 0, and values larger\n than 1 become 1.\n\n Parameters\n ----------\n a : ndarray\n Array containing elements to clip.\n a_min : scalar or `None`\n Minimum value. If `None`, clipping is not performed on lower\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n a_max : scalar or `None`\n Maximum value. If `None`, clipping is not performed on upper\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n out : ndarray, optional\n The results will be placed in this array. It may be the input\n array for in-place clipping. `out` must be of the right shape\n to hold the output. Its type is preserved.\n\n Returns\n -------\n clipped_array : ndarray\n An array with the elements of `a`, but where values\n < `a_min` are replaced with `a_min`, and those > `a_max`\n with `a_max`.\n\n Notes\n -----\n array_like `a_min` and `a_max` are not supported.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> np.clip(a, 1, 8)\n array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)\n >>> np.clip(a, 3, 6, out=a)\n array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)\n \"\"\"\n return _mx_nd_np.clip(a, a_min, a_max, out=out)\n\n\n@set_module('mxnet.numpy')\ndef argmax(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. 
Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - Output has dtype that is same as the input ndarray.\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmax(a)\n array(5.)\n >>> np.argmax(a, axis=0)\n array([1., 1., 1.])\n >>> np.argmax(a, axis=1)\n array([2., 2.])\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0., 5., 2., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(1.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmax(a, axis=1, out=b)\n array([2., 2.])\n >>> b\n array([2., 2.])\n \"\"\"\n return _mx_nd_np.argmax(a, axis, out)\n\n\n@set_module('mxnet.numpy')\ndef argmin(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the minimum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - Output has dtype that is same as the input ndarray.\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmin(a)\n array(0.)\n >>> np.argmin(a, axis=0)\n array([0., 0., 0.])\n >>> np.argmin(a, axis=1)\n array([0., 0.])\n\n >>> b = np.arange(6)\n >>> b[2] = 0\n >>> b\n array([0., 1., 0., 3., 4., 5.])\n >>> np.argmin(b) # Only the first occurrence is returned.\n array(0.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmin(a, axis=1, out=b)\n array([0., 0.])\n >>> b\n array([0., 0.])\n \"\"\"\n return _mx_nd_np.argmin(a, axis, out)\n\n\n@set_module('mxnet.numpy')\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"\n Compute the arithmetic mean along the specified axis.\n Returns the average of the array elements.\n The average is taken over the flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n ndarray containing numbers whose mean is desired.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean. For integer inputs, the default is float32;\n for floating point inputs, it is the same as the input dtype.\n out : ndarray, optional\n Alternate output array in which to place the result. The default is None; if provided,\n it must have the same shape and type as the expected output.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result\n as dimensions with size one. With this option, the result will broadcast correctly\n against the input array.\n If the default value is passed, then keepdims will not be passed through to the mean\n method of sub-classes of ndarray, however any non-default value will be. If the sub-class\n method does not implement keepdims any exceptions will be raised.\n\n Returns\n -------\n m : ndarray, see dtype parameter above\n If out=None, returns a new array containing the mean values,\n otherwise a reference to the output array is returned.\n\n Notes\n -----\n This function differs from the original `numpy.mean\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in\n the following way(s):\n - only ndarray is accepted as valid input, python iterables or scalar is not supported\n - default data type for integer input is float32\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.mean(a)\n array(2.5)\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0,:] = 1.0\n >>> a[1,:] = 0.1\n >>> np.mean(a)\n array(0.55)\n >>> np.mean(a, dtype=np.float64)\n array(0.55)\n \"\"\"\n return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n\n\n\n@set_module('mxnet.numpy')\ndef std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. 
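Conceptually it is ``sqrt(mean(abs(x - x.mean())**2))``, with ``ddof`` adjusting the divisor as\n described below. 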
The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : array_like\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n ddof : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n In single precision, std() can be inaccurate:\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n array(0.45)\n >>> np.std(a, dtype=np.float64)\n array(0.45, dtype=float64)\n \"\"\"\n return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.numpy')\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the variance along the specified axis.\n Returns the variance of the array elements, a measure of the spread of a\n distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the variance is computed. The default is to\n compute the variance of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a variance is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the variance. For arrays of integer type\n the default is `float32`; for arrays of float types it is the same as\n the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. 
It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n ``N - ddof``, where ``N`` represents the number of elements. By\n default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `var` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If ``out=None``, returns a new array containing the variance;\n otherwise, a reference to the output array is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.var(a)\n array(1.25)\n >>> np.var(a, axis=0)\n array([1., 1.])\n >>> np.var(a, axis=1)\n array([0.25, 0.25])\n\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.var(a)\n array(0.2025)\n >>> np.var(a, dtype=np.float64)\n array(0.2025, dtype=float64)\n >>> ((1-0.55)**2 + (0.1-0.55)**2)/2\n 0.2025\n \"\"\"\n return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef indices(dimensions, dtype=_np.int32, ctx=None):\n \"\"\"Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0,1,...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : data-type, optional\n The desired data-type for the array. Default is `int32`.\n ctx : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n grid : ndarray\n The array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n\n Notes\n -----\n The output shape is obtained by prepending the number of dimensions\n in front of the tuple of dimensions, i.e. if `dimensions` is a tuple\n ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N,r0,...,rN-1)``.\n\n The subarrays ``grid[k]`` contain the N-D array of indices along the\n ``k-th`` axis. 
Explicitly::\n\n grid[k,i0,i1,...,iN-1] = ik\n\n Examples\n --------\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]])\n >>> grid[1] # column indices\n array([[0, 1, 2],\n [0, 1, 2]], dtype=int32)\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0., 1., 2.],\n [4., 5., 6.]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n \"\"\"\n return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef copysign(x1, x2, out=None, **kwargs):\n r\"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n If `x2` is a scalar, its sign will be copied to all elements of `x1`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Values to change the sign of.\n x2 : ndarray or scalar\n The sign of `x2` is copied to `x1`.\n out : ndarray or None, optional\n A location into which the result is stored. It must be of the\n right shape and right type to hold the output. If not provided\n or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The values of `x1` with the sign of `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n This function differs from the original `numpy.copysign\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in\n the following aspects:\n\n - ``where`` param is not supported.\n\n Examples\n --------\n >>> np.copysign(1.3, -1)\n -1.3\n >>> 1/np.copysign(0, 1)\n inf\n >>> 1/np.copysign(0, -1)\n -inf\n\n >>> a = np.array([-1, 0, 1])\n >>> np.copysign(a, -1.1)\n array([-1., -0., -1.])\n >>> np.copysign(a, np.arange(3)-1)\n array([-1., 0., 1.])\n \"\"\"\n return _mx_nd_np.copysign(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef ravel(x, order='C'):\n r\"\"\"\n ravel(x)\n\n Return a contiguous flattened array.\n A 1-D array, containing the elements of the input, is returned. A copy is\n made only if needed.\n\n Parameters\n ----------\n x : ndarray\n Input array. The elements in `x` are read in row-major, C-style order and\n packed as a 1-D array.\n order : `C`, optional\n Only support row-major, C-style order.\n\n Returns\n -------\n y : ndarray\n y is an array of the same subtype as `x`, with shape ``(x.size,)``.\n Note that matrices are special cased for backward compatibility, if `x`\n is a matrix, then y is a 1-D ndarray.\n\n Notes\n -----\n This function differs from the original numpy.ravel in the following aspects:\n - Only support row-major, C-style order.\n\n Examples\n --------\n It is equivalent to ``reshape(x, -1)``.\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6]])\n >>> print(np.ravel(x))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(x.reshape(-1))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(np.ravel(x.T))\n [1. 4. 2. 5. 3. 
6.]\n \"\"\"\n return _mx_nd_np.ravel(x, order)\n\n\ndef unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name\n \"\"\"\n Converts a flat index or array of flat indices into a tuple of coordinate arrays.\n\n Parameters:\n -------------\n indices : array_like\n An integer array whose elements are indices into the flattened version of an array of dimensions shape.\n Before version 1.6.0, this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling indices.\n order : Only row-major is supported currently.\n\n Returns:\n -------------\n unraveled_coords : ndarray\n Each row in the ndarray has the same shape as the indices array.\n Each column in the ndarray represents the unravelled index\n\n Examples:\n -------------\n >>> np.unravel_index([22, 41, 37], (7,6))\n [[3. 6. 6.]\n [4. 5. 1.]]\n >>> np.unravel_index(1621, (6,7,8,9))\n [3, 1, 4, 1]\n \"\"\"\n return _mx_nd_np.unravel_index(indices, shape, order=order)\n\n\n@set_module('mxnet.numpy')\ndef hanning(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hamming\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n http://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,\n 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,\n 0.07937312, 0. 
])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hanning(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.numpy')\ndef hamming(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the hamming window.\n\n The hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hanning\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,\n 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,\n 0.15302327, 0.08000001])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hamming(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"hamming window\")\n Text(0.5, 1.0, 'hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.numpy')\ndef blackman(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. 
If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n\n See Also\n --------\n hamming, hanning\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/{M-1}) + 0.08 \\cos(4\\pi n/{M-1})\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.\n\n Examples\n --------\n >>> np.blackman(12)\n array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,\n 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,\n 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.blackman(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"blackman window\")\n Text(0.5, 1.0, 'blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.numpy')\ndef flip(m, axis=None, out=None):\n r\"\"\"\n flip(m, axis=None, out=None)\n\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n Parameters\n ----------\n m : ndarray or scalar\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n out : ndarray or scalar, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n out : ndarray or scalar\n A view of `m` with the entries of axis reversed. 
Since a view is\n returned, this operation is done in constant time.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n \"\"\"\n return _mx_nd_np.flip(m, axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef around(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n around(x, decimals=0, out=None)\n\n Evenly round to the given number of decimals.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n rounded_array : ndarray or scalar\n An array of the same type as `x`, containing the rounded values.\n A reference to the result is returned.\n\n Notes\n -----\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc.\n\n This function differs from the original numpy.prod in the following aspects:\n\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot support complex-valued number.\n\n Examples\n --------\n >>> np.around([0.37, 1.64])\n array([ 0., 2.])\n >>> np.around([0.37, 1.64], decimals=1)\n array([ 0.4, 1.6])\n >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned\n array([ 1, 2, 3, 11])\n >>> np.around([1, 2, 3, 11], decimals=-1)\n array([ 0, 0, 0, 10])\n \"\"\"\n return _mx_nd_np.around(x, decimals, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef arctan2(x1, x2, out=None, **kwargs):\n r\"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. 
This is a scalar if\n `x1` and `x2` are scalars.\n\n Notes\n -----\n *arctan2* is identical to the `atan2` function of the underlying\n C library. The following special values are defined in the C\n standard: [1]_\n\n ====== ====== ================\n `x1` `x2` `arctan2(x1,x2)`\n ====== ====== ================\n +/- 0 +0 +/- 0\n +/- 0 -0 +/- pi\n > 0 +/-inf +0 / +pi\n < 0 +/-inf -0 / -pi\n +/-inf +inf +/- (pi/4)\n +/-inf -inf +/- (3*pi/4)\n ====== ====== ================\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float16, float32 and float64.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.arctan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.arctan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n return _mx_nd_np.arctan2(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef hypot(x1, x2, out=None, **kwargs):\n r\"\"\"\n Given the \"legs\" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n\n Parameters\n ----------\n x1, x2 : array_like\n Leg of the triangle(s).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n z : ndarray\n The hypotenuse of the triangle(s).\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n This function differs from the original numpy.arange in the following aspects:\n - Only support float16, float32 and float64.\n\n Examples\n --------\n >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> np.hypot(3*np.ones((3, 3)), [4])\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n \"\"\"\n return _mx_nd_np.hypot(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef bitwise_xor(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_xor(13, 17)\n 28\n\n >>> np.bitwise_xor(31, 5)\n 26\n >>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)\n array([26, 6])\n\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([26, 5])\n >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n return _mx_nd_np.bitwise_xor(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef bitwise_or(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise OR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_or(13, 17)\n 29\n\n >>> np.bitwise_or(31, 5)\n 31\n >>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)\n array([31, 7])\n\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([31, 7])\n >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, True])\n \"\"\"\n return _mx_nd_np.bitwise_or(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef ldexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns x1 * 2**x2, element-wise.\n The mantissas `x1` and twos exponents `x2` are used to construct\n floating point numbers ``x1 * 2**x2``.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The result of ``x1 * 2**x2``.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n Complex dtypes are not supported, they will raise a TypeError.\n Different from numpy, we allow x2 to be float besides int.\n `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n more clear to simply use the expression ``x1 * 2**x2``.\n\n Examples\n --------\n >>> np.ldexp(5, np.arange(4))\n array([ 5., 10., 20., 40.])\n \"\"\"\n return _mx_nd_np.ldexp(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef inner(a, b):\n r\"\"\"Inner product of two arrays.\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : ndarray\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n np.inner(a, b) = sum(a[:]*b[:])\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n or explicitly::\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n In addition `a` or `b` may be scalars, in which case::\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n array(2.)\n\n A multidimensional example:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14., 38., 62.],\n [ 86., 110., 134.]])\n \"\"\"\n return tensordot(a, b, [-1, -1])\n\n\n@set_module('mxnet.numpy')\ndef outer(a, b):\n r\"\"\"Compute the outer product of two vectors.\n Given two vectors, ``a = [a0, a1, ..., aM]`` and\n ``b = [b0, b1, ..., bN]``,\n the outer product [1]_ is::\n [[a0*b0 a0*b1 ... a0*bN ]\n [a1*b0 .\n [ ... .\n [aM*b0 aM*bN ]]\n\n Parameters\n ----------\n a : (M,) ndarray\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) ndarray\n Second input vector. Input is flattened if\n not already 1-dimensional.\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to N dimensions and other operations.\n ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.\n\n References\n ----------\n .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 
8.\n\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 0)\n\n\n@set_module('mxnet.numpy')\ndef vdot(a, b):\n r\"\"\"\n Return the dot product of two vectors.\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : ndarray\n First argument to the dot product.\n b : ndarray\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n Note that higher-dimensional arrays are flattened!\n\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n array(30.)\n >>> np.vdot(b, a)\n array(30.)\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 1)\n\n\n@set_module('mxnet.numpy')\ndef equal(x1, x2, out=None):\n \"\"\"\n Return (x1 == x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n Examples\n --------\n >>> np.equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.equal(1, np.ones(1))\n array([ True])\n \"\"\"\n return _mx_nd_np.equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef not_equal(x1, x2, out=None):\n \"\"\"\n Return (x1 != x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.not_equal(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.not_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef greater(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 > x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. 
If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.greater(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef less(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 < x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.less(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.less(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef greater_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 >= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _mx_nd_np.greater_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef less_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 <= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _mx_nd_np.less_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : ndarray\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n Notes\n -----\n rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))\n rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], 'int')\n >>> m\n array([[1, 2],\n [3, 4]], dtype=int64)\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]], dtype=int64)\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]], dtype=int64)\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1., 3.],\n [0., 2.]],\n\n [[5., 7.],\n [4., 6.]]])\n \"\"\"\n return _mx_nd_np.rot90(m, k=k, axes=axes)\n\n\n@set_module('mxnet.numpy')\ndef hsplit(ary, indices_or_sections):\n \"\"\"Split an array into multiple sub-arrays horizontally (column-wise).\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int, list of ints or tuple of ints.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a list of sorted integers, the entries\n indicate where along `axis` the array is split.\n If an index exceeds the dimension of the array along `axis`,\n it will raises errors. so index must less than or euqal to\n the dimension of the array along axis.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Notes\n ------\n - If `indices_or_sections` is given as an integer, but a split\n does not result in equal division.It will raises ValueErrors.\n - If indices_or_sections is an integer, and the number is 1, it will\n raises an error. 
Because single output from split is not supported yet...\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, [3, 6])\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float32)]\n With a higher dimensional array the split is still along the second axis.\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n If ``ary`` has one dimension, 'axis' = 0.\n >>> x = np.arange(4)\n array([0., 1., 2., 3.])\n >>> np.hsplit(x, 2)\n [array([0., 1.]), array([2., 3.])]\n If you want to produce an empty sub-array, you can see an example.\n >>> np.hsplit(x, [2, 2])\n [array([0., 1.]), array([], dtype=float32), array([2., 3.])]\n \"\"\"\n return _mx_nd_np.hsplit(ary, indices_or_sections)\n\n\n@set_module('mxnet.numpy')\ndef einsum(*operands, **kwargs):\n r\"\"\"\n einsum(subscripts, *operands, out=None, optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of ndarray\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n optimize : {False, True}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False. Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. 
`einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`np.trace`.\n * Return a diagonal, :py:func:`np.diag`.\n * Array axis summations, :py:func:`np.sum`.\n * Transpositions and permutations, :py:func:`np.transpose`.\n * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.\n * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.\n * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.\n * Tensor contractions, :py:func:`np.tensordot`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)`` produces a\n view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``\n describes traditional matrix multiplication and is equivalent to\n :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one\n operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent\n to :py:func:`np.trace(a) <np.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,\n and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view.\n\n The ``optimize`` argument which will optimize the contraction order\n of an einsum expression. 
For a contraction with three or more operands this\n can greatly increase the computational efficiency at the cost of a larger\n memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. 'optimal' is not supported\n for now.\n\n This function differs from the original `numpy.einsum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in\n the following way(s):\n\n - Does not support 'optimal' strategy\n - Does not support the alternative subscript like\n `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`\n - Does not produce view in any cases\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n array(60.)\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0., 6., 12., 18., 24.])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10., 35., 60., 85., 110.])\n >>> np.sum(a, axis=1)\n array([ 10., 35., 60., 85., 110.])\n\n For higher dimensional arrays summing a single axis can be done with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10., 35., 60., 85., 110.])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.einsum('ij->ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.transpose(c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n array(30.)\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.dot(a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.einsum('...j,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.einsum(',ij', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.multiply(3, c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0., 1., 2., 3., 4.],\n [0., 2., 4., 6., 8.]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('k...,jk', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path. Performance\n improvements can be particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n # Greedy `einsum` (faster optimal path approximation): ~0.117ms\n >>> for iteration in range(500):\n ... 
np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)\n \"\"\"\n return _mx_nd_np.einsum(*operands, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. The values in `a` are always returned in\n row-major, C-style order.\n\n To group the indices by element, rather than dimension, use `argwhere`,\n which returns a row for each non-zero element.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n ndarray.nonzero :\n Equivalent ndarray method.\n\n Notes\n -----\n While the nonzero values can be obtained with ``a[nonzero(a)]``, it is\n recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which\n will correctly handle 0-d arrays.\n\n Examples\n --------\n >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])\n >>> x\n array([[3, 0, 0],\n [0, 4, 0],\n [5, 6, 0]], dtype=int32)\n >>> np.nonzero(x)\n (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))\n\n >>> x[np.nonzero(x)]\n array([3, 4, 5, 6])\n >>> np.transpose(np.stack(np.nonzero(x)))\n array([[0, 0],\n [1, 1],\n [2, 0],\n [2, 1]], dtype=int64)\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)\n >>> a > 3\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> np.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n\n Using this result to index `a` is equivalent to using the mask directly:\n\n >>> a[np.nonzero(a > 3)]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n >>> a[a > 3]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n\n ``nonzero`` can also be called as a method of the array.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n \"\"\"\n return _mx_nd_np.nonzero(a)\n\n\n@set_module('mxnet.numpy')\ndef shares_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n This function differs from the original `numpy.shares_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `may_share_memory` in MXNet DeepNumPy\n \"\"\"\n return _mx_nd_np.shares_memory(a, b, max_work)\n\n\n@set_module('mxnet.numpy')\ndef may_share_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. 
It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n This function differs from the original `numpy.may_share_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `shares_memory` in MXNet DeepNumPy\n \"\"\"\n return _mx_nd_np.may_share_memory(a, b, max_work)\n\n\n@set_module('mxnet.numpy')\ndef diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n This is the same as the type of a in most cases.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n \"\"\"\n if (prepend or append):\n raise NotImplementedError('prepend and append options are not supported yet')\n return _mx_nd_np.diff(a, n=n, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef resize(a, new_shape):\n \"\"\"\n Return a new array with the specified shape.\n If the new array is larger than the original array, then the new\n array is filled with repeated copies of `a`. Note that this behavior\n is different from a.resize(new_shape) which fills with zeros instead\n of repeated copies of `a`.\n\n Parameters\n ----------\n a : ndarray\n Array to be resized.\n new_shape : int or tuple of int\n Shape of resized array.\n\n Returns\n -------\n reshaped_array : ndarray\n The new array is formed from the data in the old array, repeated\n if necessary to fill out the required number of elements. The\n data are repeated in the order that they are stored in memory.\n\n See Also\n --------\n ndarray.resize : resize an array in-place.\n\n Notes\n -----\n Warning: This functionality does **not** consider axes separately,\n i.e. it does not apply interpolation/extrapolation.\n It fills the return array with the required number of elements, taken\n from `a` as they are laid out in memory, disregarding strides and axes.\n (This is in case the new shape is smaller. 
For larger, see above.)\n This functionality is therefore not suitable to resize images,\n or data where each axis represents a separate and distinct entity.\n\n Examples\n --------\n >>> a = np.array([[0, 1], [2, 3]])\n >>> np.resize(a, (2, 3))\n array([[0., 1., 2.],\n [3., 0., 1.]])\n >>> np.resize(a, (1, 4))\n array([[0., 1., 2., 3.]])\n >>> np.resize(a,(2, 4))\n array([[0., 1., 2., 3.],\n [0., 1., 2., 3.]])\n \"\"\"\n return _mx_nd_np.resize(a, new_shape)\n\n\n@set_module('mxnet.numpy')\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):\n \"\"\"\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : scalar\n ndarray\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n Gluon does not support copy = False.\n nan : int, float, optional\n Value to be used to fill NaN values. If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n .. versionadded:: 1.13\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). 
This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,\n 1.2800000e+02])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,\n 1.2800000e+02])\n >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype=\"float64\")/0\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y)\n array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],\n [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n \"\"\"\n return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)\n\n\n@set_module('mxnet.numpy')\ndef where(condition, x=None, y=None):\n \"\"\"where(condition, [x, y])\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. The rest of this documentation\n covers only the case where all three arguments are provided.\n\n Parameters\n ----------\n condition : ndarray\n Where True, yield `x`, otherwise yield `y`.\n x, y : ndarray\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape. `x` and `y` must have the same dtype.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.where(a < 5, a, 10*a)\n array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])\n\n This can be used on multidimensional arrays too:\n\n >>> cond = np.array([[True, False], [True, True]])\n >>> x = np.array([[1, 2], [3, 4]])\n >>> y = np.array([[9, 8], [7, 6]])\n >>> np.where(cond, x, y)\n array([[1., 8.],\n [3., 4.]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = onp.ogrid[:3, :4]\n >>> x = np.array(x)\n >>> y = np.array(y)\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]], dtype=int64)\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, np.array(-1)) # -1 is broadcast\n array([[ 0., 1., 2.],\n [ 0., 2., -1.],\n [ 0., 3., -1.]])\n \"\"\"\n return _mx_nd_np.where(condition, x, y)\n"
]
| [
[
"numpy.array",
"numpy.dtype"
]
]
|
VITA-Group/Audio-Lottery | [
"98ed7b34858469b5c8594c92c45f0602e14c1806"
]
| [
"CNN_LSTM/deepspeech_pytorch/prune.py"
]
| [
"import json\nimport os\nimport random\nimport time\nimport logging\nimport copy\nimport glob\n\nimport numpy as np\nimport torch.distributed as dist\nimport torch.utils.data.distributed\nfrom apex import amp\nfrom hydra.utils import to_absolute_path\nfrom omegaconf import OmegaConf\nfrom torch.nn.parallel import DistributedDataParallel\nfrom warpctc_pytorch import CTCLoss\n\nfrom deepspeech_pytorch.checkpoint import FileCheckpointHandler, GCSCheckpointHandler\nfrom deepspeech_pytorch.configs.train_config import SGDConfig, AdamConfig, BiDirectionalConfig, UniDirectionalConfig, \\\n FileCheckpointConfig, GCSCheckpointConfig\nfrom deepspeech_pytorch.decoder import GreedyDecoder\nfrom deepspeech_pytorch.loader.data_loader import SpectrogramDataset, DSRandomSampler, DSElasticDistributedSampler, \\\n AudioDataLoader\nfrom deepspeech_pytorch.logger import VisdomLogger, TensorBoardLogger\nfrom deepspeech_pytorch.model import DeepSpeech, supported_rnns\nfrom deepspeech_pytorch.state import TrainingState\nfrom deepspeech_pytorch.testing import run_evaluation\nfrom deepspeech_pytorch.utils import check_loss\nfrom deepspeech_pytorch.pruning_utils import prune_main, rewind, load_winning_ticket, load_winning_ticket_with_random_init\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef prune_train(cfg):\n # Set seeds for determinism\n torch.manual_seed(cfg.training.seed)\n torch.cuda.manual_seed_all(cfg.training.seed)\n np.random.seed(cfg.training.seed)\n random.seed(cfg.training.seed)\n\n main_proc = True\n device = torch.device(\"cpu\" if cfg.training.no_cuda else \"cuda\")\n\n is_distributed = os.environ.get(\"LOCAL_RANK\") # If local rank exists, distributed env\n\n if is_distributed:\n # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops\n # because NCCL uses a spin-lock on the device. 
Set this env var and\n # to enable a watchdog thread that will destroy stale NCCL communicators\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n\n device_id = int(os.environ[\"LOCAL_RANK\"])\n torch.cuda.set_device(device_id)\n print(f\"Setting CUDA Device to {device_id}\")\n\n dist.init_process_group(backend=cfg.training.dist_backend.value)\n main_proc = device_id == 0 # Main process handles saving of models and reporting\n\n if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:\n checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)\n else:\n raise ValueError(\"Checkpoint Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger = VisdomLogger(id=cfg.visualization.id,\n num_epochs=cfg.training.epochs)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger = TensorBoardLogger(id=cfg.visualization.id,\n log_dir=to_absolute_path(cfg.visualization.log_dir),\n log_params=cfg.visualization.log_params)\n\n if cfg.checkpointing.load_auto_checkpoint:\n latest_checkpoint = checkpoint_handler.find_latest_checkpoint()\n if latest_checkpoint:\n cfg.checkpointing.continue_from = latest_checkpoint\n\n if cfg.checkpointing.continue_from: # Starting from previous model\n original_model_path = os.path.join(cfg.checkpointing.continue_from, 'prune_0',\n 'deepspeech_checkpoint_epoch_0.pth')\n original_state = TrainingState.load_state(state_path=to_absolute_path(original_model_path))\n original_model_state_dict = original_state.model.state_dict()\n original_optimizer_state_dict = original_state.optim_state\n\n prune_list = sorted(glob.glob(to_absolute_path(os.path.join(cfg.checkpointing.continue_from, '*'))),\n key=lambda x: int(os.path.basename(x).split('_')[-1]))\n \n prev_path = os.path.join(prune_list[-2], 'deepspeech_final.pth')\n start_pt = int(os.path.basename(prune_list[-2]).split('_')[-1]) + 1\n state = TrainingState.load_state(state_path=to_absolute_path(prev_path))\n model = state.model\n\n if main_proc and cfg.visualization.visdom: # Add previous scores to visdom graph\n visdom_logger.load_previous_values(state.epoch, state.results)\n if main_proc and cfg.visualization.tensorboard: # Previous scores to tensorboard logs\n tensorboard_logger.load_previous_values(state.epoch, state.results)\n else:\n # Initialise new model training\n with open(to_absolute_path(cfg.data.labels_path)) as label_file:\n labels = json.load(label_file)\n\n if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=True)\n elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=False,\n context=cfg.model.lookahead_context)\n else:\n raise ValueError(\"Model Config has not been specified correctly.\")\n\n state = TrainingState(model=model)\n state.init_results_tracking(epochs=cfg.training.epochs)\n\n start_pt = 0\n\n # Data setup\n evaluation_decoder = GreedyDecoder(model.labels) # Decoder used for validation\n train_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n 
manifest_filepath=to_absolute_path(cfg.data.train_manifest),\n labels=model.labels,\n normalize=True,\n augmentation_conf=cfg.data.augmentation)\n test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.val_manifest),\n labels=model.labels,\n normalize=True)\n if not is_distributed:\n train_sampler = DSRandomSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n else:\n train_sampler = DSElasticDistributedSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n train_loader = AudioDataLoader(dataset=train_dataset,\n num_workers=cfg.data.num_workers,\n batch_sampler=train_sampler)\n test_loader = AudioDataLoader(dataset=test_dataset,\n num_workers=cfg.data.num_workers,\n batch_size=cfg.data.batch_size)\n\n model = model.to(device)\n if state.mask_dict is not None:\n for key in state.mask_dict:\n state.mask_dict[key] = state.mask_dict[key].to(device)\n\n parameters = model.parameters()\n if OmegaConf.get_type(cfg.optim) is SGDConfig:\n optimizer = torch.optim.SGD(parameters,\n lr=cfg.optim.learning_rate,\n momentum=cfg.optim.momentum,\n nesterov=True,\n weight_decay=cfg.optim.weight_decay)\n elif OmegaConf.get_type(cfg.optim) is AdamConfig:\n optimizer = torch.optim.AdamW(parameters,\n lr=cfg.optim.learning_rate,\n betas=cfg.optim.betas,\n eps=cfg.optim.eps,\n weight_decay=cfg.optim.weight_decay)\n else:\n raise ValueError(\"Optimizer has not been specified correctly.\")\n\n model, optimizer = amp.initialize(model, optimizer,\n enabled=not cfg.training.no_cuda,\n opt_level=cfg.apex.opt_level,\n loss_scale=cfg.apex.loss_scale)\n if state.optim_state is not None:\n optimizer.load_state_dict(state.optim_state)\n if state.amp_state is not None:\n amp.load_state_dict(state.amp_state)\n\n # Track states for optimizer/amp\n state.track_optim_state(optimizer)\n if not cfg.training.no_cuda:\n state.track_amp_state(amp)\n\n if is_distributed:\n model = DistributedDataParallel(model, device_ids=[device_id])\n logging.info(model)\n logging.info(\"Number of parameters: %d\" % DeepSpeech.get_param_size(model))\n logging.info(\"Start from the %d -th pruneing\" % start_pt)\n\n criterion = CTCLoss()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n checkpoint_folder = os.path.abspath(cfg.checkpointing.save_folder)\n \n wers = []\n\n for pt in range(start_pt, cfg.training.prune_times + 1):\n cfg.checkpointing.save_folder = os.path.join(checkpoint_folder, 'prune_{}'.format(pt))\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n if main_proc and pt == 0:\n checkpoint_handler.save_checkpoint_model(epoch=-1, state=state)\n original_model_state_dict = copy.deepcopy(model.state_dict())\n original_optimizer_state_dict = copy.deepcopy(optimizer.state_dict())\n if pt > 0:\n state.mask_dict = prune_main(model, cfg.training.prune_percentage,\n supported_rnns[cfg.model.rnn_type.value],\n original_model_state_dict, random_prune=cfg.training.random_prune)\n optimizer.load_state_dict(original_optimizer_state_dict)\n \n state.set_best_wer(100.0)\n \n for epoch in range(cfg.training.epochs):\n model.train()\n end = time.time()\n start_epoch_time = time.time()\n state.set_epoch(epoch=epoch)\n train_sampler.set_epoch(epoch=epoch)\n train_sampler.reset_training_step(training_step=state.training_step)\n for i, (data) in enumerate(train_loader, start=state.training_step):\n state.set_training_step(training_step=i)\n inputs, 
targets, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n # measure data loading time\n data_time.update(time.time() - end)\n inputs = inputs.to(device)\n\n out, output_sizes = model(inputs, input_sizes)\n out = out.transpose(0, 1) # TxNxH\n\n float_out = out.float() # ensure float32 for loss\n loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)\n loss = loss / inputs.size(0) # average the loss by minibatch\n loss_value = loss.item()\n\n # Check to ensure valid loss was calculated\n valid_loss, error = check_loss(loss, loss_value)\n if valid_loss:\n optimizer.zero_grad()\n\n # compute gradient\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.optim.max_norm)\n optimizer.step()\n else:\n logging.info(error)\n logging.info('Skipping grad update')\n loss_value = 0\n\n state.avg_loss += loss_value\n losses.update(loss_value, inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if (i + 1) % 100 == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n (epoch + 1), (i + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))\n\n if main_proc and cfg.checkpointing.checkpoint_per_iteration:\n checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)\n del loss, out, float_out\n\n state.avg_loss /= len(train_dataset)\n\n epoch_time = time.time() - start_epoch_time\n logging.info('Training Summary Epoch: [{0}]\\t'\n 'Time taken (s): {epoch_time:.0f}\\t'\n 'Average Loss {loss:.3f}\\t'.format(epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))\n\n with torch.no_grad():\n wer, cer, output_data = run_evaluation(test_loader=test_loader,\n device=device,\n model=model,\n decoder=evaluation_decoder,\n target_decoder=evaluation_decoder)\n\n state.add_results(epoch=epoch,\n loss_result=state.avg_loss,\n wer_result=wer,\n cer_result=cer)\n\n logging.info('Validation Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=wer, cer=cer))\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger.update(epoch, state.result_state)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger.update(epoch, state.result_state, model.named_parameters())\n\n if main_proc and cfg.checkpointing.checkpoint and epoch == cfg.training.epochs - 1: # Save epoch checkpoint\n checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)\n # anneal lr\n for g in optimizer.param_groups:\n g['lr'] = g['lr'] / cfg.optim.learning_anneal\n logging.info('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))\n\n if main_proc and (state.best_wer is None or state.best_wer > wer):\n checkpoint_handler.save_best_model(epoch=epoch, state=state)\n state.set_best_wer(wer)\n state.reset_avg_loss()\n state.reset_training_step() # Reset training step for next epoch\n wers.append((pt, state.best_wer))\n logging.info(wers)\n\n \ndef transfer_train(cfg):\n # Set seeds for determinism\n torch.manual_seed(cfg.training.seed)\n torch.cuda.manual_seed_all(cfg.training.seed)\n np.random.seed(cfg.training.seed)\n random.seed(cfg.training.seed)\n\n main_proc = True\n device = torch.device(\"cpu\" if cfg.training.no_cuda else \"cuda\")\n\n is_distributed = 
os.environ.get(\"LOCAL_RANK\") # If local rank exists, distributed env\n\n if is_distributed:\n # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops\n # because NCCL uses a spin-lock on the device. Set this env var and\n # to enable a watchdog thread that will destroy stale NCCL communicators\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n\n device_id = int(os.environ[\"LOCAL_RANK\"])\n torch.cuda.set_device(device_id)\n print(f\"Setting CUDA Device to {device_id}\")\n\n dist.init_process_group(backend=cfg.training.dist_backend.value)\n main_proc = device_id == 0 # Main process handles saving of models and reporting\n\n if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:\n checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)\n else:\n raise ValueError(\"Checkpoint Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger = VisdomLogger(id=cfg.visualization.id,\n num_epochs=cfg.training.epochs)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger = TensorBoardLogger(id=cfg.visualization.id,\n log_dir=to_absolute_path(cfg.visualization.log_dir),\n log_params=cfg.visualization.log_params)\n\n if cfg.checkpointing.load_auto_checkpoint:\n latest_checkpoint = checkpoint_handler.find_latest_checkpoint()\n if latest_checkpoint:\n cfg.checkpointing.continue_from = latest_checkpoint\n\n if cfg.checkpointing.continue_from: # Starting from previous model\n checkpoint_list = sorted(glob.glob(to_absolute_path(os.path.join(cfg.checkpointing.continue_from, '*'))),\n key=lambda x: int(os.path.basename(x).split('_')[-1]))\n \n start_pt = int(os.path.basename(checkpoint_list[-1]).split('_')[-1]) + 1\n # Initialise new model training\n with open(to_absolute_path(cfg.data.labels_path)) as label_file:\n labels = json.load(label_file)\n\n if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=True)\n elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=False,\n context=cfg.model.lookahead_context)\n else:\n raise ValueError(\"Model Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom: # Add previous scores to visdom graph\n visdom_logger.load_previous_values(state.epoch, state.results)\n if main_proc and cfg.visualization.tensorboard: # Previous scores to tensorboard logs\n tensorboard_logger.load_previous_values(state.epoch, state.results)\n state = TrainingState(model=model)\n state.init_results_tracking(epochs=cfg.training.epochs)\n else:\n # Initialise new model training\n with open(to_absolute_path(cfg.data.labels_path)) as label_file:\n labels = json.load(label_file)\n\n if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=True)\n elif OmegaConf.get_type(cfg.model) is 
UniDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=False,\n context=cfg.model.lookahead_context)\n else:\n raise ValueError(\"Model Config has not been specified correctly.\")\n\n state = TrainingState(model=model)\n state.init_results_tracking(epochs=cfg.training.epochs)\n\n start_pt = 1\n\n # Load saved models during pruning, used to extract winning tickets\n if cfg.checkpointing.pretrained_model:\n prune_list = sorted(glob.glob(to_absolute_path(os.path.join(cfg.checkpointing.pretrained_model, '*'))),\n key=lambda x: int(os.path.basename(x).split('_')[-1]))\n original_model_path = os.path.join(cfg.checkpointing.pretrained_model, 'prune_0',\n 'deepspeech_checkpoint_epoch_0.pth')\n original_state = TrainingState.load_state(state_path=to_absolute_path(original_model_path))\n original_model_state_dict = original_state.model.state_dict()\n original_optimizer_state_dict = original_state.optim_state\n \n # Data setup\n evaluation_decoder = GreedyDecoder(model.labels) # Decoder used for validation\n train_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.train_manifest),\n labels=model.labels,\n normalize=True,\n augmentation_conf=cfg.data.augmentation)\n test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.val_manifest),\n labels=model.labels,\n normalize=True)\n if not is_distributed:\n train_sampler = DSRandomSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n else:\n train_sampler = DSElasticDistributedSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n train_loader = AudioDataLoader(dataset=train_dataset,\n num_workers=cfg.data.num_workers,\n batch_sampler=train_sampler)\n test_loader = AudioDataLoader(dataset=test_dataset,\n num_workers=cfg.data.num_workers,\n batch_size=cfg.data.batch_size)\n\n model = model.to(device)\n parameters = model.parameters()\n if OmegaConf.get_type(cfg.optim) is SGDConfig:\n optimizer = torch.optim.SGD(parameters,\n lr=cfg.optim.learning_rate,\n momentum=cfg.optim.momentum,\n nesterov=True,\n weight_decay=cfg.optim.weight_decay)\n elif OmegaConf.get_type(cfg.optim) is AdamConfig:\n optimizer = torch.optim.AdamW(parameters,\n lr=cfg.optim.learning_rate,\n betas=cfg.optim.betas,\n eps=cfg.optim.eps,\n weight_decay=cfg.optim.weight_decay)\n else:\n raise ValueError(\"Optimizer has not been specified correctly.\")\n\n model, optimizer = amp.initialize(model, optimizer,\n enabled=not cfg.training.no_cuda,\n opt_level=cfg.apex.opt_level,\n loss_scale=cfg.apex.loss_scale)\n if state.optim_state is not None:\n optimizer.load_state_dict(state.optim_state)\n if state.amp_state is not None:\n amp.load_state_dict(state.amp_state)\n\n # Track states for optimizer/amp\n state.track_optim_state(optimizer)\n if not cfg.training.no_cuda:\n state.track_amp_state(amp)\n\n if is_distributed:\n model = DistributedDataParallel(model, device_ids=[device_id])\n logging.info(model)\n logging.info(\"Number of parameters: %d\" % DeepSpeech.get_param_size(model))\n logging.info(\"Start from the %d -th pruneing\" % start_pt)\n\n criterion = CTCLoss()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n checkpoint_folder = 
os.path.abspath(cfg.checkpointing.save_folder)\n \n wers = []\n\n for pt in range(start_pt, cfg.training.prune_times + 1):\n cfg.checkpointing.save_folder = os.path.join(checkpoint_folder, 'prune_{}'.format(pt))\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n \n prune_state_path = os.path.join(prune_list[pt], 'deepspeech_final.pth')\n prune_state = TrainingState.load_state(state_path=to_absolute_path(prune_state_path))\n if prune_state.mask_dict is not None:\n for key in prune_state.mask_dict:\n prune_state.mask_dict[key] = prune_state.mask_dict[key].to(device)\n \n # model = prune_state.model.to(device)\n remove_p = False if pt == start_pt else True\n # print(remove_p)\n load_winning_ticket(model, prune_state.mask_dict, supported_rnns[cfg.model.rnn_type.value], original_model_state_dict, remove_p=remove_p)\n optimizer.load_state_dict(original_optimizer_state_dict)\n \n state.set_best_wer(100.0)\n \n for epoch in range(cfg.training.epochs):\n #if epoch > 0:\n # checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)\n # break\n model.train()\n end = time.time()\n start_epoch_time = time.time()\n state.set_epoch(epoch=epoch)\n train_sampler.set_epoch(epoch=epoch)\n train_sampler.reset_training_step(training_step=state.training_step)\n for i, (data) in enumerate(train_loader, start=state.training_step):\n #if i > 0:\n # break\n state.set_training_step(training_step=i)\n inputs, targets, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n # measure data loading time\n data_time.update(time.time() - end)\n inputs = inputs.to(device)\n\n out, output_sizes = model(inputs, input_sizes)\n out = out.transpose(0, 1) # TxNxH\n\n float_out = out.float() # ensure float32 for loss\n loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)\n loss = loss / inputs.size(0) # average the loss by minibatch\n loss_value = loss.item()\n\n # Check to ensure valid loss was calculated\n valid_loss, error = check_loss(loss, loss_value)\n if valid_loss:\n optimizer.zero_grad()\n\n # compute gradient\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.optim.max_norm)\n optimizer.step()\n else:\n logging.info(error)\n logging.info('Skipping grad update')\n loss_value = 0\n\n state.avg_loss += loss_value\n losses.update(loss_value, inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if (i + 1) % 100 == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n (epoch + 1), (i + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))\n\n if main_proc and cfg.checkpointing.checkpoint_per_iteration:\n checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)\n del loss, out, float_out\n\n state.avg_loss /= len(train_dataset)\n\n epoch_time = time.time() - start_epoch_time\n logging.info('Training Summary Epoch: [{0}]\\t'\n 'Time taken (s): {epoch_time:.0f}\\t'\n 'Average Loss {loss:.3f}\\t'.format(epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))\n\n with torch.no_grad():\n wer, cer, output_data = run_evaluation(test_loader=test_loader,\n device=device,\n model=model,\n decoder=evaluation_decoder,\n target_decoder=evaluation_decoder)\n\n state.add_results(epoch=epoch,\n 
loss_result=state.avg_loss,\n wer_result=wer,\n cer_result=cer)\n\n logging.info('Validation Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=wer, cer=cer))\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger.update(epoch, state.result_state)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger.update(epoch, state.result_state, model.named_parameters())\n\n if main_proc and cfg.checkpointing.checkpoint and epoch == cfg.training.epochs - 1: # Save epoch checkpoint\n checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)\n # anneal lr\n for g in optimizer.param_groups:\n g['lr'] = g['lr'] / cfg.optim.learning_anneal\n logging.info('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))\n\n if main_proc and (state.best_wer is None or state.best_wer > wer):\n checkpoint_handler.save_best_model(epoch=epoch, state=state)\n state.set_best_wer(wer)\n state.reset_avg_loss()\n state.reset_training_step() # Reset training step for next epoch\n wers.append((pt, state.best_wer))\n logging.info(wers)\n\n\ndef random_ticket_train(cfg):\n # Set seeds for determinism\n torch.manual_seed(cfg.training.seed)\n torch.cuda.manual_seed_all(cfg.training.seed)\n np.random.seed(cfg.training.seed)\n random.seed(cfg.training.seed)\n\n main_proc = True\n device = torch.device(\"cpu\" if cfg.training.no_cuda else \"cuda\")\n\n is_distributed = os.environ.get(\"LOCAL_RANK\") # If local rank exists, distributed env\n\n if is_distributed:\n # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops\n # because NCCL uses a spin-lock on the device. Set this env var and\n # to enable a watchdog thread that will destroy stale NCCL communicators\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n\n device_id = int(os.environ[\"LOCAL_RANK\"])\n torch.cuda.set_device(device_id)\n print(f\"Setting CUDA Device to {device_id}\")\n\n dist.init_process_group(backend=cfg.training.dist_backend.value)\n main_proc = device_id == 0 # Main process handles saving of models and reporting\n\n if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:\n checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)\n else:\n raise ValueError(\"Checkpoint Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger = VisdomLogger(id=cfg.visualization.id,\n num_epochs=cfg.training.epochs)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger = TensorBoardLogger(id=cfg.visualization.id,\n log_dir=to_absolute_path(cfg.visualization.log_dir),\n log_params=cfg.visualization.log_params)\n\n if cfg.checkpointing.load_auto_checkpoint:\n latest_checkpoint = checkpoint_handler.find_latest_checkpoint()\n if latest_checkpoint:\n cfg.checkpointing.continue_from = latest_checkpoint\n\n if cfg.checkpointing.continue_from: # Starting from previous model\n checkpoint_list = sorted(glob.glob(to_absolute_path(os.path.join(cfg.checkpointing.continue_from, '*'))),\n key=lambda x: int(os.path.basename(x).split('_')[-1]))\n \n start_pt = int(os.path.basename(checkpoint_list[-1]).split('_')[-1]) + 1\n with open(to_absolute_path(cfg.data.labels_path)) as label_file:\n labels = json.load(label_file)\n\n if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n 
labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=True)\n elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=False,\n context=cfg.model.lookahead_context)\n else:\n raise ValueError(\"Model Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom: # Add previous scores to visdom graph\n visdom_logger.load_previous_values(state.epoch, state.results)\n if main_proc and cfg.visualization.tensorboard: # Previous scores to tensorboard logs\n tensorboard_logger.load_previous_values(state.epoch, state.results)\n state = TrainingState(model=model)\n state.init_results_tracking(epochs=cfg.training.epochs)\n else:\n # Initialise new model training\n with open(to_absolute_path(cfg.data.labels_path)) as label_file:\n labels = json.load(label_file)\n\n if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=True)\n elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:\n model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,\n nb_layers=cfg.model.hidden_layers,\n labels=labels,\n rnn_type=supported_rnns[cfg.model.rnn_type.value],\n audio_conf=cfg.data.spect,\n bidirectional=False,\n context=cfg.model.lookahead_context)\n else:\n raise ValueError(\"Model Config has not been specified correctly.\")\n\n state = TrainingState(model=model)\n state.init_results_tracking(epochs=cfg.training.epochs)\n\n start_pt = 0\n\n # Load saved models during pruning, used to extract winning tickets\n if cfg.checkpointing.pretrained_model:\n prune_list = sorted(glob.glob(to_absolute_path(os.path.join(cfg.checkpointing.pretrained_model, '*'))),\n key=lambda x: int(os.path.basename(x).split('_')[-1]))\n \n # Data setup\n evaluation_decoder = GreedyDecoder(model.labels) # Decoder used for validation\n train_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.train_manifest),\n labels=model.labels,\n normalize=True,\n augmentation_conf=cfg.data.augmentation)\n test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.val_manifest),\n labels=model.labels,\n normalize=True)\n if not is_distributed:\n train_sampler = DSRandomSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n else:\n train_sampler = DSElasticDistributedSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n train_loader = AudioDataLoader(dataset=train_dataset,\n num_workers=cfg.data.num_workers,\n batch_sampler=train_sampler)\n test_loader = AudioDataLoader(dataset=test_dataset,\n num_workers=cfg.data.num_workers,\n batch_size=cfg.data.batch_size)\n\n model = model.to(device)\n if state.mask_dict is not None:\n for key in state.mask_dict:\n state.mask_dict[key] = state.mask_dict[key].to(device)\n\n parameters = model.parameters()\n if OmegaConf.get_type(cfg.optim) is SGDConfig:\n optimizer = torch.optim.SGD(parameters,\n lr=cfg.optim.learning_rate,\n momentum=cfg.optim.momentum,\n nesterov=True,\n weight_decay=cfg.optim.weight_decay)\n elif 
OmegaConf.get_type(cfg.optim) is AdamConfig:\n optimizer = torch.optim.AdamW(parameters,\n lr=cfg.optim.learning_rate,\n betas=cfg.optim.betas,\n eps=cfg.optim.eps,\n weight_decay=cfg.optim.weight_decay)\n else:\n raise ValueError(\"Optimizer has not been specified correctly.\")\n\n model, optimizer = amp.initialize(model, optimizer,\n enabled=not cfg.training.no_cuda,\n opt_level=cfg.apex.opt_level,\n loss_scale=cfg.apex.loss_scale)\n if state.optim_state is not None:\n optimizer.load_state_dict(state.optim_state)\n if state.amp_state is not None:\n amp.load_state_dict(state.amp_state)\n\n # Track states for optimizer/amp\n state.track_optim_state(optimizer)\n if not cfg.training.no_cuda:\n state.track_amp_state(amp)\n\n if is_distributed:\n model = DistributedDataParallel(model, device_ids=[device_id])\n logging.info(model)\n logging.info(\"Number of parameters: %d\" % DeepSpeech.get_param_size(model))\n logging.info(\"Start from the %d -th pruneing\" % start_pt)\n\n criterion = CTCLoss()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n checkpoint_folder = os.path.abspath(cfg.checkpointing.save_folder)\n \n wers = []\n\n for pt in range(start_pt, cfg.training.prune_times + 1):\n cfg.checkpointing.save_folder = os.path.join(checkpoint_folder, 'prune_{}'.format(pt))\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n \n # If not continue from a previous checkpoint or current sparsity is not the same as the sparsity in checkpoint\n # we need to reload the winning ticket and rewind to theta_0\n if pt > 0:\n prune_state_path = os.path.join(prune_list[pt], 'deepspeech_final.pth')\n prune_state = TrainingState.load_state(state_path=to_absolute_path(prune_state_path))\n if prune_state.mask_dict is not None:\n state.mask_dict = prune_state.mask_dict\n for key in prune_state.mask_dict:\n prune_state.mask_dict[key] = prune_state.mask_dict[key].to(device)\n load_winning_ticket_with_random_init(model, prune_state.mask_dict, supported_rnns[cfg.model.rnn_type.value])\n # reset optimizer, didn't find a better way to do this\n parameters = model.parameters()\n if OmegaConf.get_type(cfg.optim) is SGDConfig:\n optimizer = torch.optim.SGD(parameters,\n lr=cfg.optim.learning_rate,\n momentum=cfg.optim.momentum,\n nesterov=True,\n weight_decay=cfg.optim.weight_decay)\n elif OmegaConf.get_type(cfg.optim) is AdamConfig:\n optimizer = torch.optim.AdamW(parameters,\n lr=cfg.optim.learning_rate,\n betas=cfg.optim.betas,\n eps=cfg.optim.eps,\n weight_decay=cfg.optim.weight_decay)\n else:\n raise ValueError(\"Optimizer has not been specified correctly.\")\n\n model, optimizer = amp.initialize(model, optimizer,\n enabled=not cfg.training.no_cuda,\n opt_level=cfg.apex.opt_level,\n loss_scale=cfg.apex.loss_scale)\n \n state.set_best_wer(100.0)\n \n for epoch in range(cfg.training.epochs):\n model.train()\n end = time.time()\n start_epoch_time = time.time()\n state.set_epoch(epoch=epoch)\n train_sampler.set_epoch(epoch=epoch)\n train_sampler.reset_training_step(training_step=state.training_step)\n for i, (data) in enumerate(train_loader, start=state.training_step):\n state.set_training_step(training_step=i)\n inputs, targets, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n # measure data loading time\n data_time.update(time.time() - end)\n inputs = inputs.to(device)\n\n out, output_sizes = model(inputs, input_sizes)\n out = out.transpose(0, 1) # TxNxH\n\n float_out = out.float() # ensure float32 for 
loss\n loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)\n loss = loss / inputs.size(0) # average the loss by minibatch\n loss_value = loss.item()\n\n # Check to ensure valid loss was calculated\n valid_loss, error = check_loss(loss, loss_value)\n if valid_loss:\n optimizer.zero_grad()\n\n # compute gradient\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.optim.max_norm)\n optimizer.step()\n else:\n logging.info(error)\n logging.info('Skipping grad update')\n loss_value = 0\n\n state.avg_loss += loss_value\n losses.update(loss_value, inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if (i + 1) % 100 == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n (epoch + 1), (i + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))\n\n if main_proc and cfg.checkpointing.checkpoint_per_iteration:\n checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)\n del loss, out, float_out\n\n state.avg_loss /= len(train_dataset)\n\n epoch_time = time.time() - start_epoch_time\n logging.info('Training Summary Epoch: [{0}]\\t'\n 'Time taken (s): {epoch_time:.0f}\\t'\n 'Average Loss {loss:.3f}\\t'.format(epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))\n\n with torch.no_grad():\n wer, cer, output_data = run_evaluation(test_loader=test_loader,\n device=device,\n model=model,\n decoder=evaluation_decoder,\n target_decoder=evaluation_decoder)\n\n state.add_results(epoch=epoch,\n loss_result=state.avg_loss,\n wer_result=wer,\n cer_result=cer)\n\n logging.info('Validation Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=wer, cer=cer))\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger.update(epoch, state.result_state)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger.update(epoch, state.result_state, model.named_parameters())\n\n if main_proc and cfg.checkpointing.checkpoint and epoch == cfg.training.epochs - 1: # Save epoch checkpoint\n checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)\n # anneal lr\n for g in optimizer.param_groups:\n g['lr'] = g['lr'] / cfg.optim.learning_anneal\n logging.info('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))\n\n if main_proc and (state.best_wer is None or state.best_wer > wer):\n checkpoint_handler.save_best_model(epoch=epoch, state=state)\n state.set_best_wer(wer)\n state.reset_avg_loss()\n state.reset_training_step() # Reset training step for next epoch\n wers.append((pt, state.best_wer))\n logging.info(wers) \n\n \ndef train_subnetwork(cfg):\n # Set seeds for determinism\n torch.manual_seed(cfg.training.seed)\n torch.cuda.manual_seed_all(cfg.training.seed)\n np.random.seed(cfg.training.seed)\n random.seed(cfg.training.seed)\n\n main_proc = True\n device = torch.device(\"cpu\" if cfg.training.no_cuda else \"cuda\")\n\n is_distributed = os.environ.get(\"LOCAL_RANK\") # If local rank exists, distributed env\n\n if is_distributed:\n # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops\n # because NCCL uses a spin-lock on the device. 
Set this env var and\n # to enable a watchdog thread that will destroy stale NCCL communicators\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n\n device_id = int(os.environ[\"LOCAL_RANK\"])\n torch.cuda.set_device(device_id)\n print(f\"Setting CUDA Device to {device_id}\")\n\n dist.init_process_group(backend=cfg.training.dist_backend.value)\n main_proc = device_id == 0 # Main process handles saving of models and reporting\n\n if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:\n checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)\n elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:\n checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)\n else:\n raise ValueError(\"Checkpoint Config has not been specified correctly.\")\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger = VisdomLogger(id=cfg.visualization.id,\n num_epochs=cfg.training.epochs)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger = TensorBoardLogger(id=cfg.visualization.id,\n log_dir=to_absolute_path(cfg.visualization.log_dir),\n log_params=cfg.visualization.log_params)\n\n if cfg.checkpointing.load_auto_checkpoint:\n latest_checkpoint = checkpoint_handler.find_latest_checkpoint()\n if latest_checkpoint:\n cfg.checkpointing.continue_from = latest_checkpoint\n\n if cfg.checkpointing.continue_from:\n state = TrainingState.load_state(state_path=to_absolute_path(cfg.checkpointing.continue_from))\n model = state.model\n \n if main_proc and cfg.visualization.visdom: # Add previous scores to visdom graph\n visdom_logger.load_previous_values(state.epoch, state.results)\n if main_proc and cfg.visualization.tensorboard: # Previous scores to tensorboard logs\n tensorboard_logger.load_previous_values(state.epoch, state.results)\n \n elif cfg.checkpointing.pretrained_model and cfg.checkpointing.best_iter: # Starting from previous model\n original_model_path = os.path.join(cfg.checkpointing.pretrained_model, 'prune_0',\n 'deepspeech_checkpoint_epoch_0.pth')\n original_state = TrainingState.load_state(state_path=to_absolute_path(original_model_path))\n original_model_state_dict = original_state.model.state_dict()\n original_optimizer_state_dict = original_state.optim_state\n \n checkpoint_path = os.path.join(cfg.checkpointing.pretrained_model, 'prune_{}'.format(cfg.checkpointing.best_iter), 'deepspeech_final.pth')\n state = TrainingState.load_state(state_path=to_absolute_path(checkpoint_path))\n model = state.model\n rewind(model, supported_rnns[cfg.model.rnn_type.value], original_model_state_dict)\n \n\n if main_proc and cfg.visualization.visdom: # Add previous scores to visdom graph\n visdom_logger.load_previous_values(state.epoch, state.results)\n if main_proc and cfg.visualization.tensorboard: # Previous scores to tensorboard logs\n tensorboard_logger.load_previous_values(state.epoch, state.results)\n else:\n raise ValueError('Checkpoint not found. 
Please set the checkpoint to checkpointing.continue_from.')\n\n # Data setup\n evaluation_decoder = GreedyDecoder(model.labels) # Decoder used for validation\n train_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.train_manifest),\n labels=model.labels,\n normalize=True,\n augmentation_conf=cfg.data.augmentation)\n test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,\n manifest_filepath=to_absolute_path(cfg.data.val_manifest),\n labels=model.labels,\n normalize=True)\n if not is_distributed:\n train_sampler = DSRandomSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n else:\n train_sampler = DSElasticDistributedSampler(dataset=train_dataset,\n batch_size=cfg.data.batch_size,\n start_index=state.training_step)\n train_loader = AudioDataLoader(dataset=train_dataset,\n num_workers=cfg.data.num_workers,\n batch_sampler=train_sampler)\n test_loader = AudioDataLoader(dataset=test_dataset,\n num_workers=cfg.data.num_workers,\n batch_size=cfg.data.batch_size)\n\n model = model.to(device)\n for key in state.mask_dict:\n state.mask_dict[key] = state.mask_dict[key].to(device)\n parameters = model.parameters()\n if OmegaConf.get_type(cfg.optim) is SGDConfig:\n optimizer = torch.optim.SGD(parameters,\n lr=cfg.optim.learning_rate,\n momentum=cfg.optim.momentum,\n nesterov=True,\n weight_decay=cfg.optim.weight_decay)\n elif OmegaConf.get_type(cfg.optim) is AdamConfig:\n optimizer = torch.optim.AdamW(parameters,\n lr=cfg.optim.learning_rate,\n betas=cfg.optim.betas,\n eps=cfg.optim.eps,\n weight_decay=cfg.optim.weight_decay)\n else:\n raise ValueError(\"Optimizer has not been specified correctly.\")\n\n model, optimizer = amp.initialize(model, optimizer,\n enabled=not cfg.training.no_cuda,\n opt_level=cfg.apex.opt_level,\n loss_scale=cfg.apex.loss_scale)\n if state.optim_state is not None:\n optimizer.load_state_dict(state.optim_state)\n # rewind to original state\n if cfg.checkpointing.pretrained_model and cfg.checkpointing.best_iter:\n optimizer.load_state_dict(original_optimizer_state_dict)\n if state.amp_state is not None:\n amp.load_state_dict(state.amp_state)\n\n # Track states for optimizer/amp\n state.track_optim_state(optimizer)\n if not cfg.training.no_cuda:\n state.track_amp_state(amp)\n\n if is_distributed:\n model = DistributedDataParallel(model, device_ids=[device_id])\n logging.info(model)\n logging.info(\"Number of parameters: %d\" % DeepSpeech.get_param_size(model))\n\n criterion = CTCLoss()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n \n for epoch in range(cfg.training.epochs):\n model.train()\n end = time.time()\n start_epoch_time = time.time()\n state.set_epoch(epoch=epoch)\n train_sampler.set_epoch(epoch=epoch)\n train_sampler.reset_training_step(training_step=state.training_step)\n for i, (data) in enumerate(train_loader, start=state.training_step):\n state.set_training_step(training_step=i)\n inputs, targets, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n # measure data loading time\n data_time.update(time.time() - end)\n inputs = inputs.to(device)\n\n out, output_sizes = model(inputs, input_sizes)\n out = out.transpose(0, 1) # TxNxH\n\n float_out = out.float() # ensure float32 for loss\n loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)\n loss = loss / inputs.size(0) # average the loss by minibatch\n loss_value = loss.item()\n\n # Check to 
ensure valid loss was calculated\n valid_loss, error = check_loss(loss, loss_value)\n if valid_loss:\n optimizer.zero_grad()\n\n # compute gradient\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.optim.max_norm)\n optimizer.step()\n else:\n logging.info(error)\n logging.info('Skipping grad update')\n loss_value = 0\n\n state.avg_loss += loss_value\n losses.update(loss_value, inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if (i + 1) % 100 == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n (epoch + 1), (i + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))\n\n if main_proc and cfg.checkpointing.checkpoint_per_iteration:\n checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)\n del loss, out, float_out\n\n state.avg_loss /= len(train_dataset)\n\n epoch_time = time.time() - start_epoch_time\n logging.info('Training Summary Epoch: [{0}]\\t'\n 'Time taken (s): {epoch_time:.0f}\\t'\n 'Average Loss {loss:.3f}\\t'.format(epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))\n\n with torch.no_grad():\n wer, cer, output_data = run_evaluation(test_loader=test_loader,\n device=device,\n model=model,\n decoder=evaluation_decoder,\n target_decoder=evaluation_decoder)\n\n state.add_results(epoch=epoch,\n loss_result=state.avg_loss,\n wer_result=wer,\n cer_result=cer)\n\n logging.info('Validation Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=wer, cer=cer))\n\n if main_proc and cfg.visualization.visdom:\n visdom_logger.update(epoch, state.result_state)\n if main_proc and cfg.visualization.tensorboard:\n tensorboard_logger.update(epoch, state.result_state, model.named_parameters())\n\n if main_proc and cfg.checkpointing.checkpoint and (epoch == cfg.training.epochs - 1 or epoch % 5 == 0): # Save epoch checkpoint\n checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)\n # anneal lr\n for g in optimizer.param_groups:\n g['lr'] = g['lr'] / cfg.optim.learning_anneal\n logging.info('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))\n\n if main_proc and (state.best_wer is None or state.best_wer > wer):\n checkpoint_handler.save_best_model(epoch=epoch, state=state)\n state.set_best_wer(wer)\n state.reset_avg_loss()\n state.reset_training_step() # Reset training step for next epoch\n logging.info(state.best_wer)\n"
]
| [
[
"torch.nn.parallel.DistributedDataParallel",
"torch.distributed.init_process_group",
"numpy.random.seed"
]
]
|
Matgenix/pysisso | [
"b619c297a6892cfeb6068ca4d84e0cf079da3ecc"
]
| [
"pysisso/tests/test_sklearn.py"
]
| [
"# -*- coding: utf-8 -*-\n# Copyright (c) 2020, Matgenix SRL, All rights reserved.\n# Distributed open source for academic and non-profit users.\n# Contact Matgenix for commercial usage.\n# See LICENSE file for details.\n\n\nimport datetime\nimport os\nimport shutil\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom monty.tempfile import ScratchDir\n\nimport pysisso\nimport pysisso.sklearn\nfrom pysisso.outputs import SISSOOut\nfrom pysisso.sklearn import SISSORegressor, get_timestamp\n\nTEST_FILES_DIR = os.path.abspath(\n os.path.join(pysisso.__file__, \"..\", \"..\", \"test_files\")\n)\n\n\[email protected]\ndef test_get_timestamp(mocker):\n timestamp = get_timestamp(datetime.datetime(2014, 5, 28, 9, 6, 57, 6521))\n assert isinstance(timestamp, str)\n assert timestamp == \"2014_05_28_09_06_57_006521\"\n\n\[email protected]\ndef test_sisso_regressor(mocker):\n\n # Simple single task SISSO runs with various options for the run directory\n # Mock the run of the custodian by just copying a reference SISSO.out file\n def copy_sisso_out():\n shutil.copy(\n os.path.join(TEST_FILES_DIR, \"runs\", \"perfect_linear_5pts\", \"SISSO.out\"),\n \"SISSO.out\",\n )\n\n mocker.patch.object(\n pysisso.sklearn.Custodian,\n \"run\",\n return_value=[],\n side_effect=copy_sisso_out,\n )\n\n makedirs_spy = mocker.spy(pysisso.sklearn, \"makedirs_p\")\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert os.path.exists(\"SISSO_dir\")\n makedirs_spy.assert_called_with(\"SISSO_dir\")\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(\n desc_dim=1, rung=0, subs_sis=1, method=\"L0\", run_dir=\"mySISSOdir\"\n )\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert os.path.exists(\"mySISSOdir\")\n assert not os.path.exists(\"SISSO_dir\")\n makedirs_spy.assert_called_with(\"mySISSOdir\")\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(\n desc_dim=1,\n rung=0,\n subs_sis=1,\n method=\"L0\",\n run_dir=\"mySISSOdir\",\n clean_run_dir=True,\n )\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert not os.path.exists(\"mySISSOdir\")\n makedirs_spy.assert_called_with(\"mySISSOdir\")\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(\n desc_dim=1, rung=0, subs_sis=1, method=\"L0\", clean_run_dir=True\n )\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert not os.path.exists(\"SISSO_dir\")\n makedirs_spy.assert_called_with(\"SISSO_dir\")\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n # Run with a temporary directory (i.e. 
when run_dir is None, useful for CV)\n # TODO : mocking tempfile did not work here for some reason ...\n mocker.patch(\n \"pysisso.sklearn.get_timestamp\",\n return_value=\"2018_09_28_16_04_54_017895\",\n )\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(\n desc_dim=1,\n rung=0,\n subs_sis=1,\n method=\"L0\",\n run_dir=None,\n clean_run_dir=False,\n )\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert os.path.exists(\"SISSO_runs\")\n dirs = os.listdir(\"SISSO_runs\")\n assert len(dirs) == 1\n sisso_dir = dirs[0]\n assert sisso_dir.startswith(\"SISSO_dir_2018_09_28_16_04_54_017895_\")\n makedirs_spy.assert_called_with(\"SISSO_runs\")\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n # Run with a temporary directory (i.e. when run_dir is None, useful for CV)\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(\n desc_dim=1,\n rung=0,\n subs_sis=1,\n method=\"L0\",\n run_dir=None,\n clean_run_dir=True,\n )\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert os.path.exists(\"SISSO_runs\")\n dirs = os.listdir(\"SISSO_runs\")\n assert len(dirs) == 0\n assert makedirs_spy.call_count == 1\n makedirs_spy.reset_mock()\n\n # Simple multi task SISSO run\n # Mock the run of the custodian by just copying a reference SISSO.out file\n def copy_sisso_out():\n shutil.copy(\n os.path.join(\n TEST_FILES_DIR, \"runs\", \"perfect_linear_5pts_multi\", \"SISSO.out\"\n ),\n \"SISSO.out\",\n )\n\n mocker.patch.object(\n pysisso.sklearn.Custodian,\n \"run\",\n return_value=[],\n side_effect=copy_sisso_out,\n )\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n sisso_reg.fit(\n np.array([[1], [2], [3], [4], [5]]),\n np.array([[0, -3], [1, -5], [2, -7], [3, -9], [4, -11]]),\n )\n pred = sisso_reg.predict([[1.5], [4.5]])\n assert pred[0] == pytest.approx([0.5, -4])\n assert pred[1] == pytest.approx([3.5, -10])\n assert sisso_reg.columns == [\"feat1\"]\n\n # Test of initializations and errors\n # Run with a numpy array\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n sisso_reg.fit(\n np.array([[1, 5], [2, 3], [3, 89], [4, 1], [5, 4]]),\n np.array([[0, -3], [1, -5], [2, -7], [3, -9], [4, -11]]),\n )\n assert sisso_reg.columns == [\"feat1\", \"feat2\"]\n\n # Run with a pandas Dataframe\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n X_df = pd.DataFrame(\n [[1, 5], [2, 3], [3, 89], [4, 1], [5, 4]], columns=[\"a\", \"b\"]\n )\n sisso_reg.fit(X_df, np.array([[0, -3], [1, -5], [2, -7], [3, -9], [4, -11]]))\n assert sisso_reg.columns == [\"a\", \"b\"]\n\n # Run raising errors about columns\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n X_df = pd.DataFrame(\n [[1, 5], [2, 3], [3, 89], [4, 1], [5, 4]], columns=[\"a\", \"b\"]\n )\n with pytest.raises(\n ValueError,\n match=r\"Columns should be of the size of the \" r\"second axis of X.\",\n ):\n sisso_reg.fit(\n X_df,\n np.array([[0, -3], [1, -5], [2, -7], [3, -9], [4, -11]]),\n columns=[\"a\", \"b\", \"c\"],\n )\n\n # Run raising errors about index\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n X_df = pd.DataFrame([[1], [2], 
[3], [4], [5]])\n with pytest.raises(ValueError, match=r\"Index, X and y should have same size.\"):\n sisso_reg.fit(X_df, np.array([[0], [1], [2], [3]]))\n\n # Run raising errors about index\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n X_df = pd.DataFrame([[1], [2], [3], [4], [5]])\n with pytest.raises(ValueError, match=r\"Index, X and y should have same size.\"):\n sisso_reg.fit(\n X_df,\n np.array([[0], [1], [2], [3], [4]]),\n index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"],\n )\n\n # Run with a wrong shape for y target\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n X_df = pd.DataFrame([[1], [2], [3], [4], [5]])\n with pytest.raises(ValueError, match=r\"Wrong shapes.\"):\n sisso_reg.fit(X_df, np.array([[[0], [1], [2], [3], [4]]]))\n\n\[email protected]\ndef test_sisso_regressor_omp(mocker):\n # Simple SISSO run with OMP\n # Mock the run of the custodian by just copying a reference SISSO.out file\n def copy_sisso_out():\n shutil.copy(\n os.path.join(TEST_FILES_DIR, \"runs\", \"OMP\", \"SISSO.out\"),\n \"SISSO.out\",\n )\n\n mocker.patch.object(\n pysisso.sklearn.Custodian,\n \"run\",\n return_value=[],\n side_effect=copy_sisso_out,\n )\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor.OMP(desc_dim=4)\n assert sisso_reg.rung == 0\n assert sisso_reg.subs_sis == 1\n assert sisso_reg.desc_dim == 4\n assert sisso_reg.method == \"L0\"\n assert sisso_reg.L1L0_size4L0 is None\n X = np.array(\n [\n [8, 1, 3.01, 4],\n [6, 2, 3.02, 3],\n [2, 3, 3.01, 0],\n [10, 4, 3.02, -8],\n [4, 5, 3.01, 10],\n ]\n )\n y = 0.9 * X[:, 1] + 0.1 * X[:, 3] - 1.0\n sisso_reg.fit(X, y)\n\n actual_sin = \"SISSO_dir/SISSO.in\"\n ref_sin = os.path.join(TEST_FILES_DIR, \"runs\", \"OMP\", \"SISSO.in\")\n assert [line for line in open(actual_sin)] == [line for line in open(ref_sin)]\n\n sisso_out = SISSOOut.from_file(filepath=\"SISSO_dir/SISSO.out\")\n assert sisso_out.params.n_rungs == sisso_reg.rung\n assert sisso_out.params.SIS_subspaces_sizes == [sisso_reg.subs_sis]\n assert sisso_out.params.descriptor_dimension == sisso_reg.desc_dim\n assert sisso_out.params.sparsification_method == sisso_reg.method\n\n sisso_model = sisso_out.model\n assert str(sisso_model.descriptors[0]) == \"(feature_1)\"\n assert str(sisso_model.descriptors[1]) == \"(feature_3)\"\n\n\[email protected]\ndef test_model_persistence(mocker):\n # Simple single task SISSO runs with various options for the run directory\n # Mock the run of the custodian by just copying a reference SISSO.out file\n def copy_sisso_out():\n shutil.copy(\n os.path.join(TEST_FILES_DIR, \"runs\", \"perfect_linear_5pts\", \"SISSO.out\"),\n \"SISSO.out\",\n )\n\n mocker.patch.object(\n pysisso.sklearn.Custodian,\n \"run\",\n return_value=[],\n side_effect=copy_sisso_out,\n )\n\n with ScratchDir(\".\"):\n sisso_reg = SISSORegressor(desc_dim=1, rung=0, subs_sis=1, method=\"L0\")\n sisso_reg.fit(np.array([[1], [2], [3], [4], [5]]), np.array([0, 1, 2, 3, 4]))\n joblib.dump(sisso_reg, filename=\"model.joblib\")\n sisso_reg_loaded = joblib.load(\"model.joblib\")\n pred = sisso_reg_loaded.predict([[1.5], [4.5]])\n assert pred[0] == 0.5\n assert pred[1] == 3.5\n assert sisso_reg.get_params() == sisso_reg_loaded.get_params()\n model = sisso_reg.sisso_out.model\n model_loaded = sisso_reg_loaded.sisso_out.model\n assert len(model.coefficients) == 1\n assert len(model_loaded.coefficients) == 1\n assert model.coefficients[0] == pytest.approx(model_loaded.coefficients[0])\n"
]
| [
[
"numpy.array",
"pandas.DataFrame"
]
]
|
MinorDudle/nasa_hack | [
"6ac146936a479a440ea235ee3656a15b8d66cd00"
]
| [
"app/utils/transformer.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nimport PIL.Image\nimport cv2 as cv\n\n\nclass FullTransformation:\n\n def __init__(self, original_path, style_path, output_path, params, model):\n self.original_path = original_path\n self.style_path = style_path\n self.output_path = output_path\n self.params = params\n self.model = model\n\n @staticmethod\n def tensor_to_image(tensor):\n tensor = tensor * 255\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor) > 3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return PIL.Image.fromarray(tensor)\n\n @staticmethod\n def load_img(path_to_img):\n max_dim = 512\n img = tf.io.read_file(path_to_img)\n img = tf.image.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n\n new_shape = tf.cast(shape * scale, tf.int32)\n\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n return img\n\n @property\n def content_image(self):\n return self.load_img(self.original_path)\n\n @property\n def full_transformation(self):\n\n style_image = self.load_img(self.style_path)\n return self.tensor_to_image(self.model(tf.constant(self.content_image), tf.constant(style_image))[0][0])\n\n @property\n def original_transformation(self):\n return self.tensor_to_image(self.content_image)\n\n\nclass WeightedTransformation:\n\n def __init__(self, original_path, full_transformation_path, output_path, params):\n self.original_path = original_path\n self.full_transformation_path = full_transformation_path\n self.output_path = output_path\n self.params = params\n if params[\"T_wt\"]/2 != 0:\n self.beta = 0.5 + params[\"T_wt\"]/2\n else:\n self.beta = 0\n\n @property\n def original_image(self):\n return cv.imread(self.original_path)\n\n @property\n def styled_image(self):\n return cv.imread(self.full_transformation_path)\n\n @property\n def weighted_image(self):\n alpha = 1-self.beta\n return cv.addWeighted(self.original_image, alpha, self.styled_image, self.beta, 0.0)\n"
]
| [
[
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.cast",
"numpy.ndim",
"tensorflow.image.resize",
"tensorflow.image.decode_image",
"tensorflow.image.convert_image_dtype",
"tensorflow.io.read_file",
"numpy.array"
]
]
|
HenryLittle/OpenPCDet-HL | [
"7dba01750e10d170849314723ec0665782236a70"
]
| [
"pcdet/datasets/processor/data_processor.py"
]
| [
"from functools import partial\n\nimport numpy as np\nfrom skimage import transform\n\nfrom ...utils import box_utils, common_utils\n\ntv = None\ntry:\n import cumm.tensorview as tv\nexcept:\n pass\n\n\n\nclass VoxelGeneratorWrapper():\n \"\"\"\n A wrapper around the [spconv VoxelGenerator]\n\n self.grid_size: resolution of the voxel grid\n self.voxel: the size of a single voxel\n \"\"\"\n def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels):\n try:\n from spconv.utils import VoxelGeneratorV2 as VoxelGenerator\n self.spconv_ver = 1\n except:\n try:\n from spconv.utils import VoxelGenerator\n self.spconv_ver = 1\n except:\n from spconv.utils import Point2VoxelCPU3d as VoxelGenerator\n self.spconv_ver = 2\n\n if self.spconv_ver == 1:\n self._voxel_generator = VoxelGenerator(\n voxel_size=vsize_xyz,\n point_cloud_range=coors_range_xyz,\n max_num_points=max_num_points_per_voxel,\n max_voxels=max_num_voxels\n )\n else:\n self._voxel_generator = VoxelGenerator(\n vsize_xyz=vsize_xyz,\n coors_range_xyz=coors_range_xyz,\n num_point_features=num_point_features,\n max_num_points_per_voxel=max_num_points_per_voxel,\n max_num_voxels=max_num_voxels\n )\n\n def generate(self, points):\n if self.spconv_ver == 1:\n voxel_output = self._voxel_generator.generate(points)\n if isinstance(voxel_output, dict):\n voxels, coordinates, num_points = \\\n voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']\n else:\n voxels, coordinates, num_points = voxel_output\n else:\n assert tv is not None, f\"Unexpected error, library: 'cumm' wasn't imported properly.\"\n voxel_output = self._voxel_generator.point_to_voxel(tv.from_numpy(points))\n tv_voxels, tv_coordinates, tv_num_points = voxel_output\n # make copy with numpy(), since numpy_view() will disappear as soon as the generator is deleted\n voxels = tv_voxels.numpy()\n coordinates = tv_coordinates.numpy()\n num_points = tv_num_points.numpy()\n return voxels, coordinates, num_points\n\n\nclass DataProcessor(object):\n def __init__(self, processor_configs, point_cloud_range, training, num_point_features):\n self.point_cloud_range = point_cloud_range\n self.training = training\n self.num_point_features = num_point_features\n self.mode = 'train' if training else 'test'\n self.grid_size = self.voxel_size = None\n self.data_processor_queue = []\n\n self.voxel_generator = None\n\n for cur_cfg in processor_configs:\n cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg)\n self.data_processor_queue.append(cur_processor)\n\n def mask_points_and_boxes_outside_range(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.mask_points_and_boxes_outside_range, config=config)\n\n if data_dict.get('points', None) is not None:\n mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range)\n data_dict['points'] = data_dict['points'][mask]\n\n if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:\n mask = box_utils.mask_boxes_outside_range_numpy(\n data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)\n )\n data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]\n return data_dict\n\n def shuffle_points(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.shuffle_points, config=config)\n\n if config.SHUFFLE_ENABLED[self.mode]:\n points = data_dict['points']\n shuffle_idx = np.random.permutation(points.shape[0])\n points = 
points[shuffle_idx]\n data_dict['points'] = points\n\n return data_dict\n\n def transform_points_to_voxels(self, data_dict=None, config=None):\n if data_dict is None:\n grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)\n self.grid_size = np.round(grid_size).astype(np.int64)\n self.voxel_size = config.VOXEL_SIZE\n # just bind the config, we will create the VoxelGeneratorWrapper later,\n # to avoid pickling issues in multiprocess spawn\n return partial(self.transform_points_to_voxels, config=config)\n\n if self.voxel_generator is None:\n self.voxel_generator = VoxelGeneratorWrapper(\n vsize_xyz=config.VOXEL_SIZE,\n coors_range_xyz=self.point_cloud_range,\n num_point_features=self.num_point_features,\n max_num_points_per_voxel=config.MAX_POINTS_PER_VOXEL,\n max_num_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode],\n )\n\n points = data_dict['points']\n voxel_output = self.voxel_generator.generate(points)\n voxels, coordinates, num_points = voxel_output\n\n if not data_dict['use_lead_xyz']:\n voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)\n\n data_dict['voxels'] = voxels\n data_dict['voxel_coords'] = coordinates\n data_dict['voxel_num_points'] = num_points\n return data_dict\n\n def sample_points(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.sample_points, config=config)\n\n num_points = config.NUM_POINTS[self.mode]\n if num_points == -1:\n return data_dict\n\n points = data_dict['points']\n if num_points < len(points):\n pts_depth = np.linalg.norm(points[:, 0:3], axis=1)\n pts_near_flag = pts_depth < 40.0\n far_idxs_choice = np.where(pts_near_flag == 0)[0]\n near_idxs = np.where(pts_near_flag == 1)[0]\n choice = []\n if num_points > len(far_idxs_choice):\n near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)\n choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n if len(far_idxs_choice) > 0 else near_idxs_choice\n else: \n choice = np.arange(0, len(points), dtype=np.int32)\n choice = np.random.choice(choice, num_points, replace=False)\n np.random.shuffle(choice)\n else:\n choice = np.arange(0, len(points), dtype=np.int32)\n if num_points > len(points):\n extra_choice = np.random.choice(choice, num_points - len(points), replace=False)\n choice = np.concatenate((choice, extra_choice), axis=0)\n np.random.shuffle(choice)\n data_dict['points'] = points[choice]\n return data_dict\n\n def calculate_grid_size(self, data_dict=None, config=None):\n if data_dict is None:\n grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)\n self.grid_size = np.round(grid_size).astype(np.int64)\n self.voxel_size = config.VOXEL_SIZE\n return partial(self.calculate_grid_size, config=config)\n return data_dict\n\n def downsample_depth_map(self, data_dict=None, config=None):\n if data_dict is None:\n self.depth_downsample_factor = config.DOWNSAMPLE_FACTOR\n return partial(self.downsample_depth_map, config=config)\n\n data_dict['depth_maps'] = transform.downscale_local_mean(\n image=data_dict['depth_maps'],\n factors=(self.depth_downsample_factor, self.depth_downsample_factor)\n )\n return data_dict\n\n def forward(self, data_dict):\n \"\"\"\n Args:\n data_dict:\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n ...\n\n Returns:\n \"\"\"\n\n for cur_processor in self.data_processor_queue:\n data_dict = cur_processor(data_dict=data_dict)\n\n 
return data_dict\n"
]
| [
[
"numpy.random.choice",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.round",
"numpy.random.permutation",
"numpy.array",
"numpy.where"
]
]
|
ansonb/RECON | [
"1a0c21479f017a05d86b27f56e3d60bd9bf4a191"
]
| [
"semanticgraph/vis_utils.py"
]
| [
"import matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\n\nimport numpy as np\nfrom .graph_utils import vertex_by_token_position\n\n\ndef show_relation_graph(g):\n \"\"\"\n Displays the relation graph using matplotlib.\n\n :param g: input graph.\n \"\"\"\n if \"vertexSet\" not in g:\n vertex_indices = {str(indices):indices for e in g[\"edgeSet\"] for indices in [e[\"left\"]] + [e[\"right\"]] }\n g[\"vertexSet\"] = []\n for k, v in vertex_indices.items():\n g[\"vertexSet\"].append({\"lexicalInput\": \" \".join([g['tokens'][idx] for idx in v])})\n fig, ax = plt.subplots()\n step = np.pi*2 / float(len(g[\"vertexSet\"]))\n print(step, len(g[\"vertexSet\"]))\n x, y = 0.0, 0.0\n vertex_coordinates = {}\n\n for i, vertex in enumerate(g[\"vertexSet\"]):\n x, y = 1 - np.cos(step*i)*2, 1 - np.sin(step*i)\n vertex_coordinates[vertex[\"lexicalInput\"]] = x, y\n circle = mpatches.Circle([x,y], 0.1, fc = \"none\")\n ax.add_patch(circle)\n x, y = 1 - np.cos(step*i)*2.5, 1 - np.sin(step*i)*1.25\n plt.text(x, y, vertex[\"lexicalInput\"], ha=\"center\", family='sans-serif', size=10)\n\n for edge in g[\"edgeSet\"]:\n left_vertex = vertex_by_token_position(g, edge['left']) if len(edge['left']) > 0 else {}\n right_vertex = vertex_by_token_position(g, edge['right']) if len(edge['right']) > 0 else {}\n if left_vertex == {}:\n left_vertex['lexicalInput'] = \" \".join([g['tokens'][idx] for idx in edge['left']])\n if right_vertex == {}:\n right_vertex['lexicalInput'] = \" \".join([g['tokens'][idx] for idx in edge['right']])\n\n x, y = list(zip(vertex_coordinates[left_vertex[\"lexicalInput\"]], vertex_coordinates[right_vertex[\"lexicalInput\"]]))\n line = mlines.Line2D(x, y, lw=1., alpha=1)\n ax.add_line(line)\n property_kbid = \"\" if 'kbID' not in edge else edge['kbID']\n property_label = \"\" if 'lexicalInput' not in edge else edge['lexicalInput']\n plt.text(np.average(x), np.average(y), property_kbid + \":\" + property_label, ha=\"center\", family='sans-serif', size=10)\n\n plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n plt.axis('equal')\n plt.axis('off')\n\n plt.show()"
]
| [
[
"matplotlib.lines.Line2D",
"matplotlib.patches.Circle",
"matplotlib.pyplot.subplots",
"numpy.average",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show"
]
]
|
tapper/Tapper | [
"f104ee44ad1f83b9367e2fcd1f0b4f5e54bfc84d"
]
| [
"examples/dpath/python_mason.py"
]
| [
"#!/usr/bin/env python\n\n#############\n# Author : Conny Seidel <[email protected]>\n#############\n\n'''\nThis is a testscript to gather the needed information in Python via socket and\nmason template.\n'''\nimport sys\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom socket import *\n\n\nmason='''#! mason <<EOTEMPLATE\n% use Data::DPath 'dpath';\n% use Data::Dumper;\n% use DateTime;\n%# my $dt1 = DateTime->new( year => 2009, month => 9,day => 11,hour => 7,minute => 29,second => 58,nanosecond => 0,time_zone => 'GMT',);\n%# my $dt2 = DateTime->new( year => 2009, month => 9,day => 11,hour => 7,minute => 30,second => 59,nanosecond => 0,time_zone => 'GMT',);\n%# proper timedefinition\n% my $dt1 = DateTime->today()->subtract( weeks => 1 );\n% my $dt2 = DateTime->today();\n% my @test = reportdata \"{ suite_name => {'like', 'Topic-%'}, created_at => { -between => [ '$dt1' , '$dt2' ] } } :: //xen_changeset/../\";\n% foreach my $t (@test) {\n% while ( my ($key, $value) = each(%$t) ) {\n% print \"$value,\";\n% }\n% print \"\\\\n\";\n% }\nEOTEMPLATE\n'''\n\ndef send_template(template):\n '''\n send the template to the receiver api\n '''\n s = socket(AF_INET, SOCK_STREAM)\n received_data = ''\n try:\n s.connect((\"165.204.15.71\",7358))\n s.send(template)\n while 1:\n data = s.recv(4096)\n if not data: break\n received_data += data\n finally:\n s.close()\n return received_data.split('\\n')\n\n\n\ndef parse(data):\n '''\n sort the collected data\n '''\n summary = { 32 : [], 64 : [] }\n for lines in data:\n if not data == '':\n split = re.sub(r',$','', lines).split(',')\n if re.search('.*xen.*i686.*', lines):\n summary[32].append(split)\n elif re.search('.*xen.*x86_64.*', lines):\n summary[64].append(split)\n return summary\n\ndef format_results(format):\n res = {}\n for key in format.keys():\n tmp_dict = {}\n for value in format[key]:\n if not tmp_dict.has_key(value[1]):\n tmp_dict[value[1]] = 1\n else:\n tmp_dict[value[1]] += 1\n res[key] = { 'versions': sorted(tmp_dict.keys()),\n 'values': [tmp_dict[i] for i in sorted(tmp_dict.keys())] }\n return res[32], res[64]\n\ntest_data = send_template(mason)\nparsed_data = parse(test_data)\ni386, x86_64 = format_results(parsed_data)\n\nN = len(i386['values'])\n\nbit32 = i386['values']\n\nind = np.arange(N) # the x locations for the groups\nwidth = 0.35 # the width of the bars\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nrects1 = ax.bar(ind, bit32, width, color='b')\n\nbit64 = x86_64['values']\nrects2 = ax.bar(ind+width, bit64, width, color='g')\n\n# add some\nax.set_ylabel('Tests')\nax.set_title('Overview:\\nXEN testing effort')\nax.set_xticks(ind+width)\nax.set_xticklabels( i386['versions'] )\nax.set_yticks(np.arange(0,60,10))\n\nax.legend( (rects1[0], rects2[0]), ('32 bit', '64 bit') )\n\ndef autolabel(rects):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\n\nplt.show()\n\n\n\n\n# vim:set ft=python et ts=4 sw=4 sts=4 sta ai si tw=78:\n"
]
| [
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
mvinyard/genomeIO | [
"e905966e95d6ba06748ca836d420d938dcaa3add"
]
| [
"genomeIO/_read_macs2/_read_narrowPeak.py"
]
| [
"\nimport glob\nimport os\nimport pandas as pd\n\ndef _return_narrowPeak_columns():\n\n \"\"\"\"\"\"\n\n return [\n \"chrom\",\n \"chromStart\",\n \"chromEnd\",\n \"name\",\n \"score\",\n \"strand\",\n \"signalValue\",\n \"pValue\",\n \"qValue\",\n \"peak\",\n ]\n\n\ndef _read_narrowPeak(path):\n\n \"\"\"\"\"\"\n\n narrowPeak_cols = _return_narrowPeak_columns()\n narrowPeaks_df = pd.read_csv(path, sep=\"\\t\", header=None, names=narrowPeak_cols)\n\n return narrowPeaks_df\n\n\nclass narrowPeaks:\n def __init__(self, path):\n\n \"\"\"\"\"\"\n \n self.path = path\n self.DataFrames = {}\n\n def read_group(\n self,\n columns=[\"chrom\", \"chromStart\", \"chromEnd\"],\n colnames=[\"chr\", \"start\", \"end\"],\n ):\n\n \"\"\"\"\"\"\n\n for n, path in enumerate(glob.glob(self.path)):\n df = _read_narrowPeak(path)\n df[\"sample\"] = name = os.path.basename(path).split(\".\")[0]\n self.DataFrames[name] = df\n\n self.df = (\n pd.concat(list(self.DataFrames.values()))[columns]\n .drop_duplicates()\n .reset_index(drop=True)\n )\n self.df.columns = colnames\n\n\ndef _read_multi_narrowPeaks(\n narrowPeak_path,\n columns=[\"chrom\", \"chromStart\", \"chromEnd\"],\n colnames=[\"chr\", \"start\", \"end\"],\n):\n\n \"\"\"\"\"\"\n\n nPeaks = narrowPeaks(narrowPeak_path)\n nPeaks.read_group()\n\n return nPeaks.df"
]
| [
[
"pandas.read_csv"
]
]
|
ngachung/incubator-sdap-nexus | [
"38e768694fcc142e2d88283cb1e44e05f88da847"
]
| [
"climatology/clim/util/array.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#!/bin/env python\n\n\"\"\"\narray.py -- Simple utilities for arrays: slice, compress by mask, bundle up, etc.\n\"\"\"\n\nimport sys, os\nimport numpy as N\n#import pynio as NC\n\n\ndef sliceArrays(i, j, *arrays):\n \"\"\"Slice all input arrays using the supplied indices, a[i:j], and return them in the\nsame order as input.\n \"\"\"\n return [a[i:j] for a in arrays]\n\ndef compressArrays(mask, *arrays):\n \"\"\"Compress all input arrays using the supplied mask, a.compress(mask), and return them\nin the same order as input.\n \"\"\"\n return [a.compress(mask) for a in arrays]\n\n\nclass BadNameError(RuntimeError): pass\nclass DimensionError(RuntimeError): pass\nclass VariableError(RuntimeError): pass\n\nReservedKeywords = ['dimensions', 'variables', 'attributes']\n\n\nclass BundleOfArrays(object):\n \"\"\"Simple holder class for a bundle of arrays (variables). Each variable is a vector\nor array over the supplied dimensions, or a scalar value. Its purpose is to synchronize\nmodification of many arrays, such as by slice or compress, and to provide persistence for\na bundle of variables to a file (e.g. netCDF).\n \"\"\"\n def __init__(self, dimensions=None, **kwargs):\n \"\"\"Init object with array dimensions and scalar values.\"\"\"\n self.dimensions = {}; self.variables = {}; self.attributes = {}\n self.createAttributes(**kwargs)\n if dimensions is None: dimensions = ('Record', -1) # default dim name for vectors\n self.createDims(dimensions) # of unlimited dimension (-1)\n\n def createAttribute(self, name, val):\n self.checkForBadName(name)\n self.attributes[name] = val\n setattr(self, name, val)\n return self\n\n def createAttributes(self, **kwargs):\n for key, val in kwargs.items():\n self.createAttribute(key, val)\n\n def createDim(self, name, size):\n \"\"\"Create a dimension to be used in the variable arrays.\"\"\"\n self.checkForBadName(name)\n if type(size) != int: raise DimensionError('Size of dimension must be an integer')\n self.dimensions[name] = size\n\n def createDims(self, *dims):\n \"\"\"Create multiple dimensions from list of (name, size) tuples.\"\"\"\n for dim in dims:\n if type(dim) == tuple:\n name, val = dim\n else:\n name = dim.name; val = dim\n self.createDim(name, val)\n return self\n\n def createVar(self, name, val, copy=False):\n \"\"\"Create a variable array. If the value is None, the variable is not created.\nBy default, the array is NOT copied since a copy may have just been made by slicing, etc.\nIf you need to make a copy of the incoming val array, use copy=True.\n \"\"\"\n self.checkForBadName(name)\n if val is not None:\n try:\n n = len(val)\n if copy:\n try:\n val = val.copy() # use copy method if it exists, e.g. 
numpy array\n except:\n val = val[:] # copy array by slicing\n except:\n raise VariableError('Variable must be a list, vector, array, or None.')\n self.variables[name] = val\n setattr(self, name, val)\n\n def createVars(self, *vars):\n \"\"\"Create multiple variables from list of (name, value) tuples.\"\"\"\n for var in vars:\n if type(var) == list or type(var) == tuple:\n name, val = var\n else:\n name = var.name; val = var\n self.createVar(name, val)\n return self\n\n def slice(self, i, j):\n \"\"\"Slice all of the variable arrays as a[i:j], and return new array bundle.\"\"\"\n out = self.shallowCopy()\n for key in self.variables:\n val = self.variables[key][i:j]\n out.createVar(key, val, copy=False)\n return out\n \n def compress(self, mask):\n \"\"\"Compress all of the variable arrays using a mask, a.compress(mask)[i:j], and return\na new array bundle.\n \"\"\"\n out = self.shallowCopy()\n for key in self.variables:\n s = self.variables[key].shape\n a = self.variables[key]\n if len(s) == 1:\n val = a.compress(mask)\n else:\n # Can't use N.compress() from old numpy version because\n # it flattens the additional dimensions (bug).\n # Temporarily, do it by lists in python (slower)\n val = N.array([a[i] for i, b in enumerate(mask) if b])\n out.createVar(key, val, copy=True)\n return out\n\n def shallowCopy(self):\n \"\"\"Shallow copy of the object, retaining all attributes, dimensions, and variables.\n*** Note: This routine does NOT copy the arrays themselves, but slice & compress do. ***\n \"\"\"\n out = BundleOfArrays()\n out.attributes = self.attributes.copy()\n # Must use copy() here or both bundles will point to same attr/dim/var dictionaries.\n # It is a shallow copy, but this is OK since attr/dim values are immutable.\n for key, val in out.attributes.items(): setattr(out, key, val)\n out.dimensions = self.dimensions.copy()\n out.variables = self.variables.copy()\n # Again, shallow copy is OK, referred-to arrays are copied when slice/compress called\n for key, val in out.variables.items(): setattr(out, key, val)\n return out\n\n def checkForBadName(self, name):\n \"\"\"Ensure that name is not in reserved attribute list.\nRaises exception BadNameError.\n \"\"\"\n if name in ReservedKeywords:\n raise BadNameError('Attribute name, \"%s\", is in reserved list %s' % (name, \\\n str(ReservedKeywords)))\n# try:\n# k = getattr(self, name)\n# raise BadNameError('Attribute %s already exists.' % name)\n# except:\n# pass\n\n def __repr__(self):\n return 'Attributes: %s\\nDimensions: %s\\nVariables: %s' % tuple(\n map(str, (self.attributes, self.dimensions, list(self.variables.keys()))))\n\n\ndef test(args):\n n = 300\n vars = ['obsL', 'errL', 'obsP', 'errP']\n obsL = N.array(list(range(n)))+1.; errL = N.ones(n)\n obsP = N.array(list(range(n)))+1.; errP = N.ones(n)\n obs = BundleOfArrays(name='test', nRecords=n).createVars(*list(zip(vars, (obsL, errL, obsP, errP))))\n print(obs)\n print(len(obs.obsL))\n a = obs.shallowCopy()\n print(a)\n print(len(a.obsL))\n b = obs.slice(100, 200)\n print(b)\n print(len(b.obsL))\n print(len(a.obsL))\n print(len(obs.obsL))\n\ndef main(args):\n test(args)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n"
]
| [
[
"numpy.ones"
]
]
|
FoteiniD/inter-rel-net | [
"09a27cc7ea71c26ea719e3bb5f6830acf622faef"
]
| [
"src/misc/data_io.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport json, glob\nimport linecache\nimport h5py\n\nfrom keras.utils import to_categorical\n\nTORSO, LEFT_HAND, RIGHT_HAND, LEFT_LEG, RIGHT_LEG = 0,1,2,3,4\n\n# OpenPose body parts\n\n# POSE_BODY_25_BODY_PARTS = [\"Nose\",\"Neck\",\"RShoulder\",\"RElbow\",\"RWrist\",\"LShoulder\",\"LElbow\",\"LWrist\",\"MidHip\",\"RHip\",\"RKnee\",\"RAnkle\",\"LHip\",\"LKnee\",\"LAnkle\",\"REye\",\"LEye\",\"REar\",\"LEar\",\"LBigToe\",\"LSmallToe\",\"LHeel\",\"RBigToe\",\"RSmallToe\",\"RHeel\",\"Background\"]\n# POSE_BODY_25_BODY_PARTS_COARSE = [TORSO, TORSO, RIGHT_HAND, RIGHT_HAND, RIGHT_HAND, LEFT_HAND, LEFT_HAND, LEFT_HAND, TORSO, RIGHT_LEG, RIGHT_LEG,RIGHT_LEG,LEFT_LEG,LEFT_LEG,LEFT_LEG,TORSO,TORSO,TORSO,TORSO,LEFT_LEG,LEFT_LEG,LEFT_LEG,RIGHT_LEG,RIGHT_LEG,RIGHT_LEG, 5]\n# POSE_BODY_25_BODY_PARTS_COARSE_TEXT = [\"TORSO\", \"TORSO\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"TORSO\", \"RIGHT_LEG\", \"RIGHT_LEG\",\"RIGHT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"TORSO\",\"TORSO\",\"TORSO\",\"TORSO\",\"LEFT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\", \"Background\"]\n\nPOSE_BODY_25_BODY_PARTS = [\"Nose\",\"Neck\",\"RShoulder\",\"RElbow\",\"RWrist\",\"LShoulder\",\"LElbow\",\"LWrist\",\"MidHip\",\"RHip\",\"RKnee\",\"RAnkle\",\"LHip\",\"LKnee\",\"LAnkle\",\"REye\",\"LEye\",\"REar\",\"LEar\",\"LBigToe\",\"LSmallToe\",\"LHeel\",\"RBigToe\",\"RSmallToe\",\"RHeel\"]\n\nPOSE_BODY_25_BODY_PARTS_COARSE = [TORSO, TORSO, RIGHT_HAND, RIGHT_HAND, RIGHT_HAND, LEFT_HAND, LEFT_HAND, LEFT_HAND, TORSO, RIGHT_LEG, RIGHT_LEG,RIGHT_LEG,LEFT_LEG,LEFT_LEG,LEFT_LEG,TORSO,TORSO,TORSO,TORSO,LEFT_LEG,LEFT_LEG,LEFT_LEG,RIGHT_LEG,RIGHT_LEG,RIGHT_LEG]\nPOSE_BODY_25_BODY_PARTS_COARSE_TEXT = [\"TORSO\", \"TORSO\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"TORSO\", \"RIGHT_LEG\", \"RIGHT_LEG\",\"RIGHT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"TORSO\",\"TORSO\",\"TORSO\",\"TORSO\",\"LEFT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\"]\n\n### SBU body parts\n\n# SBU_15_BODY_PARTS = [\"HEAD\",\"NECK\",\"TORSO\",\"LEFT_SHOULDER\",\"LEFT_ELBOW\",\"LEFT_HAND\",\"RIGHT_SHOULDER\",\"RIGHT_ELBOW\",\"RIGHT_HAND\",\"LEFT_HIP\",\"LEFT_KNEE\",\"LEFT_FOOT\",\"RIGHT_HIP\",\"RIGHT_KNEE\",\"RIGHT_FOOT\"]\nSBU_15_BODY_PARTS = [\"Nose\",\"Neck\",\"MidHip\",\"LShoulder\",\"LElbow\",\"LWrist\",\"RShoulder\",\"RElbow\",\"RWrist\",\"LHip\",\"LKnee\",\"LAnkle\",\"RHip\",\"RKnee\",\"RAnkle\"] # OpenPose equivalent?\nSBU_15_BODY_PARTS_COARSE = [TORSO,TORSO,TORSO,LEFT_HAND,LEFT_HAND,LEFT_HAND,RIGHT_HAND,RIGHT_HAND,RIGHT_HAND,LEFT_LEG,LEFT_LEG,LEFT_LEG,RIGHT_LEG,RIGHT_LEG,RIGHT_LEG]\nSBU_15_BODY_PARTS_COARSE_TEXT = [\"TORSO\",\"TORSO\",\"TORSO\",\"LEFT_HAND\",\"LEFT_HAND\",\"LEFT_HAND\",\"RIGHT_HAND\",\"RIGHT_HAND\",\"RIGHT_HAND\",\"LEFT_LEG\",\"LEFT_LEG\",\"LEFT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\",\"RIGHT_LEG\"]\n\n### NTU body parts\n# NTU_25_BODY_PARTS = ['Base_of_the_spine', 'Middle_of_the_spine', 'Neck', 'Head', 'Left_shoulder', 'Left_elbow', 'Left_wrist', 'Left_hand', 'Right_shoulder', 'Right_elbow', 'Right_wrist', 'Right_hand', 'Left_hip', 'Left_knee', 'Left_ankle', 'Left_foot', 'Right_hip', 'Right_knee', 'Right_ankle', 'Right_foot', 'Spine', 'Tip_of_the_left_hand', 'Left_thumb', 'Tip_of_the_right_hand', 'Right_thumb']\n\n## OpenPose equivalent\nNTU_25_BODY_PARTS = 
['MidHip','Middle_of_the_spine','Neck','Nose','LShoulder','LElbow','LWrist','Left_hand','RShoulder','RElbow','RWrist','Right_hand','LHip','LKnee','LAnkle','LBigToe','RHip','RKnee','RAnkle','RBigToe','Spine','Tip_of_the_left_hand','Left_thumb','Tip_of_the_right_hand','Right_thumb']\nNTU_25_BODY_PARTS_COARSE = [TORSO, TORSO, TORSO, TORSO, LEFT_HAND, LEFT_HAND, LEFT_HAND, LEFT_HAND, RIGHT_HAND, RIGHT_HAND, RIGHT_HAND, RIGHT_HAND, LEFT_LEG, LEFT_LEG, LEFT_LEG, LEFT_LEG, RIGHT_LEG, RIGHT_LEG, RIGHT_LEG, RIGHT_LEG, TORSO, LEFT_HAND, LEFT_HAND, RIGHT_HAND, RIGHT_HAND]\nNTU_25_BODY_PARTS_COARSE_TEXT = [\"TORSO\", \"TORSO\", \"TORSO\", \"TORSO\", \"LEFT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"LEFT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\", \"LEFT_LEG\", \"LEFT_LEG\", \"LEFT_LEG\", \"LEFT_LEG\", \"RIGHT_LEG\", \"RIGHT_LEG\", \"RIGHT_LEG\", \"RIGHT_LEG\", \"TORSO\", \"LEFT_HAND\", \"LEFT_HAND\", \"RIGHT_HAND\", \"RIGHT_HAND\"]\n\n\ndef filter_joints(person, selected_joints, joint_indexing=POSE_BODY_25_BODY_PARTS):\n joints_mask = np.isin(joint_indexing, selected_joints)\n selected_parts = np.array(joint_indexing)[joints_mask]\n selected_coords = np.array(person['coords'])[joints_mask]\n \n person['coords'] = selected_coords\n \n return person\n\ndef prune_bodies(video_poses, metric_name = 'central'):\n max_num_ppl = np.max([ len(frame_poses) for frame_poses in video_poses])\n bodies_coords = [ [] for _ in range(max_num_ppl) ]\n \n central_points = []\n for frame_poses in video_poses:\n for pose_idx, pose in enumerate(frame_poses):\n bodies_coords[pose_idx].append(pose['coords'].tolist())\n ### Dynamic central_point based on the min and max coords\n curr_coords = [ person['coords'] for person in frame_poses ]\n max_coords = np.array(curr_coords).max(axis=1).max(axis=0)\n non_zero_curr_coords = np.array(curr_coords)\n non_zero_curr_coords[non_zero_curr_coords==0] = 9999\n min_coords = np.array(non_zero_curr_coords).min(axis=1).min(axis=0)\n frame_central_point = np.mean([min_coords, max_coords], axis=0)\n central_points.append(frame_central_point)\n central_point = np.mean(central_points, axis=0)\n \n ### Compute Metric\n metric = []\n for body_coords in bodies_coords:\n body = np.array(body_coords)\n if metric_name == 'motion':\n t1 = body[:-1]\n t2 = body[1:]\n frames_motion = np.linalg.norm(t2 - t1, axis=2).mean(axis=1)\n total_motion = frames_motion.sum()\n metric.append(total_motion)\n elif metric_name == 'central':\n ### Distance to body center\n central_idx = 8 # \"MidHip\"\n dist_central = np.linalg.norm(body[:,central_idx] - central_point, axis=1).mean()\n \n ### Distance to all joints\n # joints_distance = np.linalg.norm(body - central_point, axis=2)\n # dist_central = joints_distance.mean()\n # dist_central = np.median(joints_distance)\n \n metric.append(dist_central)\n \n ### Prune based on metric\n metric_cut = sorted(metric)[:2][-1]\n pruned_video_poses = []\n for frame_poses in video_poses:\n pruned_frame_poses = []\n for pose_idx, pose in enumerate(frame_poses):\n if metric[pose_idx] <= metric_cut:\n # print(pose_idx)\n pruned_frame_poses.append(pose)\n pruned_video_poses.append(pruned_frame_poses)\n \n return pruned_video_poses\n\ndef prune_people(people):\n prunned_people = []\n for person in people:\n if np.mean(person['confs']) < 0.15: # Used at UT\n continue\n prunned_people.append(person)\n \n return prunned_people\n\ndef parse_json(json_filepath, prune=True):\n with open(json_filepath) as json_file:\n frame_data = json.load(json_file)\n \n people = 
[]\n for person in frame_data['people']:\n per = {}\n pose_keypoints_2d = person['pose_keypoints_2d']\n coords_x = pose_keypoints_2d[0::3]\n coords_y = pose_keypoints_2d[1::3]\n confidences = pose_keypoints_2d[2::3]\n coords = np.array([coords_x, coords_y]).T\n per['coords'] = coords\n per['confs'] = confidences\n people.append(per)\n \n if prune:\n people = prune_people(people)\n \n return people\n\ndef denormalize(norm_coords):\n \"\"\" SBU denormalization\n original_X = 1280 - (normalized_X .* 2560);\n original_Y = 960 - (normalized_Y .* 1920);\n original_Z = normalized_Z .* 10000 ./ 7.8125;\n \"\"\"\n denorm_coords = np.empty(norm_coords.shape)\n denorm_coords[:,0] = 1280 - (norm_coords[:,0] * 2560)\n denorm_coords[:,1] = 960 - (norm_coords[:,1] * 1920)\n denorm_coords[:,2] = norm_coords[:,1] * 10000 / 7.8125\n \n return denorm_coords\n\ndef parse_sbu_txt(pose_filepath, normalized=False):\n video_poses_mat = np.loadtxt(pose_filepath, delimiter=',', usecols=range(1,91))\n \n video_poses = []\n for frame_pose in video_poses_mat:\n people = []\n # 2 persons * 15 joints * 3 dimensions\n people_poses = frame_pose.reshape(2,45)\n for person in people_poses:\n per = {}\n if normalized:\n per['coords'] = person.reshape(15,3)\n else:\n per['coords'] = denormalize(person.reshape(15,3))\n per['confs'] = 15*[1]\n people.append(per)\n video_poses.append(people)\n \n return video_poses\n\ndef denormalize_ntu(norm_coords):\n # This is not possible because there is loss of information after normalization\n raise NotImplementedError(\"denormalize_ntu\")\n\ndef apply_NTU_normalization(video_poses, pose_style):\n \"\"\" From \"NTU RGB+D: A Large Scale Dataset for 3D Human Activity Analysis\"\n > We translate them to the body coordinate system with its origin on the “middle of the spine” joint (number 2 in Figure 1), followed by a 3D rotation to fix the X axis parallel to the 3D vector from “right shoulder” to “left shoulder”, and Y axis towards the 3D vector from “spine base” to “spine”. The Z axis is fixed as the new X × Y. 
In the last step of normalization, we scale all the 3D points based on the distance between “spine base” and “spine” joints.\n > In the cases of having more than one body in the scene, we transform all of them with regard to the main actor’s skeleton.\n \n - \"spine base\" was translated to \"MidHip\" at NTU_25_BODY_PARTS\n \n Obs: Since OpenPose and SBU do not have the joints “middle of the spine”\n and “spine”, 'MidHip' and 'Neck' are respectively used instead.\n \"\"\"\n \n if pose_style == 'OpenPose':\n joint_indexing = POSE_BODY_25_BODY_PARTS\n elif pose_style == 'SBU':\n joint_indexing = SBU_15_BODY_PARTS\n else:\n raise NotImplementedError(\"Invalid pose_style: \"+pose_style)\n \n middle_joint_idx = joint_indexing.index('MidHip')\n neck_joint_idx = joint_indexing.index('Neck')\n left_shoulder_joint_idx = joint_indexing.index('LShoulder')\n right_shoulder_joint_idx = joint_indexing.index('RShoulder')\n \n normalized_video_poses = []\n for frame_idx, frame_poses in enumerate(video_poses):\n p1_coords = frame_poses[0]['coords']\n p1_middle_joint = p1_coords[middle_joint_idx]\n p1_neck_joint = p1_coords[neck_joint_idx]\n p1_left_shoulder_joint = p1_coords[left_shoulder_joint_idx]\n p1_right_shoulder_joint = p1_coords[right_shoulder_joint_idx]\n \n new_origin = p1_middle_joint\n scale_val = .5 / np.linalg.norm(p1_middle_joint - p1_neck_joint)\n \n y = p1_neck_joint - p1_middle_joint\n y = y/np.linalg.norm(y)\n \n x = p1_left_shoulder_joint - p1_right_shoulder_joint\n x = x/np.linalg.norm(x)\n \n z = np.cross(x, y)\n z = z/np.linalg.norm(z)\n \n x = np.cross(y,z)\n x = x/np.linalg.norm(x)\n \n rotation_matrix = np.array([x,y,z])\n \n normalized_frame_poses = []\n for person in frame_poses:\n translated = person['coords'] - new_origin\n rotated = np.dot(rotation_matrix, translated.T).T\n scaled = rotated# * scale_val\n \n normalized_person = {'coords': scaled, 'confs': person['confs'].copy()}\n normalized_frame_poses.append(normalized_person)\n \n normalized_video_poses.append(normalized_frame_poses)\n \n return normalized_video_poses\n\ndef parse_ntu_skl(row_start, row_end, normalized=False, \n ntu_skl_filepath='data/ntu-rgbd/skl.csv'):\n \n if ntu_skl_filepath.endswith('.csv'):\n ### Not so fast\n video_poses_mat = []\n for row_idx in range(row_start, row_end+1):\n selected_row = linecache.getline(ntu_skl_filepath, row_idx)\n video_poses_mat.append(np.fromstring(selected_row, sep=','))\n video_poses_mat = np.array(video_poses_mat)\n elif ntu_skl_filepath.endswith('.npy'):\n ### Fast\n video_poses_mat = np.load(ntu_skl_filepath, mmap_mode='r')[row_start-1:row_end]\n elif ntu_skl_filepath.endswith('.mat'):\n ### Fast, but makes training hangs sometimes\n with h5py.File(ntu_skl_filepath,'r') as f:\n video_poses_mat = f['skl'][:,row_start-1:row_end].T\n \n video_poses = []\n for frame_pose in video_poses_mat:\n people = []\n # 2 persons * 25 joints * 3 dimensions\n people_poses = frame_pose.reshape(2,75)\n for person in people_poses:\n per = {}\n if normalized:\n per['coords'] = person.reshape(25,3)\n else:\n per['coords'] = denormalize_ntu(person.reshape(25,3))\n per['confs'] = 25*[1]\n people.append(per)\n video_poses.append(people)\n \n return video_poses\n\ndef parse_ntu_skeleton(skl_filepath):\n \"\"\" Based on ReadBodyFile.m from Liu Jun \"\"\"\n with open(skl_filepath) as skl_file:\n frame_num = int(skl_file.readline().strip())\n video_poses = []\n for frame_id in range(frame_num):\n num_people = int(skl_file.readline().strip())\n persons = []\n for person_id in range(num_people):\n 
values = skl_file.readline().strip().split()\n keys = ['bodyID','clipedEdges','handLeftConfidence',\n 'handLeftState','handRightConfidence','handRightState',\n 'isResticted','leanX','leanY','trackingState','jointCount']\n person_info = dict(zip(keys, values))\n \n num_joints = int(skl_file.readline().strip())\n joints = []\n for joint_id in range(num_joints):\n values = skl_file.readline().strip().split()\n joint_info = {\n 'x': float(values[0]),\n 'y': float(values[1]),\n 'z': float(values[2]),\n 'depthX': float(values[3]),\n 'depthY': float(values[4]),\n 'colorX': float(values[5]),\n 'colorY': float(values[6]),\n 'orientationW': float(values[7]),\n 'orientationX': float(values[8]),\n 'orientationY': float(values[9]),\n 'orientationZ': float(values[10]),\n 'trackingState': values[11],\n }\n joints.append(joint_info)\n person_info['joints'] = joints\n persons.append(person_info)\n video_poses.append(persons)\n return video_poses\n\ndef track_bodies(video_poses):\n # null_joints = np.array([ [-1, -1] for _ in range(25) ])\n null_joints = np.array([ [0, 0] for _ in range(25) ])\n max_num_ppl = np.max([ len(frame_poses) for frame_poses in video_poses])\n bodies_coords = [ [] for _ in range(max_num_ppl) ]\n for pose_idx, pose in enumerate(video_poses[0]):\n bodies_coords[pose_idx].append(pose['coords'].tolist())\n for missing_idx in range(pose_idx+1,max_num_ppl):\n bodies_coords[missing_idx].append(null_joints)\n \n prvs_coords = [ np.array(body[0]) for body in bodies_coords ]\n prvs_coords_log = []\n for prvs_idx, frame_poses in enumerate(video_poses[1:]):\n if frame_poses == []: # Skipping frame because there is no pose extracted\n for body_coords in bodies_coords:\n body_coords.append(null_joints)\n continue\n curr_coords = [ person['coords'] for person in frame_poses ]\n \n joints_distances = [ np.linalg.norm(curr_coords - coord, axis=2) for coord in prvs_coords ]\n distances = np.array([ np.median(dists, axis=1).tolist() \n for dists in joints_distances ])\n \n ### Average current distance with prvs distance from prvs_coords_log\n last_k = 3\n dists_list = [distances]\n for older_prvs_coords in prvs_coords_log[-last_k:]:\n older_joints_distances = [ np.linalg.norm(curr_coords - coord, axis=2) \n for coord in older_prvs_coords ]\n older_dists = np.array([ np.median(dists, axis=1).tolist() \n for dists in older_joints_distances ])\n dists_list.append(older_dists)\n distances = np.average(dists_list, axis=0)\n \n ### Attribute pose to min distance body, if body still have not been appended yet\n sorted_idx = np.dstack(np.unravel_index(np.argsort(\n distances.ravel()), distances.shape))[0]\n poses_used = []\n bodies_appended = []\n for dist_idx in sorted_idx:\n body_idx, pose_idx = dist_idx\n if body_idx in bodies_appended or pose_idx in poses_used:\n continue\n min_dist = distances[tuple(dist_idx)]\n if min_dist < 50: # Hand-picked threshold\n bodies_coords[body_idx].append(curr_coords[pose_idx])\n poses_used.append(pose_idx)\n bodies_appended.append(body_idx)\n if len(poses_used) == len(curr_coords):\n break\n \n # Making sure all bodies are appended\n for body_coords in bodies_coords:\n if len(body_coords) == (prvs_idx+1):\n body_coords.append(null_joints)\n \n prvs_coords_log.append([ body_prvs_coords.copy() for body_prvs_coords in prvs_coords])\n \n # Updating prvs_coords for the non-zero values\n for body_coords, body_prvs_coords in zip(bodies_coords, prvs_coords):\n last_body_coords = body_coords[-1]\n body_prvs_coords[last_body_coords > 0] = last_body_coords[last_body_coords > 0]\n \n 
tracked_video_poses = []\n for frame_idx in range(len(video_poses)):\n tracked_video_poses.append([ \n {'coords': np.array(body_coords[frame_idx])} \n for body_coords in bodies_coords ])\n \n return tracked_video_poses\n\ndef read_video_poses(video_gt, pose_style='OpenPose', normalization=None, prune=True):\n if pose_style == 'OpenPose':\n video_keypoints_dir = video_gt.path\n json_list = glob.glob(video_keypoints_dir+'/*.json')\n json_list.sort()\n \n if json_list == []:\n raise FileNotFoundError(\"Error reading keypoints at: \"+video_keypoints_dir)\n\n video_poses = []\n for json_file in json_list:\n people = parse_json(json_file, prune)\n video_poses.append(people)\n \n if prune:\n tracked_video_poses = track_bodies(video_poses)\n pruned_video_poses = prune_bodies(tracked_video_poses)\n video_poses = pruned_video_poses\n \n if normalization == 'NTU':\n video_poses = apply_NTU_normalization(video_poses, pose_style)\n elif pose_style == 'SBU':\n video_keypoints_dir = video_gt.path\n pose_filepath = video_keypoints_dir + '/skeleton_pos.txt'\n normalized = (normalization == 'SBU')\n video_poses = parse_sbu_txt(pose_filepath, normalized=normalized)\n \n if normalization == 'NTU':\n video_poses = apply_NTU_normalization(video_poses, pose_style)\n elif pose_style == 'NTU':\n normalized = (normalization == 'NTU')\n video_poses = parse_ntu_skl(\n video_gt.start_frame_pt, video_gt.end_frame_pt, \n normalized=normalized,\n ntu_skl_filepath=video_gt.DATA_DIR+'/skl.npy')\n elif pose_style == 'NTU-V2':\n normalized = (normalization == 'NTU')\n video_poses = parse_ntu_skl(\n video_gt.start_frame_pt, video_gt.end_frame_pt, \n normalized=normalized,\n ntu_skl_filepath=video_gt.DATA_DIR+'/skl.npy')\n \n # Add 'zeros' person if there is a single person for the majority of the poses\n num_people_med = np.median([ len(frame_pose) for frame_pose in video_poses ])\n num_peoples = np.array([ len(frame_pose) for frame_pose in video_poses ])\n if num_people_med == 1:\n zero_coords = np.zeros_like(video_poses[0][0]['coords'])\n for frame_pose in video_poses:\n zero_person = {'coords': zero_coords}\n frame_pose.append(zero_person)\n \n return video_poses\n\ndef insert_joint_idx(p1_and_p2, num_joints, scale):\n for idx in range(num_joints):\n p1_and_p2[idx].append(idx/scale)\n p1_and_p2[idx+num_joints].append(idx/scale)\n # p1_and_p2[idx].append(idx/(num_joints-1)) # div_len_idx\n # p1_and_p2[idx+num_joints].append(idx/(num_joints-1)) # div_len_idx\n \n # one_hot = np.zeros(num_joints)\n # one_hot[idx] = 1\n # p1_and_p2[idx] += one_hot.tolist()\n # p1_and_p2[idx+num_joints] += one_hot.tolist()\n pass\n\ndef insert_body_part(p1_and_p2, num_joints, scale, body_parts_mapping):\n num_body_parts = len(np.unique(body_parts_mapping))\n for idx in range(num_joints):\n body_part_idx = body_parts_mapping[idx]/scale\n p1_and_p2[idx].append(body_part_idx)\n p1_and_p2[idx+num_joints].append(body_part_idx)\n \ndef get_data(gt_split, pose_style, timesteps=16, skip_timesteps=None,\n add_joint_idx=True, add_body_part=True, normalization=None, \n selected_joints=None, num_classes=None, prune=True, \n sample_method = 'central', seq_step=None, flat_seqs=False):\n if pose_style == 'OpenPose':\n joint_indexing = POSE_BODY_25_BODY_PARTS\n body_parts_mapping = POSE_BODY_25_BODY_PARTS_COARSE\n elif pose_style == 'SBU':\n joint_indexing = SBU_15_BODY_PARTS\n body_parts_mapping = SBU_15_BODY_PARTS_COARSE\n elif pose_style == 'NTU' or pose_style == 'NTU-V2':\n joint_indexing = NTU_25_BODY_PARTS\n body_parts_mapping = 
NTU_25_BODY_PARTS_COARSE\n \n all_video_poses = []\n for video_id, video_gt in gt_split.iterrows():\n video_poses = read_video_poses(video_gt, pose_style, \n normalization=normalization, prune=prune)\n if selected_joints is not None:\n for frame_pose in video_poses:\n for person in frame_pose:\n filter_joints(person, selected_joints, joint_indexing)\n all_video_poses.append(video_poses)\n \n scale = (1 if normalization is None else 10) # unscaled or div_10\n \n X = []\n Y = []\n num_joints = len(all_video_poses[0][0][0]['coords'])\n num_dim = len(all_video_poses[0][0][0]['coords'][0])\n for i, video_poses in enumerate(all_video_poses):\n action = gt_split.action.values[i]\n p1_all_joint_coords = [ [] for _ in range(num_joints) ]\n p2_all_joint_coords = [ [] for _ in range(num_joints) ]\n \n if skip_timesteps is not None:\n video_poses = video_poses[::skip_timesteps]\n \n for frame_pose in video_poses:\n if len(frame_pose) < 2: # Skipping frames with insufficient people\n continue\n p1 = frame_pose[0]\n for idx, coord in enumerate(p1['coords']):\n p1_all_joint_coords[idx] += coord.tolist()\n p2 = frame_pose[1]\n for idx, coord in enumerate(p2['coords']):\n p2_all_joint_coords[idx] += coord.tolist()\n p1_and_p2 = np.array(p1_all_joint_coords + p2_all_joint_coords)\n \n ### 1) Keeping only the central timesteps\n if sample_method == 'central':\n p1_and_p2.resize((num_joints*2, p1_and_p2.shape[1]//num_dim, num_dim))\n if p1_and_p2.shape[1] < timesteps: # Need to pad sequence\n pad_val = int( np.ceil( (timesteps - p1_and_p2.shape[1])/2 ) )\n pad_width = ((0,0), (pad_val,pad_val), (0,0))\n p1_and_p2 = np.pad(p1_and_p2, pad_width=pad_width, mode='constant')\n center = p1_and_p2.shape[1]//2\n central_window = slice(center - timesteps//2, center + timesteps//2)\n p1_and_p2 = p1_and_p2[:,central_window].reshape(\n (num_joints*2, timesteps*num_dim))\n p1_and_p2 = p1_and_p2.tolist()\n \n if add_joint_idx:\n insert_joint_idx(p1_and_p2, num_joints, scale)\n if add_body_part:\n insert_body_part(p1_and_p2, num_joints, scale, body_parts_mapping)\n \n X.append(p1_and_p2)\n Y.append(action)\n \n ### 2) Breaking the video into multiple inputs of length 'timesteps'\n if sample_method == 'all':\n p1_and_p2.resize((num_joints*2, p1_and_p2.shape[1]//num_dim, num_dim))\n if p1_and_p2.shape[1] < timesteps: # Need to pad sequence\n pad_val = int( np.ceil( (timesteps - p1_and_p2.shape[1])/2 ) )\n pad_width = ((0,0), (pad_val,pad_val), (0,0))\n p1_and_p2 = np.pad(p1_and_p2, pad_width=pad_width, mode='constant')\n \n num_frames = p1_and_p2.shape[1]\n range_end = (num_frames - timesteps + 1)\n if seq_step is None:\n seq_step = timesteps//2\n p1_and_p2 = np.array([ p1_and_p2[:,i:i+timesteps].reshape(-1,timesteps*num_dim).tolist()\n for i in range(0, range_end, seq_step)])\n \n p1_and_p2 = p1_and_p2.tolist()\n \n if add_joint_idx:\n for p1_and_p2_seq in p1_and_p2:\n insert_joint_idx(p1_and_p2_seq, num_joints, scale)\n if add_body_part:\n for p1_and_p2_seq in p1_and_p2:\n insert_body_part(p1_and_p2_seq, num_joints, scale, body_parts_mapping)\n \n if not flat_seqs:\n X.append(p1_and_p2)\n Y.append(action)\n else:\n X += p1_and_p2\n Y += [action] * len(p1_and_p2)\n \n if num_classes is None:\n num_classes = gt_split.action.max()+1\n Y = to_categorical(Y, num_classes)\n \n # Input for the network must be (n_joints, n_samples, timesteps*num_dim)\n if sample_method == 'central' or flat_seqs:\n X = np.array(X).transpose((1,0,2)).tolist()\n \n return X, Y\n"
]
| [
[
"numpy.isin",
"numpy.dot",
"numpy.pad",
"numpy.unique",
"numpy.median",
"numpy.average",
"numpy.linalg.norm",
"numpy.ceil",
"numpy.fromstring",
"numpy.mean",
"numpy.zeros_like",
"numpy.cross",
"numpy.load",
"numpy.array",
"numpy.empty"
]
]
|
OOXXXXOO/XCloud | [
"239115558c50f62947679dbcf01852684a3656ac"
]
| [
"research/iqa/lean_detector.py"
]
| [
"# a class wrapper for Lean Detection\nimport math\n\nimport cv2\nimport numpy as np\n\n\nclass LeanDetector:\n def __init__(self, lean_degree_threshold=5):\n # the hyper-param lean_degree_threshold should be tuned according to your application scene\n self.lean_degree_threshold = lean_degree_threshold\n\n def calculate_lean_angle(self, img):\n \"\"\"\n calculate lean angle of a Region of Interest (RoI)\n if angle degree > 0, then take (x_min, y_min, x_max, y_max) as diagonal\n else take (x_min, y_max, x_max, y_min) as diagonal\n :param img:\n :return:\n \"\"\"\n if isinstance(img, str):\n img = cv2.imread(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 100, 150, apertureSize=3)\n\n minLineLength = 50\n maxLineGap = 10\n\n longest_line = (0, 0, 0, 0)\n\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 60, minLineLength, maxLineGap, 10)\n if lines is not None and len(lines) > 0:\n for line in lines:\n for x1, y1, x2, y2 in line:\n if np.linalg.norm(np.array([x1, y1]) - np.array([x2, y2])) > np.linalg.norm(\n np.array([longest_line[0], longest_line[1]]) - np.array(\n [longest_line[2], longest_line[3]])):\n longest_line = (x1, y1, x2, y2)\n\n if longest_line[0] <= longest_line[2]:\n x_l = longest_line[0]\n y_l = longest_line[1]\n x_r = longest_line[2]\n y_r = longest_line[3]\n else:\n x_l = longest_line[2]\n y_l = longest_line[3]\n x_r = longest_line[0]\n y_r = longest_line[1]\n\n return np.degrees(np.arctan(((y_r - y_l) / (x_r - x_l))))\n else:\n return 0\n\n def judge_lean(self, img):\n degree = self.calculate_lean_angle(img)\n\n return {'degree': degree, 'desc': 'Not Lean' if math.fabs(degree) == 0 else 'Lean'}\n"
]
| [
[
"numpy.array",
"numpy.arctan"
]
]
|
Iwvvwl/MSMARCO-Passage-Ranking-Submissions | [
"0118ed3c55e75a8783a83356a2e0afb0fe242443"
]
| [
"analysis/plot_leaderboard_over_time.py"
]
| [
"# MS MARCO Passage: Script for plotting leaderboard over time scatter plots\n\nimport pandas as pd\nimport datetime\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\n# Solution to use Type 1 fonts:\n# https://stackoverflow.com/questions/13132194/type-1-fonts-with-log-graphs\n\n# If fonttype = 1 doesn't work with LaTeX, try fonttype 42.\nplt.rc('pdf',fonttype = 42)\nplt.rc('ps',fonttype = 42)\n\n\ndf = pd.read_csv('../leaderboard/leaderboard.csv')\ndf['date']= pd.to_datetime(df['date'])\n\n# Plot all the runs\nax = df.plot(x='date',y='eval',marker='o',linestyle='none',label='Submission')\n\n# Overlay all the runs that have 'BERT' in their names, in orange\nbert = df[df['description'].str.contains('BERT', case=False)]\nbert.plot(ax=ax, x='date',y='eval',marker='o',color = 'orange',linestyle='none',label='\"BERT\" technique')\n\n# Overlay all SOTA runs, in red.\nsota = df[df['Unnamed: 0'] == '🏆']\nsota.plot(ax=ax, x='date',y='eval',marker='o',color = 'red',linestyle='none',label='SOTA')\n\nax.set_xlim([datetime.date(2018, 10, 1), datetime.date(2021, 5, 1)])\n\nplt.title('MS MARCO Passage Leaderboard')\nplt.xlabel('Date')\nplt.ylabel('MRR@10')\n\nplt.savefig('leaderboard.pdf', bbox_inches='tight', format='pdf')\n\n"
]
| [
[
"pandas.to_datetime",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
Bill13579/furry | [
"3336ccd88996494c8bf0b8445bf7e7058a5f3397"
]
| [
"furry/optimizer/optimizer.py"
]
| [
"import torch\nfrom furry.data.utils import float32\nfrom furry.dev import default_device\n\nclass Optimizer:\n def __init__(self, module=None, dtype=float32, dev=None):\n if dev is None:\n dev = default_device()\n self.__module = None\n self.__dtype = dtype\n self.__dev = dev\n if module is not None:\n self.module = module\n \n @property\n def dtype(self):\n return self.__dtype\n \n @property\n def device(self):\n return self.__dev\n\n def step(self):\n pass\n \n def reset_grads(self):\n with torch.no_grad():\n for param in self.module.parameters(recurse=True):\n param.grad.data.zero_()\n \n def init(self):\n pass\n\n @property\n def module(self):\n return self.__module\n \n @module.setter\n def module(self, module):\n self.__module = module\n self.init()\n"
]
| [
[
"torch.no_grad"
]
]
|
cgroeneveld/amuse | [
"9684fd22ce8293b837d2c78f56948e3ec3d04032"
]
| [
"src/amuse/community/sse/interface.py"
]
| [
"import numpy\nfrom operator import itemgetter\nfrom amuse.community import *\nfrom amuse.units import units\nfrom amuse.units import constants\nfrom amuse.support.interface import InCodeComponentImplementation\nfrom amuse.community.interface import common\n\nfrom amuse.datamodel import Particles\nfrom amuse.datamodel import ParticlesSubset\nclass SSEInterface(CodeInterface, common.CommonCodeInterface , LiteratureReferencesMixIn): \n \"\"\"\n Stellar evolution is performed by the rapid single-star evolution (SSE)\n algorithm. This is a package of analytical formulae fitted to the detailed \n models of Pols et al. (1998) that covers all phases of evolution from the \n zero-age main-sequence up to and including remnant phases. It is valid for \n masses in the range 0.1-100 Msun and metallicity can be varied. The SSE \n package contains a prescription for mass loss by stellar winds. It also \n follows the evolution of rotational angular momentum for the star. Full \n details can be found in the SSE paper:\n \n .. [#] Hurley J.R., Pols O.R., Tout C.A., 2000, MNRAS, 315, 543:\n .. [#] ... \"Comprehensive analytic formulae for stellar evolution as a function of mass and metallicity\"\n \"\"\"\n def __init__(self, **options):\n CodeInterface.__init__(self, name_of_the_worker=\"sse_worker\", **options)\n LiteratureReferencesMixIn.__init__(self)\n \n @legacy_function \n def initialize():\n function = LegacyFunctionSpecification() \n function.addParameter('z_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('neta_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('bwind_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('hewind_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('sigma_in', dtype='d', direction=function.IN, unit = units.km / units.s)\n function.addParameter('ifflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)\n function.addParameter('wdflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)\n function.addParameter('bhflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)\n function.addParameter('nsflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)\n function.addParameter('mxns_in', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('pts1_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('pts2_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('pts3_in', dtype='d', direction=function.IN, unit = NO_UNIT)\n function.addParameter('status', dtype='i', direction=function.OUT, unit = NO_UNIT)\n return function\n \n @legacy_function \n def evolve_star():\n function = LegacyFunctionSpecification() \n function.name = 'evolve0'\n function.can_handle_array = True \n function.addParameter('kw', dtype='i', direction=function.INOUT, unit = units.stellar_type)\n function.addParameter('mass', dtype='d', direction=function.INOUT, unit = units.MSun)\n function.addParameter('mt', dtype='d', direction=function.INOUT, unit = units.MSun)\n function.addParameter('r', dtype='d', direction=function.INOUT, unit = units.RSun)\n function.addParameter('lum', dtype='d', direction=function.INOUT, unit = units.LSun)\n function.addParameter('mc', dtype='d', direction=function.INOUT, unit = units.MSun)\n function.addParameter('rc', dtype='d', direction=function.INOUT, unit = units.RSun)\n function.addParameter('menv', dtype='d', direction=function.INOUT, unit = units.MSun)\n 
function.addParameter('renv', dtype='d', direction=function.INOUT, unit = units.RSun)\n function.addParameter('ospin', dtype='d', direction=function.INOUT, unit = units.yr**-1)\n function.addParameter('epoch', dtype='d', direction=function.INOUT, unit = units.Myr)\n function.addParameter('tm', dtype='d', direction=function.INOUT, unit = units.Myr)\n function.addParameter('tphys', dtype='d', direction=function.INOUT, unit = units.Myr)\n function.addParameter('tphysf', dtype='d', direction=function.INOUT, unit = units.Myr)\n return function\n \n @legacy_function \n def get_time_step():\n function = LegacyFunctionSpecification() \n function.can_handle_array = True \n function.addParameter('kw', dtype='i', direction=function.IN, unit = units.stellar_type)\n function.addParameter('mass', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('age', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('mt', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('tm', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('epoch', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('dt', dtype='d', direction=function.OUT, unit = units.Myr)\n \n return function\n \n @legacy_function \n def get_mass_loss_wind():\n function = LegacyFunctionSpecification() \n function.can_handle_array = True \n function.addParameter('kw', dtype='i', direction=function.IN, unit = units.stellar_type)\n function.addParameter('lum', dtype='d', direction=function.IN, unit = units.LSun)\n function.addParameter('r', dtype='d', direction=function.IN, unit = units.RSun)\n function.addParameter('mt', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('mc', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('mlout', dtype='d', direction=function.OUT, unit = units.MSun/units.yr)\n \n return function\n \n\n @legacy_function \n def get_gyration_radius():\n function = LegacyFunctionSpecification() \n function.can_handle_array = True \n function.addParameter('kw', dtype='i', direction=function.IN, unit = units.stellar_type)\n function.addParameter('mass', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('mt', dtype='d', direction=function.IN, unit = units.MSun)\n function.addParameter('r', dtype='d', direction=function.IN, unit = units.RSun)\n function.addParameter('lum', dtype='d', direction=function.IN, unit = units.LSun)\n function.addParameter('epoch', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('tm', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('tphys', dtype='d', direction=function.IN, unit = units.Myr)\n function.addParameter('rg', dtype='d', direction=function.OUT, unit = NO_UNIT)\n \n return function\n \n def initialize_code(self):\n return 0\n \n def commit_parameters(self):\n return 0\n \n def recommit_parameters(self):\n return 0\n \n def cleanup_code(self):\n return 0\n \n def commit_particles(self):\n return 0\n \n \n \nclass SSEParticles(Particles):\n \n def __init__(self, code_interface, storage = None):\n Particles.__init__(self, storage = storage)\n self._private.code_interface = code_interface \n self.add_calculated_attribute(\"temperature\", self.calculate_effective_temperature, [\"luminosity\", \"radius\"])\n self.add_function_attribute(\"evolve_one_step\", self.particleset_evolve_one_step, self.evolve_one_step)\n self.add_function_attribute(\"evolve_for\", 
self.particleset_evolve_for, self.evolve_for)\n \n def calculate_effective_temperature(self, luminosity, radius):\n return ((luminosity/(constants.four_pi_stefan_boltzmann*radius**2))**.25).in_(units.K)\n \n def add_particles_to_store(self, keys, attributes = [], values = []):\n if len(keys) == 0:\n return\n \n all_attributes = []\n all_attributes.extend(attributes)\n all_values = []\n all_values.extend(values)\n \n mapping_from_attribute_to_default_value = {\n \"stellar_type\" : 1 | units.stellar_type,\n \"radius\": 0 | units.RSun,\n \"luminosity\": 0 | units.LSun,\n \"core_mass\": 0 | units.MSun,\n \"CO_core_mass\": 0 | units.MSun,\n \"core_radius\": 0 | units.RSun,\n \"convective_envelope_mass\": 0 | units.MSun,\n \"convective_envelope_radius\": 0 | units.RSun,\n \"epoch\": 0 | units.Myr,\n \"spin\": 0 | units.yr**-1,\n \"main_sequence_lifetime\": 0 | units.Myr,\n \"age\": 0 | units.Myr\n }\n \n given_attributes = set(attributes)\n \n if not \"initial_mass\" in given_attributes:\n index_of_mass_attibute = attributes.index(\"mass\")\n all_attributes.append(\"initial_mass\")\n all_values.append(values[index_of_mass_attibute] * 1.0)\n \n for attribute, default_value in mapping_from_attribute_to_default_value.iteritems():\n if not attribute in given_attributes:\n all_attributes.append(attribute)\n all_values.append(default_value.as_vector_with_length(len(keys)))\n \n super(SSEParticles, self).add_particles_to_store(keys, all_attributes, all_values)\n \n added_particles = ParticlesSubset(self, keys)\n self._private.code_interface._evolve_particles(added_particles, 0 | units.yr)\n \n def evolve_one_step(self, particles, subset):\n self._private.code_interface._evolve_particles(subset.as_set(), subset.age + subset.time_step)\n \n def particleset_evolve_one_step(self, particles):\n self._private.code_interface._evolve_particles(particles, particles.age + particles.time_step)\n \n def evolve_for(self, particles, subset, delta_time):\n self._private.code_interface._evolve_particles(subset.as_set(), subset.age + delta_time)\n \n def particleset_evolve_for(self, particles, delta_time):\n self._private.code_interface._evolve_particles(particles, particles.age + delta_time)\n \n \n def get_defined_attribute_names(self):\n return [\"mass\", \"radius\"]\n \n \n \n\n \n\n \n \n \n\nclass SSE(common.CommonCode):\n \n def __init__(self, **options):\n InCodeComponentImplementation.__init__(self, SSEInterface(**options), **options)\n \n self.model_time = 0.0 | units.yr\n \n \n def define_parameters(self, handler):\n \n handler.add_caching_parameter(\n \"initialize\",\n \"z_in\",\n \"metallicity\",\n \"Metallicity of all stars\",\n 0.02\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"neta_in\",\n \"reimers_mass_loss_coefficient\",\n \"Reimers mass-loss coefficient (neta*4x10^-13; 0.5 normally)\",\n 0.5\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"bwind_in\",\n \"binary_enhanced_mass_loss_parameter\",\n \"The binary enhanced mass loss parameter (inactive for single).\",\n 0.0\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"hewind_in\",\n \"helium_star_mass_loss_factor\",\n \"Helium star mass loss factor\",\n 0.5\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"sigma_in\",\n \"SN_kick_speed_dispersion\",\n \"The dispersion in the Maxwellian for the SN kick speed (190 km/s).\",\n 190.0 | units.km / units.s\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"ifflag_in\",\n \"white_dwarf_IFMR_flag\",\n \"ifflag > 0 uses white dwarf IFMR 
(initial-final mass relation) of HPE, 1995, MNRAS, 272, 800 (0).\",\n 0\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"wdflag_in\",\n \"white_dwarf_cooling_flag\",\n \"wdflag > 0 uses modified-Mestel cooling for WDs (0).\",\n 1\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"bhflag_in\",\n \"black_hole_kick_flag\",\n \"bhflag > 0 allows velocity kick at BH formation (0).\",\n 0\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"nsflag_in\",\n \"neutron_star_mass_flag\",\n \"nsflag > 0 takes NS/BH mass from Belczynski et al. 2002, ApJ, 572, 407 (1).\",\n 1\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"mxns_in\",\n \"maximum_neutron_star_mass\",\n \"The maximum neutron star mass (1.8, nsflag=0; 3.0, nsflag=1).\",\n 3.0 | units.MSun\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"pts1_in\",\n \"fractional_time_step_1\",\n \"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: MS (0.05)\",\n 0.05\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"pts2_in\",\n \"fractional_time_step_2\",\n \"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: GB, CHeB, AGB, HeGB (0.01)\",\n 0.01\n )\n \n handler.add_caching_parameter(\n \"initialize\",\n \"pts3_in\",\n \"fractional_time_step_3\",\n \"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: HG, HeMS (0.02)\",\n 0.02\n )\n \n def define_state(self, handler):\n common.CommonCode.define_state(self, handler)\n handler.add_transition('INITIALIZED','RUN','commit_parameters')\n handler.add_method('RUN', 'evolve_star')\n \n handler.add_method('RUN', 'before_get_parameter')\n handler.add_method('RUN', 'before_set_parameter')\n \n \n def define_particle_sets(self, handler):\n handler.define_inmemory_set('particles', SSEParticles)\n \n handler.add_attribute(\n 'particles',\n 'time_step', \n 'get_time_step', \n ('stellar_type', 'initial_mass', 'age', \n 'mass', 'main_sequence_lifetime', 'epoch')\n )\n \n handler.add_attribute(\n 'particles',\n 'mass_loss_wind', \n 'get_mass_loss_wind', \n ('stellar_type', 'luminosity', \n 'radius', 'mass', \n 'CO_core_mass')\n )\n \n handler.add_attribute(\n 'particles',\n 'gyration_radius', \n 'get_gyration_radius', \n ('stellar_type', 'initial_mass','mass','radius',\n 'luminosity','epoch','main_sequence_lifetime',\n 'age')\n )\n \n def _evolve_particles(self, particles, end_time):\n attributes = (\n \"stellar_type\",\n \"initial_mass\", \n \"mass\", \n \"radius\", \n \"luminosity\", \n \"core_mass\", \n \"core_radius\",\n \"convective_envelope_mass\", \n \"convective_envelope_radius\", \n \"spin\", \n \"epoch\",\n \"main_sequence_lifetime\",\n \"age\", \n \"end_time\"\n )\n \n result = self.evolve_star(\n particles.stellar_type,\n particles.initial_mass,\n particles.mass,\n particles.radius,\n particles.luminosity,\n particles.core_mass,\n particles.core_radius,\n particles.convective_envelope_mass,\n particles.convective_envelope_radius,\n particles.spin,\n particles.epoch,\n particles.main_sequence_lifetime,\n particles.age,\n end_time.as_vector_with_length(len(particles)))\n \n # For helium (and helium exhausted) stars, the core mass returned is actually the CO core mass\n type = result[0].value_in(units.stellar_type)\n helium_star_selection = (type > 6) & (type < 16) & (type != 10)\n helium_stars = particles[helium_star_selection]\n other_stars = particles - helium_stars\n if len(helium_stars):\n 
helium_stars_results = [sub[helium_star_selection] for sub in result]\n helium_stars_results.append(helium_stars_results[2])\n helium_stars.set_values_in_store(helium_stars.get_all_indices_in_store(), (\n \"stellar_type\", \"initial_mass\", \"mass\", \"radius\", \"luminosity\", \n \"CO_core_mass\", \n \"core_radius\", \"convective_envelope_mass\", \"convective_envelope_radius\", \"spin\", \"epoch\",\n \"main_sequence_lifetime\", \"age\", \"end_time\", \n \"core_mass\"), helium_stars_results)\n if len(other_stars):\n other_star_selection = numpy.logical_not(helium_star_selection)\n other_stars.set_values_in_store(other_stars.get_all_indices_in_store(), attributes, \n [sub[other_star_selection] for sub in result])\n \n def evolve_model(self, end_time = None, keep_synchronous = True):\n if not keep_synchronous:\n self._evolve_particles(self.particles, self.particles.time_step + self.particles.age)\n return\n \n if end_time is None:\n end_time = self.model_time + min(self.particles.time_step)\n self._evolve_particles(self.particles, end_time - self.model_time + self.particles.age)\n self.model_time = end_time\n \n def _evolve_model_old(self, end_time = None, keep_synchronous = True):\n \"\"\"\n This is the old implementation of evolve_model. Even with (keep_synchronous = True) \n it is unable to evolve all stars to a common age, since it relies on the \n individual timesteps as determined by the community code. Furthermore, it \n is not suited to simulations with ongoing star formation, since it evolves \n newly created stars to the same age as the old stars. \n \"\"\"\n if end_time is None:\n if keep_synchronous:\n ages = self.particles.age\n index, min_age = min(enumerate(ages), key=itemgetter(1))\n new_age = min_age + self.particles[index].time_step\n selection = self.particles.select(lambda x : x < new_age, [\"age\"])\n self._evolve_particles(selection, selection.time_step + selection.age)\n return\n end_time = self.particles.time_step + self.particles.age\n \n self._evolve_particles(self.particles, end_time)\n \n \n def commit_parameters(self):\n self.parameters.send_cached_parameters_to_code()\n self.overridden().commit_parameters()\n \n def initialize_module_with_current_parameters(self):\n self.parameters.send_cached_parameters_to_code()\n \n def initialize_module_with_default_parameters(self):\n self.parameters.set_defaults()\n self.commit_parameters()\n\n def update_time_steps(self):\n pass\n"
]
| [
[
"numpy.logical_not"
]
]
|
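
The row above lists `numpy.logical_not` as the only catalogued API call for the AMUSE SSE interface; in the code it produces the complementary mask used to split evolved particles into helium stars and everything else. A minimal sketch of that complementary-mask pattern, using hypothetical stellar-type codes and masses rather than the AMUSE particle sets:

```python
import numpy

# Hypothetical stellar-type codes for a handful of particles.
stellar_type = numpy.array([1, 7, 10, 14, 3])

# Same selection rule as the interface: helium / helium-exhausted types,
# excluding type 10.
helium_star_selection = (stellar_type > 6) & (stellar_type < 16) & (stellar_type != 10)

# numpy.logical_not gives the complementary mask for "all other stars".
other_star_selection = numpy.logical_not(helium_star_selection)

masses = numpy.array([1.0, 2.5, 0.6, 8.0, 1.2])
print(masses[helium_star_selection])   # helium / remnant subset
print(masses[other_star_selection])    # everything else
```
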
wadzaw/text-classification | [
"1be6dc653c1d5fb386c3edb7da3e15cbf15cd4c1"
]
| [
"Text+Classification+using+python,+scikit+and+nltk.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Loading the data set - training data.\nfrom sklearn.datasets import fetch_20newsgroups\ntwenty_train = fetch_20newsgroups(subset='train', shuffle=True)\n\n\n# In[4]:\n\n\n# You can check the target names (categories) and some data files by following commands.\ntwenty_train.target_names #prints all the categories\n\n\n# In[5]:\n\n\nprint(\"\\n\".join(twenty_train.data[0].split(\"\\n\")[:3])) #prints first line of the first data file\n\n\n# In[6]:\n\n\n# Extracting features from text files\nfrom sklearn.feature_extraction.text import CountVectorizer\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(twenty_train.data)\nX_train_counts.shape\n\n\n# In[7]:\n\n\n# TF-IDF\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntfidf_transformer = TfidfTransformer()\nX_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\nX_train_tfidf.shape\n\n\n# In[9]:\n\n\n# Machine Learning\n# Training Naive Bayes (NB) classifier on training data.\nfrom sklearn.naive_bayes import MultinomialNB\nclf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)\n\n\n# In[14]:\n\n\n# Building a pipeline: We can write less code and do all of the above, by building a pipeline as follows:\n# The names ‘vect’ , ‘tfidf’ and ‘clf’ are arbitrary but will be used later.\n# We will be using the 'text_clf' going forward.\nfrom sklearn.pipeline import Pipeline\n\ntext_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())])\n\ntext_clf = text_clf.fit(twenty_train.data, twenty_train.target)\n\n\n# In[15]:\n\n\n# Performance of NB Classifier\nimport numpy as np\ntwenty_test = fetch_20newsgroups(subset='test', shuffle=True)\npredicted = text_clf.predict(twenty_test.data)\nnp.mean(predicted == twenty_test.target)\n\n\n# In[16]:\n\n\n# Training Support Vector Machines - SVM and calculating its performance\n\nfrom sklearn.linear_model import SGDClassifier\ntext_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\n ('clf-svm', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, max_iter=5, random_state=42))])\n\ntext_clf_svm = text_clf_svm.fit(twenty_train.data, twenty_train.target)\npredicted_svm = text_clf_svm.predict(twenty_test.data)\nnp.mean(predicted_svm == twenty_test.target)\n\n\n# In[18]:\n\n\n# Grid Search\n# Here, we are creating a list of parameters for which we would like to do performance tuning. \n# All the parameters name start with the classifier name (remember the arbitrary name we gave). \n# E.g. vect__ngram_range; here we are telling to use unigram and bigrams and choose the one which is optimal.\n\nfrom sklearn.model_selection import GridSearchCV\nparameters = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False), 'clf__alpha': (1e-2, 1e-3)}\n\n\n# In[19]:\n\n\n# Next, we create an instance of the grid search by passing the classifier, parameters \n# and n_jobs=-1 which tells to use multiple cores from user machine.\n\ngs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)\ngs_clf = gs_clf.fit(twenty_train.data, twenty_train.target)\n\n\n# In[23]:\n\n\n# To see the best mean score and the params, run the following code\n\ngs_clf.best_score_\ngs_clf.best_params_\n\n# Output for above should be: The accuracy has now increased to ~90.6% for the NB classifier (not so naive anymore! 
😄)\n# and the corresponding parameters are {‘clf__alpha’: 0.01, ‘tfidf__use_idf’: True, ‘vect__ngram_range’: (1, 2)}.\n\n\n# In[24]:\n\n\n# Similarly doing grid search for SVM\nfrom sklearn.model_selection import GridSearchCV\nparameters_svm = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False),'clf-svm__alpha': (1e-2, 1e-3)}\n\ngs_clf_svm = GridSearchCV(text_clf_svm, parameters_svm, n_jobs=-1)\ngs_clf_svm = gs_clf_svm.fit(twenty_train.data, twenty_train.target)\n\n\ngs_clf_svm.best_score_\ngs_clf_svm.best_params_\n\n\n# In[25]:\n\n\n# NLTK\n# Removing stop words\nfrom sklearn.pipeline import Pipeline\ntext_clf = Pipeline([('vect', CountVectorizer(stop_words='english')), ('tfidf', TfidfTransformer()), \n ('clf', MultinomialNB())])\n\n\n# In[26]:\n\n\n# Stemming Code\n\nimport nltk\nnltk.download()\n\nfrom nltk.stem.snowball import SnowballStemmer\nstemmer = SnowballStemmer(\"english\", ignore_stopwords=True)\n\nclass StemmedCountVectorizer(CountVectorizer):\n def build_analyzer(self):\n analyzer = super(StemmedCountVectorizer, self).build_analyzer()\n return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])\n \nstemmed_count_vect = StemmedCountVectorizer(stop_words='english')\n\ntext_mnb_stemmed = Pipeline([('vect', stemmed_count_vect), ('tfidf', TfidfTransformer()), \n ('mnb', MultinomialNB(fit_prior=False))])\n\ntext_mnb_stemmed = text_mnb_stemmed.fit(twenty_train.data, twenty_train.target)\n\npredicted_mnb_stemmed = text_mnb_stemmed.predict(twenty_test.data)\n\nnp.mean(predicted_mnb_stemmed == twenty_test.target)\n\n\n# In[ ]:\n\n\n\n\n"
]
| [
[
"sklearn.model_selection.GridSearchCV",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.mean",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.linear_model.SGDClassifier",
"sklearn.datasets.fetch_20newsgroups"
]
]
|
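
The API list for this row (CountVectorizer, TfidfTransformer, MultinomialNB, SGDClassifier, GridSearchCV, fetch_20newsgroups, numpy.mean) corresponds to the standard bag-of-words pipeline the script walks through. A condensed, runnable sketch of the same idea (it downloads the 20newsgroups corpus on first run; the grid is deliberately tiny):

```python
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV

train = fetch_20newsgroups(subset='train', shuffle=True)
test = fetch_20newsgroups(subset='test', shuffle=True)

# Bag-of-words -> TF-IDF -> Naive Bayes, the same shape as the pipeline above.
clf = Pipeline([('vect', CountVectorizer()),
                ('tfidf', TfidfTransformer()),
                ('clf', MultinomialNB())])

# Small grid over n-gram range and smoothing, as in the script.
params = {'vect__ngram_range': [(1, 1), (1, 2)], 'clf__alpha': (1e-2, 1e-3)}
search = GridSearchCV(clf, params, n_jobs=-1).fit(train.data, train.target)

predicted = search.predict(test.data)
print(np.mean(predicted == test.target))   # test-set accuracy
```
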
tsia/c3nav | [
"18fdb34b3fbcf7eb4617794750494cfa16428c54"
]
| [
"src/c3nav/mapdata/render/renderdata.py"
]
| [
"import operator\nimport os\nimport pickle\nimport threading\nfrom collections import deque\nfrom itertools import chain\n\nimport numpy as np\nfrom django.conf import settings\nfrom scipy.interpolate import NearestNDInterpolator\nfrom shapely import prepared\nfrom shapely.geometry import GeometryCollection\nfrom shapely.ops import unary_union\n\nfrom c3nav.mapdata.models import Level, MapUpdate, Source\nfrom c3nav.mapdata.render.geometry import AltitudeAreaGeometries, LevelGeometries\nfrom c3nav.mapdata.utils.cache import AccessRestrictionAffected, MapHistory\nfrom c3nav.mapdata.utils.cache.package import CachePackage\nfrom c3nav.mapdata.utils.geometry import get_rings\n\nempty_geometry_collection = GeometryCollection()\n\n\nclass Cropper:\n def __init__(self, geometry=None):\n self.geometry = geometry\n self.geometry_prep = None if geometry is None else prepared.prep(geometry)\n\n def intersection(self, other):\n if self.geometry is None:\n return other\n if self.geometry_prep.intersects(other):\n return self.geometry.intersection(other)\n return empty_geometry_collection\n\n\nclass LevelRenderData:\n \"\"\"\n Renderdata for a level to display.\n This contains multiple LevelGeometries instances because you might to look through holes onto lower levels.\n \"\"\"\n def __init__(self):\n self.levels = []\n self.base_altitude = None\n self.lowest_important_level = None\n self.darken_area = None\n\n @staticmethod\n def rebuild():\n levels = tuple(Level.objects.prefetch_related('altitudeareas', 'buildings', 'doors', 'spaces',\n 'spaces__holes', 'spaces__areas', 'spaces__columns',\n 'spaces__obstacles', 'spaces__lineobstacles',\n 'spaces__groups', 'spaces__ramps'))\n\n package = CachePackage(bounds=tuple(chain(*Source.max_bounds())))\n\n # first pass in reverse to collect some data that we need later\n single_level_geoms = {}\n interpolators = {}\n last_interpolator = None\n altitudeareas_above = []\n for level in reversed(levels):\n single_level_geoms[level.pk] = LevelGeometries.build_for_level(level, altitudeareas_above)\n\n # ignore intermediate levels in this pass\n if level.on_top_of_id is not None:\n altitudeareas_above.extend(single_level_geoms[level.pk].altitudeareas)\n altitudeareas_above.sort(key=operator.attrgetter('altitude'))\n continue\n\n # create interpolator to create the pieces that fit multiple layers together\n if last_interpolator is not None:\n interpolators[level.pk] = last_interpolator\n\n coords = deque()\n values = deque()\n for area in single_level_geoms[level.pk].altitudeareas:\n new_coords = np.vstack(tuple(np.array(ring.coords) for ring in get_rings(area.geometry)))\n coords.append(new_coords)\n values.append(np.full((new_coords.shape[0], 1), fill_value=area.altitude))\n\n last_interpolator = NearestNDInterpolator(np.vstack(coords), np.vstack(values))\n\n for i, level in enumerate(levels):\n if level.on_top_of_id is not None:\n continue\n\n map_history = MapHistory.open_level(level.pk, 'base')\n\n sublevels = tuple(sublevel for sublevel in levels\n if sublevel.on_top_of_id == level.pk or sublevel.base_altitude <= level.base_altitude)\n\n level_crop_to = {}\n\n # choose a crop area for each level. 
non-intermediate levels (not on_top_of) below the one that we are\n # currently rendering will be cropped to only render content that is visible through holes indoors in the\n # levels above them.\n crop_to = None\n primary_level_count = 0\n main_level_passed = 0\n lowest_important_level = None\n for sublevel in reversed(sublevels):\n geoms = single_level_geoms[sublevel.pk]\n\n if geoms.holes is not None:\n primary_level_count += 1\n\n # get lowest intermediate level directly below main level\n\n if not main_level_passed:\n if geoms.pk == level.pk:\n main_level_passed = 1\n else:\n if not sublevel.on_top_of_id:\n main_level_passed += 1\n if main_level_passed < 2:\n lowest_important_level = sublevel\n\n # set crop area if we area on the second primary layer from top or below\n level_crop_to[sublevel.pk] = Cropper(crop_to if primary_level_count > 1 else None)\n\n if geoms.holes is not None:\n if crop_to is None:\n crop_to = geoms.holes\n else:\n crop_to = crop_to.intersection(geoms.holes)\n\n if crop_to.is_empty:\n break\n\n render_data = LevelRenderData()\n render_data.base_altitude = level.base_altitude\n render_data.lowest_important_level = lowest_important_level.pk\n access_restriction_affected = {}\n\n # go through sublevels, get their level geometries and crop them\n lowest_important_level_passed = False\n for sublevel in sublevels:\n try:\n crop_to = level_crop_to[sublevel.pk]\n except KeyError:\n break\n\n old_geoms = single_level_geoms[sublevel.pk]\n\n if render_data.lowest_important_level == sublevel.pk:\n lowest_important_level_passed = True\n\n if old_geoms.holes and render_data.darken_area is None and lowest_important_level_passed:\n render_data.darken_area = old_geoms.holes\n\n if crop_to.geometry is not None:\n map_history.composite(MapHistory.open_level(sublevel.pk, 'base'), crop_to.geometry)\n elif level.pk != sublevel.pk:\n map_history.composite(MapHistory.open_level(sublevel.pk, 'base'), None)\n\n new_geoms = LevelGeometries()\n new_geoms.doors = crop_to.intersection(old_geoms.doors)\n new_geoms.walls = crop_to.intersection(old_geoms.walls)\n new_geoms.all_walls = crop_to.intersection(old_geoms.all_walls)\n new_geoms.short_walls = tuple((altitude, geom) for altitude, geom in tuple(\n (altitude, crop_to.intersection(geom))\n for altitude, geom in old_geoms.short_walls\n ) if not geom.is_empty)\n\n for altitudearea in old_geoms.altitudeareas:\n new_geometry = crop_to.intersection(altitudearea.geometry)\n if new_geometry.is_empty:\n continue\n new_geometry_prep = prepared.prep(new_geometry)\n\n new_altitudearea = AltitudeAreaGeometries()\n new_altitudearea.geometry = new_geometry\n new_altitudearea.altitude = altitudearea.altitude\n new_altitudearea.altitude2 = altitudearea.altitude2\n new_altitudearea.point1 = altitudearea.point1\n new_altitudearea.point2 = altitudearea.point2\n\n new_colors = {}\n for color, areas in altitudearea.colors.items():\n new_areas = {}\n for access_restriction, area in areas.items():\n if not new_geometry_prep.intersects(area):\n continue\n new_area = new_geometry.intersection(area)\n if not new_area.is_empty:\n new_areas[access_restriction] = new_area\n if new_areas:\n new_colors[color] = new_areas\n new_altitudearea.colors = new_colors\n\n new_altitudearea.obstacles = {key: tuple(new_geometry.intersection(obstacle)\n for obstacle in height_obstacles\n if new_geometry_prep.intersects(obstacle))\n for key, height_obstacles in altitudearea.obstacles.items()}\n new_altitudearea.obstacles = {height: height_obstacles\n for height, height_obstacles in 
new_altitudearea.obstacles.items()\n if height_obstacles}\n\n new_geoms.altitudeareas.append(new_altitudearea)\n\n if new_geoms.walls.is_empty and not new_geoms.altitudeareas:\n continue\n\n new_geoms.ramps = tuple(\n ramp for ramp in (crop_to.intersection(ramp) for ramp in old_geoms.ramps)\n if not ramp.is_empty\n )\n\n new_geoms.heightareas = tuple(\n (area, height) for area, height in ((crop_to.intersection(area), height)\n for area, height in old_geoms.heightareas)\n if not area.is_empty\n )\n\n new_geoms.affected_area = unary_union((\n *(altitudearea.geometry for altitudearea in new_geoms.altitudeareas),\n crop_to.intersection(new_geoms.walls.buffer(1))\n ))\n\n for access_restriction, area in old_geoms.access_restriction_affected.items():\n new_area = crop_to.intersection(area)\n if not new_area.is_empty:\n access_restriction_affected.setdefault(access_restriction, []).append(new_area)\n\n new_geoms.restricted_spaces_indoors = {}\n for access_restriction, area in old_geoms.restricted_spaces_indoors.items():\n new_area = crop_to.intersection(area)\n if not new_area.is_empty:\n new_geoms.restricted_spaces_indoors[access_restriction] = new_area\n\n new_geoms.restricted_spaces_outdoors = {}\n for access_restriction, area in old_geoms.restricted_spaces_outdoors.items():\n new_area = crop_to.intersection(area)\n if not new_area.is_empty:\n new_geoms.restricted_spaces_outdoors[access_restriction] = new_area\n\n new_geoms.pk = old_geoms.pk\n new_geoms.on_top_of_id = old_geoms.on_top_of_id\n new_geoms.short_label = old_geoms.short_label\n new_geoms.base_altitude = old_geoms.base_altitude\n new_geoms.default_height = old_geoms.default_height\n new_geoms.door_height = old_geoms.door_height\n new_geoms.min_altitude = (min(area.altitude for area in new_geoms.altitudeareas)\n if new_geoms.altitudeareas else new_geoms.base_altitude)\n\n new_geoms.build_mesh(interpolators.get(level.pk) if sublevel.pk == level.pk else None)\n\n render_data.levels.append(new_geoms)\n\n access_restriction_affected = {\n access_restriction: unary_union(areas)\n for access_restriction, areas in access_restriction_affected.items()\n }\n\n access_restriction_affected = AccessRestrictionAffected.build(access_restriction_affected)\n access_restriction_affected.save_level(level.pk, 'composite')\n\n map_history.save_level(level.pk, 'composite')\n\n package.add_level(level.pk, map_history, access_restriction_affected)\n\n render_data.save(level.pk)\n\n package.save_all()\n\n cached = {}\n cache_key = None\n cache_lock = threading.Lock()\n\n @staticmethod\n def _level_filename(pk):\n return os.path.join(settings.CACHE_ROOT, 'render_data_level_%d.pickle' % pk)\n\n @classmethod\n def get(cls, level):\n # get the current render data from local variable if no new processed mapupdate exists.\n # this is much faster than any other possible cache\n with cls.cache_lock:\n cache_key = MapUpdate.current_processed_cache_key()\n level_pk = str(level.pk if isinstance(level, Level) else level)\n if cls.cache_key != cache_key:\n cls.cache_key = cache_key\n cls.cached = {}\n else:\n result = cls.cached.get(level_pk, None)\n if result is not None:\n return result\n\n pk = level.pk if isinstance(level, Level) else level\n result = pickle.load(open(cls._level_filename(pk), 'rb'))\n\n cls.cached[level_pk] = result\n return result\n\n def save(self, pk):\n return pickle.dump(self, open(self._level_filename(pk), 'wb'))\n"
]
| [
[
"numpy.array",
"numpy.vstack",
"numpy.full"
]
]
|
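
The numpy calls recorded for this file all serve the altitude interpolator in `LevelRenderData.rebuild`: ring coordinates are stacked with `np.vstack`, each ring gets a constant altitude column from `np.full`, and the result feeds a scipy `NearestNDInterpolator`. A small self-contained sketch of that construction, with synthetic rings standing in for the shapely geometries:

```python
import numpy as np
from scipy.interpolate import NearestNDInterpolator

# Two hypothetical altitude areas, each given as a ring of (x, y) coordinates.
rings = [
    (np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]), 100.0),
    (np.array([[2.0, 2.0], [3.0, 2.0], [3.0, 3.0], [2.0, 3.0]]), 104.5),
]

coords, values = [], []
for ring_coords, altitude in rings:
    coords.append(ring_coords)
    # One altitude value per ring coordinate, as in the render code.
    values.append(np.full((ring_coords.shape[0], 1), fill_value=altitude))

interpolator = NearestNDInterpolator(np.vstack(coords), np.vstack(values))
print(interpolator([[0.4, 0.6], [2.9, 2.1]]))  # nearest-area altitudes
```
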
CyberFlameGO/AE-WTN | [
"1a4b4bfbc4d6255dfc58c89a059f8e25ca3478e8"
]
| [
"maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms\nfrom maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist\nfrom maskrcnn_benchmark.modeling.box_coder import BoxCoder\n\n\nclass PostProcessor(nn.Module):\n \"\"\"\n From a set of classification scores, box regression and proposals,\n computes the post-processed boxes, and applies NMS to obtain the\n final results\n \"\"\"\n\n def __init__(\n self, \n score_thresh=0.05, \n nms=0.5, \n detections_per_img=100, \n box_coder=None,\n cls_agnostic_bbox_reg=False\n ):\n \"\"\"\n Arguments:\n score_thresh (float)\n nms (float)\n detections_per_img (int)\n box_coder (BoxCoder)\n \"\"\"\n super(PostProcessor, self).__init__()\n self.score_thresh = score_thresh\n self.nms = nms\n self.detections_per_img = detections_per_img\n if box_coder is None:\n box_coder = BoxCoder(weights=(10., 10., 5., 5.))\n self.box_coder = box_coder\n self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg\n\n def forward(self, x, boxes):\n \"\"\"\n Arguments:\n x (tuple[tensor, tensor]): x contains the class logits\n and the box_regression from the model.\n boxes (list[BoxList]): bounding boxes that are used as\n reference, one for ech image\n\n Returns:\n results (list[BoxList]): one BoxList for each image, containing\n the extra fields labels and scores\n \"\"\"\n class_logits, box_regression = x\n class_prob = torch.sigmoid(class_logits)\n\n # TODO think about a representation of batch of boxes\n image_shapes = [box.size for box in boxes]\n boxes_per_image = [len(box) for box in boxes]\n concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)\n\n if self.cls_agnostic_bbox_reg:\n box_regression = box_regression[:, -4:]\n proposals = self.box_coder.decode(\n box_regression.view(sum(boxes_per_image), -1), concat_boxes\n )\n if self.cls_agnostic_bbox_reg:\n proposals = proposals.repeat(1, class_prob.shape[1])\n\n num_classes = class_prob.shape[1]\n\n proposals = proposals.split(boxes_per_image, dim=0)\n class_prob = class_prob.split(boxes_per_image, dim=0)\n\n results = []\n for prob, boxes_per_img, image_shape in zip(\n class_prob, proposals, image_shapes\n ):\n boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)\n boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = self.filter_results(boxlist, num_classes)\n results.append(boxlist)\n return results\n\n def prepare_boxlist(self, boxes, scores, image_shape):\n \"\"\"\n Returns BoxList from `boxes` and adds probability scores information\n as an extra field\n `boxes` has shape (#detections, 4 * #classes), where each row represents\n a list of predicted bounding boxes for each of the object classes in the\n dataset (including the background class). The detections in each row\n originate from the same object proposal.\n `scores` has shape (#detection, #classes), where each row represents a list\n of object detection confidence scores for each of the object classes in the\n dataset (including the background class). 
`scores[i, j]`` corresponds to the\n box at `boxes[i, j * 4:(j + 1) * 4]`.\n \"\"\"\n boxes = boxes.reshape(-1, 4)\n scores = scores.reshape(-1)\n boxlist = BoxList(boxes, image_shape, mode=\"xyxy\")\n boxlist.add_field(\"scores\", scores)\n return boxlist\n\n def filter_results(self, boxlist, num_classes):\n \"\"\"Returns bounding-box detection results by thresholding on scores and\n applying non-maximum suppression (NMS).\n \"\"\"\n # unwrap the boxlist to avoid additional overhead.\n # if we had multi-class NMS, we could perform this directly on the boxlist\n boxes = boxlist.bbox.reshape(-1, num_classes * 4)\n scores = boxlist.get_field(\"scores\").reshape(-1, num_classes)\n\n device = scores.device\n result = []\n # Apply threshold on detection probabilities and apply NMS\n # Skip j = 0, because it's the background class\n inds_all = scores > self.score_thresh\n for j in range(1, num_classes+1):\n inds = inds_all[:, j-1].nonzero().squeeze(1)\n scores_j = scores[inds, j-1]\n boxes_j = boxes[inds, (j-1) * 4 : (j-1 + 1) * 4]\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\n boxlist_for_class.add_field(\"scores\", scores_j)\n boxlist_for_class = boxlist_nms(\n boxlist_for_class, self.nms, score_field=\"scores\"\n )\n num_labels = len(boxlist_for_class)\n boxlist_for_class.add_field(\n \"labels\", torch.full((num_labels,), j, dtype=torch.int64, device=device)\n )\n result.append(boxlist_for_class)\n\n result = cat_boxlist(result)\n number_of_detections = len(result)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > self.detections_per_img > 0:\n cls_scores = result.get_field(\"scores\")\n image_thresh, _ = torch.kthvalue(\n cls_scores.cpu(), number_of_detections - self.detections_per_img + 1\n )\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep).squeeze(1)\n result = result[keep]\n return result\n\n\ndef make_roi_box_post_processor(cfg):\n use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN\n\n bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS\n box_coder = BoxCoder(weights=bbox_reg_weights)\n\n score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH\n nms_thresh = cfg.MODEL.ROI_HEADS.NMS\n detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG\n cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG\n\n postprocessor = PostProcessor(\n score_thresh, \n nms_thresh, \n detections_per_img, \n box_coder,\n cls_agnostic_bbox_reg\n )\n return postprocessor\n"
]
| [
[
"torch.sigmoid",
"torch.nonzero",
"torch.full",
"torch.cat"
]
]
|
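
The torch calls catalogued here implement score thresholding and label bookkeeping inside `PostProcessor.filter_results`. A stripped-down sketch of that pattern on dummy tensors (no BoxList or NMS, which are maskrcnn_benchmark-specific):

```python
import torch

num_classes = 3
logits = torch.randn(5, num_classes)      # 5 detections, 3 foreground classes
scores = torch.sigmoid(logits)            # class probabilities, as in forward()

score_thresh = 0.5
inds_all = scores > score_thresh

kept_scores, kept_labels = [], []
for j in range(1, num_classes + 1):
    # Indices of detections passing the threshold for class j.
    inds = torch.nonzero(inds_all[:, j - 1]).squeeze(1)
    scores_j = scores[inds, j - 1]
    kept_scores.append(scores_j)
    # Constant label tensor for this class, mirroring torch.full(..., j) above.
    kept_labels.append(torch.full((len(inds),), j, dtype=torch.int64))

print(torch.cat(kept_scores))
print(torch.cat(kept_labels))
```
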
swaitw/EasyRec | [
"1a44891c557935eec29c6dc0d4dfc4a543fbd39a"
]
| [
"easy_rec/python/model/esmm.py"
]
| [
"# -*- encoding:utf-8 -*-\n# Copyright (c) Alibaba, Inc. and its affiliates.\nimport logging\n\nimport tensorflow as tf\n\nfrom easy_rec.python.layers import dnn\nfrom easy_rec.python.model.multi_task_model import MultiTaskModel\nfrom easy_rec.python.protos.esmm_pb2 import ESMM as ESMMConfig\nfrom easy_rec.python.protos.loss_pb2 import LossType\n\nif tf.__version__ >= '2.0':\n tf = tf.compat.v1\nlosses = tf.losses\nmetrics = tf.metrics\n\n\nclass ESMM(MultiTaskModel):\n\n def __init__(self,\n model_config,\n feature_configs,\n features,\n labels=None,\n is_training=False):\n super(ESMM, self).__init__(model_config, feature_configs, features, labels,\n is_training)\n assert self._model_config.WhichOneof('model') == 'esmm', \\\n 'invalid model config: %s' % self._model_config.WhichOneof('model')\n self._model_config = self._model_config.esmm\n assert isinstance(self._model_config, ESMMConfig)\n\n self._group_num = len(self._model_config.groups)\n self._group_features = []\n if self._group_num > 0:\n logging.info('group_num: {0}'.format(self._group_num))\n for group_id in range(self._group_num):\n group = self._model_config.groups[group_id]\n group_feature, _ = self._input_layer(self._feature_dict, group.input)\n self._group_features.append(group_feature)\n else:\n group_feature, _ = self._input_layer(self._feature_dict, 'all')\n self._group_features.append(group_feature)\n\n # This model only supports two tasks (cvr+ctr or playtime+ctr).\n # In order to be consistent with the paper,\n # we call these two towers cvr_tower (main tower) and ctr_tower (aux tower).\n self._cvr_tower_cfg = self._model_config.cvr_tower\n self._ctr_tower_cfg = self._model_config.ctr_tower\n self._init_towers([self._cvr_tower_cfg, self._ctr_tower_cfg])\n\n assert self._model_config.ctr_tower.loss_type == LossType.CLASSIFICATION, \\\n 'ctr tower must be binary classification.'\n for task_tower_cfg in self._task_towers:\n assert task_tower_cfg.num_class == 1, 'Does not support multiclass classification problem'\n\n def build_loss_graph(self):\n \"\"\"Build loss graph.\n\n Returns:\n self._loss_dict: Weighted loss of ctr and cvr.\n \"\"\"\n cvr_tower_name = self._cvr_tower_cfg.tower_name\n ctr_tower_name = self._ctr_tower_cfg.tower_name\n cvr_label_name = self._label_name_dict[cvr_tower_name]\n ctr_label_name = self._label_name_dict[ctr_tower_name]\n if self._cvr_tower_cfg.loss_type == LossType.CLASSIFICATION:\n ctcvr_label = tf.cast(\n self._labels[cvr_label_name] * self._labels[ctr_label_name],\n tf.float32)\n cvr_losses = tf.keras.backend.binary_crossentropy(\n ctcvr_label, self._prediction_dict['probs_ctcvr'])\n cvr_loss = tf.reduce_sum(cvr_losses, name='ctcvr_loss')\n # The weight defaults to 1.\n self._loss_dict['weighted_cross_entropy_loss_%s' %\n cvr_tower_name] = self._cvr_tower_cfg.weight * cvr_loss\n\n elif self._cvr_tower_cfg.loss_type == LossType.L2_LOSS:\n logging.info('l2 loss is used')\n cvr_dtype = self._labels[cvr_label_name].dtype\n ctcvr_label = self._labels[cvr_label_name] * tf.cast(\n self._labels[ctr_label_name], cvr_dtype)\n cvr_loss = tf.losses.mean_squared_error(\n labels=ctcvr_label,\n predictions=self._prediction_dict['y_ctcvr'],\n weights=self._sample_weight)\n self._loss_dict['weighted_l2_loss_%s' %\n cvr_tower_name] = self._cvr_tower_cfg.weight * cvr_loss\n _labels = tf.cast(self._labels[ctr_label_name], tf.float32)\n _logits = self._prediction_dict['logits_%s' % ctr_tower_name]\n cross = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=_labels, logits=_logits, name='ctr_loss')\n ctr_loss 
= tf.reduce_sum(cross)\n self._loss_dict['weighted_cross_entropy_loss_%s' %\n ctr_tower_name] = self._ctr_tower_cfg.weight * ctr_loss\n return self._loss_dict\n\n def build_metric_graph(self, eval_config):\n \"\"\"Build metric graph.\n\n Args:\n eval_config: Evaluation configuration.\n\n Returns:\n metric_dict: Calculate AUC of ctr, cvr and ctrvr.\n \"\"\"\n metric_dict = {}\n\n cvr_tower_name = self._cvr_tower_cfg.tower_name\n ctr_tower_name = self._ctr_tower_cfg.tower_name\n cvr_label_name = self._label_name_dict[cvr_tower_name]\n ctr_label_name = self._label_name_dict[ctr_tower_name]\n for metric in self._cvr_tower_cfg.metrics_set:\n # CTCVR metric\n ctcvr_label_name = cvr_label_name + '_ctcvr'\n cvr_dtype = self._labels[cvr_label_name].dtype\n self._labels[ctcvr_label_name] = self._labels[cvr_label_name] * tf.cast(\n self._labels[ctr_label_name], cvr_dtype)\n metric_dict.update(\n self._build_metric_impl(\n metric,\n loss_type=self._cvr_tower_cfg.loss_type,\n label_name=ctcvr_label_name,\n num_class=self._cvr_tower_cfg.num_class,\n suffix='_ctcvr'))\n\n # CVR metric\n cvr_label_masked_name = cvr_label_name + '_masked'\n ctr_mask = self._labels[ctr_label_name] > 0\n self._labels[cvr_label_masked_name] = tf.boolean_mask(\n self._labels[cvr_label_name], ctr_mask)\n pred_prefix = 'probs' if self._cvr_tower_cfg.loss_type == LossType.CLASSIFICATION else 'y'\n pred_name = '%s_%s' % (pred_prefix, cvr_tower_name)\n self._prediction_dict[pred_name + '_masked'] = tf.boolean_mask(\n self._prediction_dict[pred_name], ctr_mask)\n metric_dict.update(\n self._build_metric_impl(\n metric,\n loss_type=self._cvr_tower_cfg.loss_type,\n label_name=cvr_label_masked_name,\n num_class=self._cvr_tower_cfg.num_class,\n suffix='_%s_masked' % cvr_tower_name))\n\n for metric in self._ctr_tower_cfg.metrics_set:\n # CTR metric\n metric_dict.update(\n self._build_metric_impl(\n metric,\n loss_type=self._ctr_tower_cfg.loss_type,\n label_name=ctr_label_name,\n num_class=self._ctr_tower_cfg.num_class,\n suffix='_%s' % ctr_tower_name))\n return metric_dict\n\n def _add_to_prediction_dict(self, output):\n super(ESMM, self)._add_to_prediction_dict(output)\n if self._cvr_tower_cfg.loss_type == LossType.CLASSIFICATION:\n prob = tf.multiply(\n self._prediction_dict['probs_%s' % self._cvr_tower_cfg.tower_name],\n self._prediction_dict['probs_%s' % self._ctr_tower_cfg.tower_name])\n # pctcvr = pctr * pcvr\n self._prediction_dict['probs_ctcvr'] = prob\n\n else:\n prob = tf.multiply(\n self._prediction_dict['y_%s' % self._cvr_tower_cfg.tower_name],\n self._prediction_dict['probs_%s' % self._ctr_tower_cfg.tower_name])\n # pctcvr = pctr * pcvr\n self._prediction_dict['y_ctcvr'] = prob\n\n def build_predict_graph(self):\n \"\"\"Forward function.\n\n Returns:\n self._prediction_dict: Prediction result of two tasks.\n \"\"\"\n if self._group_num > 0:\n group_fea_arr = []\n # Both towers share the underlying network.\n for group_id in range(self._group_num):\n group_fea = self._group_features[group_id]\n group = self._model_config.groups[group_id]\n group_name = group.input\n dnn_model = dnn.DNN(group.dnn, self._l2_reg, group_name,\n self._is_training)\n group_fea = dnn_model(group_fea)\n group_fea_arr.append(group_fea)\n all_fea = tf.concat(group_fea_arr, axis=1)\n else:\n all_fea = self._group_features[0]\n\n cvr_tower_name = self._cvr_tower_cfg.tower_name\n dnn_model = dnn.DNN(\n self._cvr_tower_cfg.dnn,\n self._l2_reg,\n name=cvr_tower_name,\n is_training=self._is_training)\n cvr_tower_output = dnn_model(all_fea)\n cvr_tower_output 
= tf.layers.dense(\n inputs=cvr_tower_output,\n units=1,\n kernel_regularizer=self._l2_reg,\n name='%s/dnn_output' % cvr_tower_name)\n\n ctr_tower_name = self._ctr_tower_cfg.tower_name\n dnn_model = dnn.DNN(\n self._ctr_tower_cfg.dnn,\n self._l2_reg,\n name=ctr_tower_name,\n is_training=self._is_training)\n ctr_tower_output = dnn_model(all_fea)\n ctr_tower_output = tf.layers.dense(\n inputs=ctr_tower_output,\n units=1,\n kernel_regularizer=self._l2_reg,\n name='%s/dnn_output' % ctr_tower_name)\n\n tower_outputs = {\n cvr_tower_name: cvr_tower_output,\n ctr_tower_name: ctr_tower_output\n }\n self._add_to_prediction_dict(tower_outputs)\n return self._prediction_dict\n\n def get_outputs(self):\n \"\"\"Get model outputs.\n\n Returns:\n outputs: The list of tensor names output by the model.\n \"\"\"\n outputs = super(ESMM, self).get_outputs()\n if self._cvr_tower_cfg.loss_type == LossType.CLASSIFICATION:\n outputs.append('probs_ctcvr')\n elif self._cvr_tower_cfg.loss_type == LossType.L2_LOSS:\n outputs.append('y_ctcvr')\n else:\n raise ValueError('invalid cvr_tower loss type: %s' %\n str(self._cvr_tower_cfg.loss_type))\n return outputs\n"
]
| [
[
"tensorflow.keras.backend.binary_crossentropy",
"tensorflow.boolean_mask",
"tensorflow.losses.mean_squared_error",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.layers.dense",
"tensorflow.nn.sigmoid_cross_entropy_with_logits"
]
]
|
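
The TensorFlow calls listed for ESMM centre on the pCTCVR = pCTR × pCVR construction: each tower emits one logit, the CTR tower gets a sigmoid cross-entropy loss, and the CTCVR loss is a binary cross-entropy on the product of the two probabilities. A minimal eager-mode sketch of those two loss terms on toy tensors (not the EasyRec towers or graph-mode code):

```python
import tensorflow as tf

# Toy batch: per-example logits from the CVR and CTR towers, plus labels.
cvr_logits = tf.constant([[0.3], [-1.2], [2.0]])
ctr_logits = tf.constant([[1.5], [0.1], [-0.4]])
ctr_label = tf.constant([[1.0], [0.0], [1.0]])
cvr_label = tf.constant([[1.0], [0.0], [0.0]])

probs_cvr = tf.sigmoid(cvr_logits)
probs_ctr = tf.sigmoid(ctr_logits)
# pCTCVR = pCTR * pCVR, as in _add_to_prediction_dict.
probs_ctcvr = tf.multiply(probs_ctr, probs_cvr)

# The CTCVR label is the product of the two labels (cast to float in the model).
ctcvr_label = tf.cast(cvr_label * ctr_label, tf.float32)
ctcvr_loss = tf.reduce_sum(
    tf.keras.backend.binary_crossentropy(ctcvr_label, probs_ctcvr))

ctr_loss = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=ctr_label, logits=ctr_logits))

print(float(ctcvr_loss), float(ctr_loss))
```
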
meteoNL/CM1_experiments | [
"49e3f471845cd821fa5e5bf92a3f37227ede9e68"
]
| [
"cm1r19.8/run/scripts/read_nc_CM1.py"
]
| [
"#!/usr/bin/env python3\n###### -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 14 17:29:45 2019\n\n@author: egroot\n\"\"\"\n#imports\nimport matplotlib.pyplot as pl\nfrom preparation_script import *\n#import numpy.ma as ma\n\nfnames=[] #initialise list\n\n## distinction for legend between integrated/surface and 3D quantities!\nif lvl == \"vert\":\n viewdbz=test[varname][:,:,:]\nelse: \n viewdbz=test[varname][:,lvl,:,:]\n maxlvl = len(test[varname][0,:,0,0])\n\nfor i in np.arange(steps):\n # create separate .png-files for .gif-animation\n #clear plot, make figue and initiate grid\n pl.clf();\n pl.figure(figsize=(12,8))\n pl.grid()\n \n #plot the field 2D\n pl.contourf(xmask,ymask,viewdbz[i,:,:], clrs,cmap=\"gist_rainbow_r\",vmin=minimum,vmax=maximum)\n pl.colorbar(ticks = np.linspace(minimum,maximum,11))\n \n #add labels and set plotted coordinates\n pl.xlabel(\"x (km)\")\n pl.ylabel(\"y (km)\")\n pl.ylim(-lensim/2.,lensim/2.)\n pl.xlim(-lensim/2.,lensim/2.)\n \n #adapt title to either 2D or 3D quantity\n if lvl != \"vert\":\n pl.title(namesim+\" | time = %.3d\"% time[i]+\" min\"+ \" | level = \"+str(lvl)+\"/\"+str(maxlvl))\n else:\n pl.title(namesim+\" | time = %.3d\"% time[i]+\" min\")\n \n #add full variable name and unit to legend\n fullname=str(getattr(test.variables[varname], \"long_name\")+\" (\"+test.variables[varname].units+\")\")\n pl.text((lensim*0.52),0, fullname, verticalalignment='center',rotation=90)\n \n #save and add filenames to list for gif\n fn = str(path+namesim+\"/pngs/\"+name_figs+\"%.3d\" % i+\".png\")\n pl.savefig(fn)\n fnames+=[fn]\n#%%\nimport imageio\nwith imageio.get_writer(str(path+namesim+\"/movie\"+varname+\".gif\"), mode='I') as writer:\n #create gif from separate .png-files\n for fn in fnames:\n image = imageio.imread(fn)\n writer.append_data(image)\n\n\n"
]
| [
[
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure"
]
]
|
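
The matplotlib calls in this row form a per-timestep contour plot written to PNG, which imageio then stitches into a GIF. A self-contained sketch of that loop on a synthetic field (the real script reads CM1 output via `preparation_script`, so the data and paths below are placeholders):

```python
import numpy as np
import matplotlib.pyplot as pl
import imageio

x = y = np.linspace(-50, 50, 101)
X, Y = np.meshgrid(x, y)

fnames = []
for i in range(3):
    pl.clf()
    pl.figure(figsize=(12, 8))
    pl.grid()
    # Synthetic field standing in for reflectivity at timestep i.
    field = np.exp(-((X - 5 * i) ** 2 + Y ** 2) / 300.0)
    pl.contourf(X, Y, field, np.linspace(0, 1, 11), cmap="gist_rainbow_r")
    pl.colorbar(ticks=np.linspace(0, 1, 11))
    pl.xlabel("x (km)")
    pl.ylabel("y (km)")
    pl.xlim(-50, 50)
    pl.ylim(-50, 50)
    pl.title("toy field | time = %.3d min" % (5 * i))
    fn = "frame%.3d.png" % i
    pl.savefig(fn)
    fnames.append(fn)

# Assemble the saved frames into an animation, as at the end of the script.
with imageio.get_writer("movie.gif", mode="I") as writer:
    for fn in fnames:
        writer.append_data(imageio.imread(fn))
```
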
nordme/nordme_work_repo | [
"950a5077730885330a4975c4ca1bd3c51903a1a6"
]
| [
"indv_movies.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport os\nimport os.path as op\nimport mne\nimport numpy as np\n\n# set variables\n\nsigned = False\ninv_method = 'dSPM'\nsubj_dir = '/data/acdc/'\nsubjects = ['erica_peterson']\nbackend = 'mavayi'\n\nif signed:\n tag = '/signed/'\n clim = {'kind': 'percent', 'pos_lims': np.arange(96, 101, 2)}\nelse:\n tag = '/'\n clim = {'kind': 'percent', 'lims': [96, 98, 100]}\n\n# stc_options = ['al01',\n# 'al02',\n# 'al03',\n# 'fl01',\n# 'fl02',\n# 'fl03',\n# 'el01',\n# 'el02',\n# 'el03',\n# 'tl01',\n# 'tl02',\n# 'tl03']\n\nstc_options = ['oddball1_dSPM',\n 'oddball2_dSPM',\n 'standard_dSPM',\n ]\n\n# read in the stc and plot it\n\nfor subject in subjects:\n print('Working on the movie for subject %s' % subject)\n# stcs_dir = op.join(subj_dir, subject, '%s_stc' % inv_method, 'auditory')\n stcs_dir = op.join(subj_dir, subject, 'stcs')\n movie_dir = op.join(subj_dir, subject, 'movies')\n stc_names = ['%s_%s' % (subject, x) for x in stc_options]\n for name in stc_names:\n stc = mne.read_source_estimate(op.join(stcs_dir, name))\n title = 'stc: %s method: %s (96+ pctl)' % (name, inv_method)\n plot = stc.plot(subject=subject, surface='inflated', hemi='split', colormap='cool', views=['lat', 'med'],\n clim=clim, title=title, time_viewer=True, size=(800, 800), show_traces=True)\n save_name = op.join(movie_dir, '%s_percent.mov' % (name))\n plot.save_movie(filename=save_name, time_dilation=24, framerate=20, interpolation='linear')\n plot.close()"
]
| [
[
"numpy.arange"
]
]
|
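
The only external call recorded for this script is `numpy.arange`, used to build the percentile colormap limits passed to `stc.plot`. A tiny sketch of that construction:

```python
import numpy as np

signed = True
if signed:
    # Symmetric positive limits at the 96th, 98th and 100th percentiles.
    clim = {'kind': 'percent', 'pos_lims': np.arange(96, 101, 2)}
else:
    clim = {'kind': 'percent', 'lims': [96, 98, 100]}

print(clim)  # {'kind': 'percent', 'pos_lims': array([ 96,  98, 100])}
```
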
briansp2020/courses | [
"7849aaff37518dd31eb22266e8453ecb469b1f90"
]
| [
"deeplearning1/nbs/vgg16.py"
]
| [
"from __future__ import division, print_function\n\nimport os, json\nfrom glob import glob\nimport numpy as np\nfrom scipy import misc, ndimage\nfrom scipy.ndimage.interpolation import zoom\n\nfrom keras import backend as K\n\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils.data_utils import get_file\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras.preprocessing import image\nfrom keras.utils import convert_all_kernels_in_model\n\ndef ceildiv(a, b):\n return -(-a // b)\n\t\n# In case we are going to use the TensorFlow backend we need to explicitly set the Theano image ordering\nK.set_image_dim_ordering('th')\n\n\nvgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))\ndef vgg_preprocess(x):\n \"\"\"\n Subtracts the mean RGB value, and transposes RGB to BGR.\n The mean RGB was computed on the image set used to train the VGG model.\n\n Args: \n x: Image array (height x width x channels)\n Returns:\n Image array (height x width x transposed_channels)\n \"\"\"\n x = x - vgg_mean\n return x[:, ::-1] # reverse axis rgb->bgr\n\n\nclass Vgg16():\n \"\"\"\n The VGG 16 Imagenet model\n \"\"\"\n\n\n def __init__(self):\n self.FILE_PATH = 'http://files.fast.ai/models/'\n self.create()\n self.get_classes()\n\n\n def get_classes(self):\n \"\"\"\n Downloads the Imagenet classes index file and loads it to self.classes.\n The file is downloaded only if it not already in the cache.\n \"\"\"\n fname = 'imagenet_class_index.json'\n fpath = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')\n with open(fpath) as f:\n class_dict = json.load(f)\n self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]\n\n def predict(self, imgs, details=False):\n \"\"\"\n Predict the labels of a set of images using the VGG16 model.\n\n Args:\n imgs (ndarray) : An array of N images (size: N x width x height x channels).\n details : ??\n \n Returns:\n preds (np.array) : Highest confidence value of the predictions for each image.\n idxs (np.ndarray): Class index of the predictions with the max confidence.\n classes (list) : Class labels of the predictions with the max confidence.\n \"\"\"\n # predict probability of each class for each image\n all_preds = self.model.predict(imgs)\n # for each image get the index of the class with max probability\n idxs = np.argmax(all_preds, axis=1)\n # get the values of the highest probability for each image\n preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]\n # get the label of the class with the highest probability for each image\n classes = [self.classes[idx] for idx in idxs]\n return np.array(preds), idxs, classes\n\n\n def ConvBlock(self, layers, filters):\n \"\"\"\n Adds a specified number of ZeroPadding and Covolution layers\n to the model, and a MaxPooling layer at the very end.\n\n Args:\n layers (int): The number of zero padded convolution layers\n to be added to the model.\n filters (int): The number of convolution filters to be \n created for each layer.\n \"\"\"\n model = self.model\n for i in range(layers):\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(filters, (3, 3), activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n\n def FCBlock(self):\n \"\"\"\n Adds a fully connected layer of 4096 neurons to the model with a\n Dropout 
of 0.5\n\n Args: None\n Returns: None\n \"\"\"\n model = self.model\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n\n\n def create(self):\n \"\"\"\n Creates the VGG16 network achitecture and loads the pretrained weights.\n\n Args: None\n Returns: None\n \"\"\"\n model = self.model = Sequential()\n model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))\n\n self.ConvBlock(2, 64)\n self.ConvBlock(2, 128)\n self.ConvBlock(3, 256)\n self.ConvBlock(3, 512)\n self.ConvBlock(3, 512)\n\n model.add(Flatten())\n self.FCBlock()\n self.FCBlock()\n model.add(Dense(1000, activation='softmax'))\n\n fname = 'vgg16.h5'\n model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))\n # convert to tf\n convert_all_kernels_in_model(model)\n\n\n def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):\n \"\"\"\n Takes the path to a directory, and generates batches of augmented/normalized data. Yields batches indefinitely, in an infinite loop.\n\n See Keras documentation: https://keras.io/preprocessing/image/\n \"\"\"\n return gen.flow_from_directory(path, target_size=(224,224),\n class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)\n\n\n def ft(self, num):\n \"\"\"\n Replace the last layer of the model with a Dense (fully connected) layer of num neurons.\n Will also lock the weights of all layers except the new layer so that we only learn\n weights for the last layer in subsequent training.\n\n Args:\n num (int) : Number of neurons in the Dense layer\n Returns:\n None\n \"\"\"\n model = self.model\n model.pop()\n for layer in model.layers: layer.trainable=False\n model.add(Dense(num, activation='softmax'))\n self.compile()\n\n def finetune(self, batches):\n \"\"\"\n Modifies the original VGG16 network architecture and updates self.classes for new training data.\n \n Args:\n batches : A keras.preprocessing.image.ImageDataGenerator object.\n See definition for get_batches().\n \"\"\"\n self.ft(batches.num_classes)\n classes = list(iter(batches.class_indices)) # get a list of all the class labels\n \n # batches.class_indices is a dict with the class name as key and an index as value\n # eg. 
{'cats': 0, 'dogs': 1}\n\n # sort the class labels by index according to batches.class_indices and update model.classes\n for c in batches.class_indices:\n classes[batches.class_indices[c]] = c\n self.classes = classes\n\n\n def compile(self, lr=0.001):\n \"\"\"\n Configures the model for training.\n See Keras documentation: https://keras.io/models/model/\n \"\"\"\n self.model.compile(optimizer=Adam(lr=lr),\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n\n def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):\n \"\"\"\n Trains the model for a fixed number of epochs (iterations on a dataset).\n See Keras documentation: https://keras.io/models/model/\n \"\"\"\n self.model.fit(trn, labels, nb_epoch=nb_epoch,\n validation_data=(val, val_labels), batch_size=batch_size)\n\n\n def fit(self, batches, val_batches, nb_epoch=1):\n \"\"\"\n Fits the model on data yielded batch-by-batch by a Python generator.\n See Keras documentation: https://keras.io/models/model/\n \"\"\"\n self.model.fit_generator(batches, steps_per_epoch=ceildiv(batches.samples, batches.batch_size), epochs=nb_epoch,\n validation_data=val_batches, validation_steps=ceildiv(val_batches.samples, val_batches.batch_size))\n\n\n def test(self, path, batch_size=8):\n \"\"\"\n Predicts the classes using the trained model on data yielded batch-by-batch.\n\n Args:\n path (string): Path to the target directory. It should contain one subdirectory \n per class.\n batch_size (int): The number of images to be considered in each batch.\n \n Returns:\n test_batches, numpy array(s) of predictions for the test_batches.\n \n \"\"\"\n test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)\n return test_batches, self.model.predict_generator(test_batches, ceildiv(test_batches.samples, test_batches.batch_size))\n\n"
]
| [
[
"numpy.array",
"numpy.argmax"
]
]
|
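
The two numpy calls in this VGG16 wrapper both live in `predict`: `np.argmax` picks the most probable class per image and `np.array` packages the matching confidences. A small sketch of that post-processing on dummy softmax output, with made-up class names:

```python
import numpy as np

classes = ['cat', 'dog', 'horse']
# Dummy softmax output for two images (each row sums to 1).
all_preds = np.array([[0.1, 0.7, 0.2],
                      [0.6, 0.3, 0.1]])

idxs = np.argmax(all_preds, axis=1)                              # top class per image
preds = np.array([all_preds[i, idxs[i]] for i in range(len(idxs))])
labels = [classes[i] for i in idxs]

print(preds, idxs, labels)   # [0.7 0.6] [1 0] ['dog', 'cat']
```
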
world-admin/folium | [
"1b775f63fb3fcda385348bcbabe82d1915091c62"
]
| [
"tests/test_folium.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nFolium Tests\n-------\n\n\"\"\"\nimport pytest\n\nimport os\nimport json\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\n\nimport pandas as pd\nimport jinja2\nfrom jinja2 import Environment, PackageLoader\n\nfrom six import PY3\nimport branca.element\n\nimport folium\nfrom folium.map import Popup, Marker, FitBounds, FeatureGroup\nfrom folium.features import TopoJson, RectangleMarker, PolygonMarker\n\nrootpath = os.path.abspath(os.path.dirname(__file__))\n\n\ndef setup_data():\n \"\"\"Import economic data for testing.\"\"\"\n with open(os.path.join(rootpath, 'us-counties.json'), 'r') as f:\n get_id = json.load(f)\n\n county_codes = [x['id'] for x in get_id['features']]\n county_df = pd.DataFrame({'FIPS_Code': county_codes}, dtype=str)\n\n # Read into Dataframe, cast to string for consistency.\n df = pd.read_csv(os.path.join(rootpath, 'us_county_data.csv'),\n na_values=[' '])\n df['FIPS_Code'] = df['FIPS_Code'].astype(str)\n\n # Perform an inner join, pad NA's with data from nearest county.\n merged = pd.merge(df, county_df, on='FIPS_Code', how='inner')\n return merged.fillna(method='pad')\n\n\ndef test_get_templates():\n \"\"\"Test template getting.\"\"\"\n\n env = branca.utilities.get_templates()\n assert isinstance(env, jinja2.environment.Environment)\n\n\nclass TestFolium(object):\n \"\"\"Test class for the Folium library.\"\"\"\n\n def setup(self):\n \"\"\"Setup Folium Map.\"\"\"\n with mock.patch('branca.element.uuid4') as uuid4:\n uuid4().hex = '0' * 32\n self.map = folium.Map(location=[45.5236, -122.6750], width=900,\n height=400, max_zoom=20, zoom_start=4)\n self.env = Environment(loader=PackageLoader('folium', 'templates'))\n\n def test_init(self):\n \"\"\"Test map initialization.\"\"\"\n\n assert self.map.get_name() == 'map_00000000000000000000000000000000'\n assert self.map.get_root() == self.map._parent\n assert self.map.location == [45.5236, -122.6750]\n assert self.map.zoom_start == 4\n assert self.map.max_lat == 90\n assert self.map.min_lat == -90\n assert self.map.max_lon == 180\n assert self.map.min_lon == -180\n assert self.map.position == 'relative'\n assert self.map.height == (400, 'px')\n assert self.map.width == (900, 'px')\n assert self.map.left == (0, '%')\n assert self.map.top == (0, '%')\n assert self.map.global_switches.prefer_canvas is False\n assert self.map.global_switches.no_touch is False\n assert self.map.global_switches.disable_3d is False\n assert self.map.to_dict() == {\n \"name\": \"Map\",\n \"id\": \"00000000000000000000000000000000\",\n \"children\": {\n \"openstreetmap\": {\n \"name\": \"TileLayer\",\n \"id\": \"00000000000000000000000000000000\",\n \"children\": {}\n }\n }\n }\n\n def test_cloudmade(self):\n \"\"\"Test cloudmade tiles and the API key.\"\"\"\n with pytest.raises(ValueError):\n folium.Map(location=[45.5236, -122.6750], tiles='cloudmade')\n\n map = folium.Map(location=[45.5236, -122.6750], tiles='cloudmade',\n API_key='###')\n cloudmade = 'http://{s}.tile.cloudmade.com/###/997/256/{z}/{x}/{y}.png'\n assert map._children['cloudmade'].tiles == cloudmade\n\n bounds = map.get_bounds()\n assert bounds == [[None, None], [None, None]], bounds\n\n def test_builtin_tile(self):\n \"\"\"Test custom maptiles.\"\"\"\n\n default_tiles = ['OpenStreetMap', 'Stamen Terrain', 'Stamen Toner']\n for tiles in default_tiles:\n map = folium.Map(location=[45.5236, -122.6750], tiles=tiles)\n tiles = ''.join(tiles.lower().strip().split())\n url = 'tiles/{}/tiles.txt'.format\n attr = 
'tiles/{}/attr.txt'.format\n url = map._env.get_template(url(tiles)).render()\n attr = map._env.get_template(attr(tiles)).render()\n\n assert map._children[tiles].tiles == url\n assert map._children[tiles].attr == attr\n\n bounds = map.get_bounds()\n assert bounds == [[None, None], [None, None]], bounds\n\n def test_custom_tile(self):\n \"\"\"Test custom tile URLs.\"\"\"\n\n url = 'http://{s}.custom_tiles.org/{z}/{x}/{y}.png'\n attr = 'Attribution for custom tiles'\n\n with pytest.raises(ValueError):\n folium.Map(location=[45.5236, -122.6750], tiles=url)\n\n map = folium.Map(location=[45.52, -122.67], tiles=url, attr=attr)\n assert map._children[url].tiles == url\n assert map._children[url].attr == attr\n\n bounds = map.get_bounds()\n assert bounds == [[None, None], [None, None]], bounds\n\n def test_feature_group(self):\n \"\"\"Test FeatureGroup.\"\"\"\n\n map = folium.Map()\n feature_group = FeatureGroup()\n feature_group.add_child(Marker([45, -30], popup=Popup('-30')))\n feature_group.add_child(Marker([45, 30], popup=Popup('30')))\n map.add_child(feature_group)\n map.add_child(folium.map.LayerControl())\n\n map._repr_html_()\n\n bounds = map.get_bounds()\n assert bounds == [[45, -30], [45, 30]], bounds\n\n def test_circle_marker(self):\n \"\"\"Test circle marker additions.\"\"\"\n\n self.map = folium.Map(location=[45.60, -122.8])\n circ_templ = self.env.get_template('circle_marker.js')\n\n # Single Circle marker.\n marker = folium.features.CircleMarker([45.60, -122.8], popup='Hi')\n self.map.add_child(marker)\n circle_1 = circ_templ.render({'circle': marker.get_name(),\n 'lat': 45.60,\n 'lon': -122.8, 'radius': 500,\n 'line_color': 'black',\n 'fill_color': 'black',\n 'fill_opacity': 0.6})\n assert (''.join(circle_1.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n # Second circle marker.\n marker = folium.features.CircleMarker([45.70, -122.9], popup='Hi')\n self.map.add_child(marker)\n circle_2 = circ_templ.render({'circle': marker.get_name(),\n 'lat': 45.70,\n 'lon': -122.9, 'radius': 500,\n 'line_color': 'black',\n 'fill_color': 'black',\n 'fill_opacity': 0.6})\n assert (''.join(circle_2.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n bounds = self.map.get_bounds()\n assert bounds == [[45.6, -122.9], [45.7, -122.8]], bounds\n\n def test_rectangle_marker(self):\n \"\"\"Test rectangle marker additions.\"\"\"\n\n self.map = folium.Map(location=[45.60, -122.8])\n rect_templ = self.env.get_template('rectangle_marker.js')\n\n # Single Rectangle marker.\n bounds = [45.60, -122.8, 45.61, -122.7]\n self.map.add_child(RectangleMarker(bounds=bounds, popup='Hi'))\n marker = list(self.map._children.values())[-1]\n rect_1 = rect_templ.render({'RectangleMarker': marker.get_name(),\n 'location': [45.60, -122.8, 45.61, -122.7],\n 'color': 'black',\n 'fill_color': 'black',\n 'fill_opacity': 0.6,\n 'weight': 1})\n assert (''.join(rect_1.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n # Second Rectangle marker.\n bounds = [45.70, -122.9, 45.75, -122.5]\n self.map.add_child(RectangleMarker(bounds=bounds, popup='Hi'))\n marker = list(self.map._children.values())[-1]\n rect_2 = rect_templ.render({'RectangleMarker': marker.get_name(),\n 'location': [45.70, -122.9, 45.75, -122.5],\n 'color': 'black',\n 'fill_color': 'black',\n 'fill_opacity': 0.6,\n 'weight': 1})\n assert (''.join(rect_2.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n bounds = self.map.get_bounds()\n assert bounds == [[45.6, -122.9], [45.7, -122.8]], 
bounds\n\n def test_polygon_marker(self):\n \"\"\"Test polygon additions.\"\"\"\n\n self.map = folium.Map(location=[45.60, -122.8])\n polygon_templ = self.env.get_template('polygon.js')\n\n # Single PolygonMarker.\n locations = [[35.6636, 139.7634],\n [35.6629, 139.7664],\n [35.6663, 139.7706],\n [35.6725, 139.7632],\n [35.6728, 139.7627],\n [35.6720, 139.7606],\n [35.6682, 139.7588],\n [35.6663, 139.7627]]\n self.map.add_child(PolygonMarker(locations=locations, popup='Hi'))\n marker = list(self.map._children.values())[-1]\n polygon_1 = polygon_templ.render({'PolygonMarker': marker.get_name(),\n 'location': locations,\n 'color': 'black',\n 'fill_color': 'black',\n 'fill_opacity': 0.6,\n 'weight': 1})\n assert (''.join(polygon_1.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n # Second PolygonMarker.\n locations = [[35.5636, 138.7634],\n [35.5629, 138.7664],\n [35.5663, 138.7706],\n [35.5725, 138.7632],\n [35.5728, 138.7627],\n [35.5720, 138.7606],\n [35.5682, 138.7588],\n [35.5663, 138.7627]]\n self.map.add_child(PolygonMarker(locations=locations, color='red',\n fill_color='red', fill_opacity=0.7,\n weight=3, popup='Hi'))\n marker = list(self.map._children.values())[-1]\n polygon_2 = polygon_templ.render({'PolygonMarker': marker.get_name(),\n 'location': locations,\n 'color': 'red',\n 'fill_color': 'red',\n 'fill_opacity': 0.7,\n 'weight': 3})\n assert (''.join(polygon_2.split())[:-1] in\n ''.join(self.map.get_root().render().split()))\n\n bounds = self.map.get_bounds()\n assert bounds == [[[35.5636, 138.7634], [35.5629, 138.7664]],\n [[35.6636, 139.7634], [35.6629, 139.7664]]], bounds\n\n def test_topo_json_smooth_factor(self):\n \"\"\"Test topojson smooth factor method.\"\"\"\n self.map = folium.Map([43, -100], zoom_start=4)\n\n # Adding TopoJSON as additional layer.\n path = os.path.join(rootpath, 'or_counties_topo.json')\n self.map.choropleth(geo_path=path,\n topojson='objects.or_counties_geo',\n smooth_factor=0.5)\n\n out = self.map._parent.render()\n\n # Verify TopoJson\n topo_json = [val for key, val in self.map._children.items()\n if isinstance(val, TopoJson)][0]\n topojson_str = topo_json._template.module.script(topo_json)\n assert ''.join(topojson_str.split())[:-1] in ''.join(out.split())\n\n def test_map_build(self):\n \"\"\"Test map build.\"\"\"\n\n # Standard map.\n self.setup()\n out = self.map._parent.render()\n html_templ = self.env.get_template('fol_template.html')\n attr = ('Data by <a href=\"http://openstreetmap.org\">OpenStreetMap'\n '</a>,under '\n '<a href=\"http://www.openstreetmap.org/copyright\">ODbL</a>.')\n tile_layers = [\n {'id': 'tile_layer_'+'0'*32,\n 'address': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',\n 'attr': attr,\n 'max_zoom': 20,\n 'min_zoom': 1,\n 'detect_retina': False,\n 'no_wrap': False,\n 'continuous_world': False\n }]\n tmpl = {'map_id': 'map_' + '0' * 32,\n 'lat': 45.5236, 'lon': -122.675,\n 'size': 'width: 900.0px; height: 400.0px;',\n 'zoom_level': 4,\n 'min_lat': -90,\n 'max_lat': 90,\n 'min_lon': -180,\n 'max_lon': 180,\n 'tile_layers': tile_layers,\n 'crs': 'EPSG3857',\n 'world_copy_jump': False\n }\n HTML = html_templ.render(tmpl, plugins={})\n\n assert ''.join(out.split()) == ''.join(HTML.split())\n\n def test_tile_attr_unicode(self):\n \"\"\"Test tile attribution unicode\n\n Test not cover b'юникод'\n because for python 3 bytes can only contain ASCII literal characters.\n \"\"\"\n\n if not PY3:\n map = folium.Map(location=[45.5236, -122.6750],\n tiles='test', attr=b'unicode')\n map._parent.render()\n 
else:\n map = folium.Map(location=[45.5236, -122.6750],\n tiles='test', attr=u'юникод')\n map._parent.render()\n map = folium.Map(location=[45.5236, -122.6750],\n tiles='test', attr='юникод')\n map._parent.render()\n\n def test_fit_bounds(self):\n \"\"\"Test fit_bounds.\"\"\"\n bounds = ((52.193636, -2.221575), (52.636878, -1.139759))\n\n self.setup()\n self.map.fit_bounds(bounds)\n fitbounds = [val for key, val in self.map._children.items() if\n isinstance(val, FitBounds)][0]\n out = self.map._parent.render()\n\n fit_bounds_tpl = self.env.get_template('fit_bounds.js')\n fit_bounds_rendered = fit_bounds_tpl.render({\n 'bounds': json.dumps(bounds),\n 'this': fitbounds,\n 'fit_bounds_options': {}, })\n\n assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split())\n\n self.setup()\n self.map.fit_bounds(bounds, max_zoom=15, padding=(3, 3))\n fitbounds = [val for key, val in self.map._children.items() if\n isinstance(val, FitBounds)][0]\n out = self.map._parent.render()\n\n fit_bounds_tpl = self.env.get_template('fit_bounds.js')\n fit_bounds_rendered = fit_bounds_tpl.render({\n 'bounds': json.dumps(bounds),\n 'fit_bounds_options': json.dumps({'maxZoom': 15,\n 'padding': (3, 3), },\n sort_keys=True),\n 'this': fitbounds,\n })\n\n assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split())\n\n bounds = self.map.get_bounds()\n assert bounds == [[None, None], [None, None]], bounds\n\n def test_custom_icon(self):\n \"\"\"Test CustomIcon.\"\"\"\n self.setup()\n\n icon_image = \"http://leafletjs.com/docs/images/leaf-green.png\"\n shadow_image = \"http://leafletjs.com/docs/images/leaf-shadow.png\"\n\n self.map = folium.Map([45, -100], zoom_start=4)\n i = folium.features.CustomIcon(icon_image,\n icon_size=(38, 95),\n icon_anchor=(22, 94),\n shadow_image=shadow_image,\n shadow_size=(50, 64),\n shadow_anchor=(4, 62),\n popup_anchor=(-3, -76),)\n mk = folium.map.Marker([45, -100], icon=i,\n popup=folium.map.Popup('Hello'))\n self.map.add_child(mk)\n self.map._parent.render()\n\n bounds = self.map.get_bounds()\n assert bounds == [[45, -100], [45, -100]], bounds\n\n def test_tile_layer(self):\n mapa = folium.Map([48., 5.], tiles='stamentoner', zoom_start=6)\n layer = 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'\n mapa.add_child(folium.map.TileLayer(layer, name='OpenStreetMap',\n attr='attribution'))\n mapa.add_child(folium.map.TileLayer(layer,\n name='OpenStreetMap2',\n attr='attribution2',\n overlay=True))\n mapa.add_child(folium.map.LayerControl())\n mapa._repr_html_()\n\n bounds = self.map.get_bounds()\n assert bounds == [[None, None], [None, None]], bounds\n\n def test_global_switches(self):\n mapa = folium.Map(prefer_canvas=True)\n assert (mapa.global_switches.prefer_canvas is True and\n mapa.global_switches.no_touch is False and\n mapa.global_switches.disable_3d is False)\n\n mapb = folium.Map(no_touch=True)\n assert (mapb.global_switches.prefer_canvas is False and\n mapb.global_switches.no_touch is True and\n mapb.global_switches.disable_3d is False)\n\n mapc = folium.Map(disable_3d=True)\n assert (mapc.global_switches.prefer_canvas is False and\n mapc.global_switches.no_touch is False and\n mapc.global_switches.disable_3d is True)\n\n mapd = folium.Map(prefer_canvas=True, no_touch=True, disable_3d=True)\n assert (mapd.global_switches.prefer_canvas is True and\n mapd.global_switches.no_touch is True and\n mapd.global_switches.disable_3d is True)\n"
]
| [
[
"pandas.merge",
"pandas.DataFrame"
]
]
|
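
The pandas calls recorded for the folium test suite come from `setup_data`, which inner-joins county FIPS codes against an economic table and pads missing values. A minimal sketch of that join plus forward fill on toy frames (newer pandas prefers `.ffill()` over `fillna(method='pad')`, which now warns):

```python
import pandas as pd

county_df = pd.DataFrame({'FIPS_Code': ['01001', '01003', '01005']}, dtype=str)
df = pd.DataFrame({'FIPS_Code': ['01001', '01003', '01005'],
                   'Unemployment_rate_2011': [7.6, None, 9.1]})
df['FIPS_Code'] = df['FIPS_Code'].astype(str)

# Inner join on the FIPS code, then pad NA values from the previous row,
# mirroring setup_data() in the test module.
merged = pd.merge(df, county_df, on='FIPS_Code', how='inner')
print(merged.fillna(method='pad'))
```
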
etetteh/covidECTRA | [
"66adcbe4e2217f3eea9e279bda2626936cde3d73"
]
| [
"electra_small/finetune/qa/qa_tasks.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Question answering tasks. SQuAD 1.1/2.0 and 2019 MRQA tasks are supported.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport json\nimport os\nimport six\nimport tensorflow.compat.v1 as tf\n\nimport configure_finetuning\nfrom finetune import feature_spec\nfrom finetune import task\nfrom finetune.qa import qa_metrics\nfrom model import modeling\nfrom model import tokenization\nfrom util import utils\n\n\nclass QAExample(task.Example):\n \"\"\"Question-answering example.\"\"\"\n\n def __init__(self,\n task_name,\n eid,\n qas_id,\n qid,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n super(QAExample, self).__init__(task_name)\n self.eid = eid\n self.qas_id = qas_id\n self.qid = qid\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % self.start_position\n if self.start_position:\n s += \", end_position: %d\" % self.end_position\n if self.start_position:\n s += \", is_impossible: %r\" % self.is_impossible\n return s\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return new_start, new_end\n\n return input_start, input_end\n\n\ndef is_whitespace(c):\n return c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F\n\n\nclass QATask(task.Task):\n \"\"\"A span-based question answering tasks (e.g., SQuAD).\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, config: configure_finetuning.FinetuningConfig, name,\n tokenizer, v2=False):\n super(QATask, self).__init__(config, name)\n self._tokenizer = tokenizer\n self._examples = {}\n self.v2 = v2\n\n def _add_examples(self, examples, example_failures, paragraph, split):\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"] if \"id\" in qa else None\n qid = qa[\"qid\"] if \"qid\" in qa else None\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if split == \"train\":\n if self.v2:\n is_impossible = qa[\"is_impossible\"]\n if not is_impossible:\n if \"detected_answers\" in qa: # MRQA format\n answer = qa[\"detected_answers\"][0]\n answer_offset = answer[\"char_spans\"][0][0]\n else: # SQuAD format\n answer = qa[\"answers\"][0]\n answer_offset = answer[\"answer_start\"]\n orig_answer_text = answer[\"text\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n if answer_offset + answer_length - 1 >= len(char_to_word_offset):\n utils.log(\"End position is out of document!\")\n example_failures[0] += 1\n continue\n end_position = char_to_word_offset[answer_offset + answer_length - 1]\n\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n actual_text = actual_text.lower()\n cleaned_answer_text = cleaned_answer_text.lower()\n if actual_text.find(cleaned_answer_text) == -1:\n utils.log(\"Could not find answer: '{:}' in doc vs. 
\"\n \"'{:}' in provided answer\".format(\n tokenization.printable_text(actual_text),\n tokenization.printable_text(cleaned_answer_text)))\n example_failures[0] += 1\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = QAExample(\n task_name=self.name,\n eid=len(examples),\n qas_id=qas_id,\n qid=qid,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n def get_feature_specs(self):\n return [\n feature_spec.FeatureSpec(self.name + \"_eid\", []),\n feature_spec.FeatureSpec(self.name + \"_start_positions\", []),\n feature_spec.FeatureSpec(self.name + \"_end_positions\", []),\n feature_spec.FeatureSpec(self.name + \"_is_impossible\", []),\n ]\n\n def featurize(self, example: QAExample, is_training, log=False,\n for_eval=False):\n all_features = []\n query_tokens = self._tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > self.config.max_query_length:\n query_tokens = query_tokens[0:self.config.max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = self._tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, self._tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = self.config.max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, self.config.doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n 
tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = self._tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < self.config.max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == self.config.max_seq_length\n assert len(input_mask) == self.config.max_seq_length\n assert len(segment_ids) == self.config.max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if log:\n utils.log(\"*** Example ***\")\n utils.log(\"doc_span_index: %s\" % doc_span_index)\n utils.log(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n utils.log(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n utils.log(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n utils.log(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n utils.log(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n utils.log(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n utils.log(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n utils.log(\"start_position: %d\" % start_position)\n utils.log(\"end_position: %d\" % end_position)\n utils.log(\"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n features = {\n \"task_id\": self.config.task_names.index(self.name),\n self.name + \"_eid\": (1000 * example.eid) + doc_span_index,\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n }\n if for_eval:\n features.update({\n self.name + \"_doc_span_index\": doc_span_index,\n self.name + \"_tokens\": tokens,\n self.name + \"_token_to_orig_map\": token_to_orig_map,\n self.name + \"_token_is_max_context\": token_is_max_context,\n })\n if is_training:\n features.update({\n self.name + \"_start_positions\": start_position,\n self.name + \"_end_positions\": end_position,\n self.name + \"_is_impossible\": example.is_impossible\n })\n all_features.append(features)\n return all_features\n\n def get_prediction_module(self, bert_model, features, is_training,\n percent_done):\n final_hidden = bert_model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n\n answer_mask = tf.cast(features[\"input_mask\"], tf.float32)\n answer_mask *= 
tf.cast(features[\"segment_ids\"], tf.float32)\n answer_mask += tf.one_hot(0, seq_length)\n\n start_logits = tf.squeeze(tf.layers.dense(final_hidden, 1), -1)\n\n start_top_log_probs = tf.zeros([batch_size, self.config.beam_size])\n start_top_index = tf.zeros([batch_size, self.config.beam_size], tf.int32)\n end_top_log_probs = tf.zeros([batch_size, self.config.beam_size,\n self.config.beam_size])\n end_top_index = tf.zeros([batch_size, self.config.beam_size,\n self.config.beam_size], tf.int32)\n if self.config.joint_prediction:\n start_logits += 1000.0 * (answer_mask - 1)\n start_log_probs = tf.nn.log_softmax(start_logits)\n start_top_log_probs, start_top_index = tf.nn.top_k(\n start_log_probs, k=self.config.beam_size)\n\n if not is_training:\n # batch, beam, length, hidden\n end_features = tf.tile(tf.expand_dims(final_hidden, 1),\n [1, self.config.beam_size, 1, 1])\n # batch, beam, length\n start_index = tf.one_hot(start_top_index,\n depth=seq_length, axis=-1, dtype=tf.float32)\n # batch, beam, hidden\n start_features = tf.reduce_sum(\n tf.expand_dims(final_hidden, 1) *\n tf.expand_dims(start_index, -1), axis=-2)\n # batch, beam, length, hidden\n start_features = tf.tile(tf.expand_dims(start_features, 2),\n [1, 1, seq_length, 1])\n else:\n start_index = tf.one_hot(\n features[self.name + \"_start_positions\"], depth=seq_length,\n axis=-1, dtype=tf.float32)\n start_features = tf.reduce_sum(tf.expand_dims(start_index, -1) *\n final_hidden, axis=1)\n start_features = tf.tile(tf.expand_dims(start_features, 1),\n [1, seq_length, 1])\n end_features = final_hidden\n\n final_repr = tf.concat([start_features, end_features], -1)\n final_repr = tf.layers.dense(final_repr, 512, activation=modeling.gelu,\n name=\"qa_hidden\")\n # batch, beam, length (batch, length when training)\n end_logits = tf.squeeze(tf.layers.dense(final_repr, 1), -1,\n name=\"qa_logits\")\n if is_training:\n end_logits += 1000.0 * (answer_mask - 1)\n else:\n end_logits += tf.expand_dims(1000.0 * (answer_mask - 1), 1)\n\n if not is_training:\n end_log_probs = tf.nn.log_softmax(end_logits)\n end_top_log_probs, end_top_index = tf.nn.top_k(\n end_log_probs, k=self.config.beam_size)\n end_logits = tf.zeros([batch_size, seq_length])\n else:\n end_logits = tf.squeeze(tf.layers.dense(final_hidden, 1), -1)\n start_logits += 1000.0 * (answer_mask - 1)\n end_logits += 1000.0 * (answer_mask - 1)\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_sum(one_hot_positions * log_probs, axis=-1)\n return loss\n\n start_positions = features[self.name + \"_start_positions\"]\n end_positions = features[self.name + \"_end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n losses = (start_loss + end_loss) / 2.0\n\n answerable_logit = tf.zeros([batch_size])\n if self.config.answerable_classifier:\n final_repr = final_hidden[:, 0]\n if self.config.answerable_uses_start_logits:\n start_p = tf.nn.softmax(start_logits)\n start_feature = tf.reduce_sum(tf.expand_dims(start_p, -1) *\n final_hidden, axis=1)\n final_repr = tf.concat([final_repr, start_feature], -1)\n final_repr = tf.layers.dense(final_repr, 512,\n activation=modeling.gelu)\n answerable_logit = tf.squeeze(tf.layers.dense(final_repr, 1), -1)\n answerable_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.cast(features[self.name + \"_is_impossible\"], tf.float32),\n 
logits=answerable_logit)\n losses += answerable_loss * self.config.answerable_weight\n\n return losses, dict(\n loss=losses,\n start_logits=start_logits,\n end_logits=end_logits,\n answerable_logit=answerable_logit,\n start_positions=features[self.name + \"_start_positions\"],\n end_positions=features[self.name + \"_end_positions\"],\n start_top_log_probs=start_top_log_probs,\n start_top_index=start_top_index,\n end_top_log_probs=end_top_log_probs,\n end_top_index=end_top_index,\n eid=features[self.name + \"_eid\"],\n )\n\n def get_scorer(self, split=\"dev\"):\n return qa_metrics.SpanBasedQAScorer(self.config, self, split, self.v2)\n\n\nclass MRQATask(QATask):\n \"\"\"Class for finetuning tasks from the 2019 MRQA shared task.\"\"\"\n\n def __init__(self, config: configure_finetuning.FinetuningConfig, name,\n tokenizer):\n super(MRQATask, self).__init__(config, name, tokenizer)\n\n def get_examples(self, split):\n if split in self._examples:\n utils.log(\"N EXAMPLES\", split, len(self._examples[split]))\n return self._examples[split]\n\n examples = []\n example_failures = [0]\n with tf.io.gfile.GFile(os.path.join(\n self.config.raw_data_dir(self.name), split + \".jsonl\"), \"r\") as f:\n for i, line in enumerate(f):\n if self.config.debug and i > 10:\n break\n paragraph = json.loads(line.strip())\n if \"header\" in paragraph:\n continue\n self._add_examples(examples, example_failures, paragraph, split)\n self._examples[split] = examples\n utils.log(\"{:} examples created, {:} failures\".format(\n len(examples), example_failures[0]))\n return examples\n\n def get_scorer(self, split=\"dev\"):\n return qa_metrics.SpanBasedQAScorer(self.config, self, split, self.v2)\n\n\nclass SQuADTask(QATask):\n \"\"\"Class for finetuning on SQuAD 2.0 or 1.1.\"\"\"\n\n def __init__(self, config: configure_finetuning.FinetuningConfig, name,\n tokenizer, v2=False):\n super(SQuADTask, self).__init__(config, name, tokenizer, v2=v2)\n\n def get_examples(self, split):\n if split in self._examples:\n return self._examples[split]\n\n with tf.io.gfile.GFile(os.path.join(\n self.config.raw_data_dir(self.name),\n split + (\"-debug\" if self.config.debug else \"\") + \".json\"), \"r\") as f:\n input_data = json.load(f)[\"data\"]\n\n examples = []\n example_failures = [0]\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n self._add_examples(examples, example_failures, paragraph, split)\n self._examples[split] = examples\n utils.log(\"{:} examples created, {:} failures\".format(\n len(examples), example_failures[0]))\n return examples\n\n def get_scorer(self, split=\"dev\"):\n return qa_metrics.SpanBasedQAScorer(self.config, self, split, self.v2)\n\n \nclass B4B(SQuADTask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(B4B, self).__init__(config, \"b4b\", tokenizer)\n \n \nclass B5B(SQuADTask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(B5B, self).__init__(config, \"b5b\", tokenizer)\n \n \nclass B6B(SQuADTask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(B6B, self).__init__(config, \"b6b\", tokenizer)\n\n\nclass NewsQA(MRQATask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(NewsQA, self).__init__(config, \"newsqa\", tokenizer)\n\n\nclass NaturalQuestions(MRQATask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(NaturalQuestions, self).__init__(config, \"naturalqs\", 
tokenizer)\n\n\nclass SearchQA(MRQATask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(SearchQA, self).__init__(config, \"searchqa\", tokenizer)\n\n\nclass TriviaQA(MRQATask):\n def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):\n super(TriviaQA, self).__init__(config, \"triviaqa\", tokenizer)\n"
]
| [
[
"tensorflow.compat.v1.nn.log_softmax",
"tensorflow.compat.v1.nn.softmax",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.nn.top_k",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.layers.dense"
]
]
|
Turtlesyu-27/qlib | [
"cb2c3028b8cabfca909f22890c9b126a18a35daf"
]
| [
"qlib/contrib/report/analysis_model/analysis_model_performance.py"
]
| [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport pandas as pd\n\nimport plotly.tools as tls\nimport plotly.graph_objs as go\n\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\nfrom scipy import stats\n\nfrom ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph\n\n\ndef _group_return(pred_label: pd.DataFrame = None, reverse: bool = False, N: int = 5, **kwargs) -> tuple:\n \"\"\"\n\n :param pred_label:\n :param reverse:\n :param N:\n :return:\n \"\"\"\n if reverse:\n pred_label[\"score\"] *= -1\n\n pred_label = pred_label.sort_values(\"score\", ascending=False)\n\n # Group1 ~ Group5 only consider the dropna values\n pred_label_drop = pred_label.dropna(subset=[\"score\"])\n\n # Group\n t_df = pd.DataFrame(\n {\n \"Group%d\"\n % (i + 1): pred_label_drop.groupby(level=\"datetime\")[\"label\"].apply(\n lambda x: x[len(x) // N * i : len(x) // N * (i + 1)].mean()\n )\n for i in range(N)\n }\n )\n t_df.index = pd.to_datetime(t_df.index)\n\n # Long-Short\n t_df[\"long-short\"] = t_df[\"Group1\"] - t_df[\"Group%d\" % N]\n\n # Long-Average\n t_df[\"long-average\"] = t_df[\"Group1\"] - pred_label.groupby(level=\"datetime\")[\"label\"].mean()\n\n t_df = t_df.dropna(how=\"all\") # for days which does not contain label\n # FIXME: support HIGH-FREQ\n t_df.index = t_df.index.strftime(\"%Y-%m-%d\")\n # Cumulative Return By Group\n group_scatter_figure = ScatterGraph(\n t_df.cumsum(),\n layout=dict(title=\"Cumulative Return\", xaxis=dict(type=\"category\", tickangle=45)),\n ).figure\n\n t_df = t_df.loc[:, [\"long-short\", \"long-average\"]]\n _bin_size = ((t_df.max() - t_df.min()) / 20).min()\n group_hist_figure = SubplotsGraph(\n t_df,\n kind_map=dict(kind=\"DistplotGraph\", kwargs=dict(bin_size=_bin_size)),\n subplots_kwargs=dict(\n rows=1,\n cols=2,\n print_grid=False,\n subplot_titles=[\"long-short\", \"long-average\"],\n ),\n ).figure\n\n return group_scatter_figure, group_hist_figure\n\n\ndef _plot_qq(data: pd.Series = None, dist=stats.norm) -> go.Figure:\n \"\"\"\n\n :param data:\n :param dist:\n :return:\n \"\"\"\n fig, ax = plt.subplots(figsize=(8, 5))\n _mpl_fig = sm.qqplot(data.dropna(), dist, fit=True, line=\"45\", ax=ax)\n return tls.mpl_to_plotly(_mpl_fig)\n\n\ndef _pred_ic(pred_label: pd.DataFrame = None, rank: bool = False, **kwargs) -> tuple:\n \"\"\"\n\n :param pred_label:\n :param rank:\n :return:\n \"\"\"\n if rank:\n ic = pred_label.groupby(level=\"datetime\").apply(\n lambda x: x[\"label\"].rank(pct=True).corr(x[\"score\"].rank(pct=True))\n )\n else:\n ic = pred_label.groupby(level=\"datetime\").apply(lambda x: x[\"label\"].corr(x[\"score\"]))\n\n _index = ic.index.get_level_values(0).astype(\"str\").str.replace(\"-\", \"\").str.slice(0, 6)\n _monthly_ic = ic.groupby(_index).mean()\n _monthly_ic.index = pd.MultiIndex.from_arrays(\n [_monthly_ic.index.str.slice(0, 4), _monthly_ic.index.str.slice(4, 6)],\n names=[\"year\", \"month\"],\n )\n\n # fill month\n _month_list = pd.date_range(\n start=pd.Timestamp(f\"{_index.min()[:4]}0101\"),\n end=pd.Timestamp(f\"{_index.max()[:4]}1231\"),\n freq=\"1M\",\n )\n _years = []\n _month = []\n for _date in _month_list:\n _date = _date.strftime(\"%Y%m%d\")\n _years.append(_date[:4])\n _month.append(_date[4:6])\n\n fill_index = pd.MultiIndex.from_arrays([_years, _month], names=[\"year\", \"month\"])\n\n _monthly_ic = _monthly_ic.reindex(fill_index)\n\n _ic_df = ic.to_frame(\"ic\")\n ic_bar_figure = ic_figure(_ic_df, kwargs.get(\"show_nature_day\", True))\n\n ic_heatmap_figure = 
HeatmapGraph(\n _monthly_ic.unstack(),\n layout=dict(title=\"Monthly IC\", yaxis=dict(tickformat=\",d\")),\n graph_kwargs=dict(xtype=\"array\", ytype=\"array\"),\n ).figure\n\n dist = stats.norm\n _qqplot_fig = _plot_qq(ic, dist)\n\n if isinstance(dist, stats.norm.__class__):\n dist_name = \"Normal\"\n else:\n dist_name = \"Unknown\"\n\n _bin_size = ((_ic_df.max() - _ic_df.min()) / 20).min()\n _sub_graph_data = [\n (\n \"ic\",\n dict(\n row=1,\n col=1,\n name=\"\",\n kind=\"DistplotGraph\",\n graph_kwargs=dict(bin_size=_bin_size),\n ),\n ),\n (_qqplot_fig, dict(row=1, col=2)),\n ]\n ic_hist_figure = SubplotsGraph(\n _ic_df.dropna(),\n kind_map=dict(kind=\"HistogramGraph\", kwargs=dict()),\n subplots_kwargs=dict(\n rows=1,\n cols=2,\n print_grid=False,\n subplot_titles=[\"IC\", \"IC %s Dist. Q-Q\" % dist_name],\n ),\n sub_graph_data=_sub_graph_data,\n layout=dict(\n yaxis2=dict(title=\"Observed Quantile\"),\n xaxis2=dict(title=f\"{dist_name} Distribution Quantile\"),\n ),\n ).figure\n\n return ic_bar_figure, ic_heatmap_figure, ic_hist_figure\n\n\ndef _pred_autocorr(pred_label: pd.DataFrame, lag=1, **kwargs) -> tuple:\n pred = pred_label.copy()\n pred[\"score_last\"] = pred.groupby(level=\"instrument\")[\"score\"].shift(lag)\n ac = pred.groupby(level=\"datetime\").apply(lambda x: x[\"score\"].rank(pct=True).corr(x[\"score_last\"].rank(pct=True)))\n # FIXME: support HIGH-FREQ\n _df = ac.to_frame(\"value\")\n _df.index = _df.index.strftime(\"%Y-%m-%d\")\n ac_figure = ScatterGraph(\n _df,\n layout=dict(title=\"Auto Correlation\", xaxis=dict(type=\"category\", tickangle=45)),\n ).figure\n return (ac_figure,)\n\n\ndef _pred_turnover(pred_label: pd.DataFrame, N=5, lag=1, **kwargs) -> tuple:\n pred = pred_label.copy()\n pred[\"score_last\"] = pred.groupby(level=\"instrument\")[\"score\"].shift(lag)\n top = pred.groupby(level=\"datetime\").apply(\n lambda x: 1\n - x.nlargest(len(x) // N, columns=\"score\").index.isin(x.nlargest(len(x) // N, columns=\"score_last\").index).sum()\n / (len(x) // N)\n )\n bottom = pred.groupby(level=\"datetime\").apply(\n lambda x: 1\n - x.nsmallest(len(x) // N, columns=\"score\")\n .index.isin(x.nsmallest(len(x) // N, columns=\"score_last\").index)\n .sum()\n / (len(x) // N)\n )\n r_df = pd.DataFrame(\n {\n \"Top\": top,\n \"Bottom\": bottom,\n }\n )\n # FIXME: support HIGH-FREQ\n r_df.index = r_df.index.strftime(\"%Y-%m-%d\")\n turnover_figure = ScatterGraph(\n r_df,\n layout=dict(title=\"Top-Bottom Turnover\", xaxis=dict(type=\"category\", tickangle=45)),\n ).figure\n return (turnover_figure,)\n\n\ndef ic_figure(ic_df: pd.DataFrame, show_nature_day=True, **kwargs) -> go.Figure:\n \"\"\"IC figure\n\n :param ic_df: ic DataFrame\n :param show_nature_day: whether to display the abscissa of non-trading day\n :return: plotly.graph_objs.Figure\n \"\"\"\n if show_nature_day:\n date_index = pd.date_range(ic_df.index.min(), ic_df.index.max())\n ic_df = ic_df.reindex(date_index)\n # FIXME: support HIGH-FREQ\n ic_df.index = ic_df.index.strftime(\"%Y-%m-%d\")\n ic_bar_figure = BarGraph(\n ic_df,\n layout=dict(\n title=\"Information Coefficient (IC)\",\n xaxis=dict(type=\"category\", tickangle=45),\n ),\n ).figure\n return ic_bar_figure\n\n\ndef model_performance_graph(\n pred_label: pd.DataFrame,\n lag: int = 1,\n N: int = 5,\n reverse=False,\n rank=False,\n graph_names: list = [\"group_return\", \"pred_ic\", \"pred_autocorr\"],\n show_notebook: bool = True,\n show_nature_day=True,\n) -> [list, tuple]:\n \"\"\"Model performance\n\n :param pred_label: index is **pd.MultiIndex**, 
index name is **[instrument, datetime]**; columns names is **[score,\n label]**. It is usually same as the label of model training(e.g. \"Ref($close, -2)/Ref($close, -1) - 1\")\n\n\n .. code-block:: python\n\n instrument datetime score label\n SH600004 2017-12-11 -0.013502 -0.013502\n 2017-12-12 -0.072367 -0.072367\n 2017-12-13 -0.068605 -0.068605\n 2017-12-14 0.012440 0.012440\n 2017-12-15 -0.102778 -0.102778\n\n\n :param lag: `pred.groupby(level='instrument')['score'].shift(lag)`. It will be only used in the auto-correlation computing.\n :param N: group number, default 5\n :param reverse: if `True`, `pred['score'] *= -1`\n :param rank: if **True**, calculate rank ic\n :param graph_names: graph names; default ['cumulative_return', 'pred_ic', 'pred_autocorr', 'pred_turnover']\n :param show_notebook: whether to display graphics in notebook, the default is `True`\n :param show_nature_day: whether to display the abscissa of non-trading day\n :return: if show_notebook is True, display in notebook; else return `plotly.graph_objs.Figure` list\n \"\"\"\n figure_list = []\n for graph_name in graph_names:\n fun_res = eval(f\"_{graph_name}\")(\n pred_label=pred_label,\n lag=lag,\n N=N,\n reverse=reverse,\n rank=rank,\n show_nature_day=show_nature_day,\n )\n figure_list += fun_res\n\n if show_notebook:\n BarGraph.show_graph_in_notebook(figure_list)\n else:\n return figure_list\n"
]
| [
[
"pandas.to_datetime",
"matplotlib.pyplot.subplots",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame"
]
]
|
rstoneback/pysatCDF | [
"0d0d0fa843e26d269b17591fd27e4561bb32d40f"
]
| [
"pysatCDF/_cdf.py"
]
| [
"from __future__ import print_function\nfrom __future__ import absolute_import\nimport copy\nimport numpy as np\nimport string\nimport sys\n\nimport pandas\nimport pysat\n\nfrom pysatCDF import fortran_cdf\n\n\nclass CDF(object):\n \"\"\"Reads data from NASA Common Data Format (CDF) files.\n\n pysatCDF presents a Python interface to NASA CDF files.\n To provide an easy installation experience the CDF library\n is included with the software and should be built\n automatically when pysatCDF is installed. In addition\n to zVariable support in CDFs, pysatCDF provides\n functionality to load CDF data and export it into a\n format for pysat integration.\n\n pysatCDF provides Fortran calls to the simplest CDF fortran\n interface, which is itself mapped from C\n code. The pysatCDF Fortran is wrapped up by f2py for Python and\n is used by the high level python in pysatCDF.\n The routines have been observed to be stable over many\n data loads.\n\n Note when opening a CDF file with this module all data is\n automatically loaded from disk unless specific variables\n are excluded upon instantiation.\n\n \"\"\"\n\n def __init__(self, fname):\n # In CDF docs it says don't include .cdf in name\n name = fname\n if fname[-4:].lower() == '.cdf':\n name = fname[:-4]\n\n self.fname = name\n status = fortran_cdf.open(name)\n\n self.data_loaded = False\n\n # CDF library numeric codes for data types.\n cdty = {}\n cdty['real4'] = 21\n cdty['float'] = 44\n cdty['real8'] = 22\n cdty['double'] = 45\n\n cdty['byte'] = 41\n cdty['int1'] = 1\n cdty['int2'] = 2\n cdty['int4'] = 4\n cdty['uint1'] = 11\n cdty['uint2'] = 12\n cdty['uint4'] = 14\n\n cdty['char'] = 51\n cdty['uchar'] = 52\n cdty['epoch'] = 31\n cdty['epoch16'] = 32\n cdty['TT2000'] = 33\n self.cdf_data_types = cdty\n\n if status == 0:\n # Inquire as to files contents.\n self.inquire()\n\n # Get all attribute info.\n self._read_all_attribute_info()\n\n # Get z variable info, basic stats on the variables.\n self._read_all_z_variable_info()\n\n # Load variables.\n self.load_all_variables()\n\n # Load all variable attribute data (zVariables).\n self._read_all_z_attribute_data()\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n pass\n\n def __getitem__(self, key):\n \"\"\"Return CDF variable by name.\"\"\"\n return chameleon(self.fname, key, self.data[key], self.meta[key],\n self.z_variable_info[key])\n\n def inquire(self):\n \"\"\"Maps to fortran CDF_Inquire.\n\n Assigns parameters returned by CDF_Inquire\n to pysatCDF instance. Not intended\n for regular direct use by user.\n\n \"\"\"\n\n name = copy.deepcopy(self.fname)\n stats = fortran_cdf.inquire(name)\n\n # Break out fortran output into something meaningful.\n status = stats[0]\n if status == 0:\n self._num_dims = stats[1]\n self._dim_sizes = stats[2]\n self._encoding = stats[3]\n self._majority = stats[4]\n self._max_rec = stats[5]\n self._num_r_vars = stats[6]\n self._num_z_vars = stats[7]\n self._num_attrs = stats[8]\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n def _read_all_z_variable_info(self):\n \"\"\"Gets all CDF z-variable information, not data though.\n\n Maps to calls using var_inquire. 
Gets information on\n data type, number of elements, number of dimensions, etc.\n\n \"\"\"\n\n self.z_variable_info = {}\n self.z_variable_names_by_num = {}\n\n # Call Fortran that grabs all of the basic stats on all of the\n # zVariables in one go.\n info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,\n len(self.fname))\n status = info[0]\n data_types = info[1]\n num_elems = info[2]\n rec_varys = info[3]\n dim_varys = info[4]\n num_dims = info[5]\n dim_sizes = info[6]\n rec_nums = info[7]\n var_nums = info[8]\n var_names = info[9]\n\n if status == 0:\n for i in np.arange(len(data_types)):\n out = {}\n out['data_type'] = data_types[i]\n out['num_elems'] = num_elems[i]\n out['rec_vary'] = rec_varys[i]\n out['dim_varys'] = dim_varys[i]\n out['num_dims'] = num_dims[i]\n\n # Only looking at first possible extra dimension.\n out['dim_sizes'] = dim_sizes[i, :1]\n if out['dim_sizes'][0] == 0:\n out['dim_sizes'][0] += 1\n out['rec_num'] = rec_nums[i]\n out['var_num'] = var_nums[i]\n var_name = ''.join(var_names[i].astype('U'))\n out['var_name'] = var_name.rstrip()\n self.z_variable_info[out['var_name']] = out\n self.z_variable_names_by_num[out['var_num']] = var_name\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n def load_all_variables(self):\n \"\"\"Loads all variables from CDF.\n\n Note this routine is called automatically\n upon instantiation.\n\n \"\"\"\n\n self.data = {}\n\n # Need to add r variable names.\n file_var_names = self.z_variable_info.keys()\n\n # Collect variable information for each, organize it neatly for\n # fortran call.\n dim_sizes = []\n rec_nums = []\n data_types = []\n names = []\n for i, name in enumerate(file_var_names):\n dim_sizes.extend(self.z_variable_info[name]['dim_sizes'])\n rec_nums.append(self.z_variable_info[name]['rec_num'])\n data_types.append(self.z_variable_info[name]['data_type'])\n names.append(name.ljust(256))\n dim_sizes = np.array(dim_sizes)\n rec_nums = np.array(rec_nums)\n data_types = np.array(data_types)\n\n # Individually load all variables by each data type.\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['real4'],\n fortran_cdf.get_multi_z_real4)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['float'],\n fortran_cdf.get_multi_z_real4)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['real8'],\n fortran_cdf.get_multi_z_real8)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['double'],\n fortran_cdf.get_multi_z_real8)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['int4'],\n fortran_cdf.get_multi_z_int4)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['uint4'],\n fortran_cdf.get_multi_z_int4,\n data_offset=2 ** 32)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['int2'],\n fortran_cdf.get_multi_z_int2)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['uint2'],\n fortran_cdf.get_multi_z_int2,\n data_offset=2 ** 16)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['int1'],\n fortran_cdf.get_multi_z_int1)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['uint1'],\n fortran_cdf.get_multi_z_int1,\n data_offset=2 ** 8)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n 
self.cdf_data_types['byte'],\n fortran_cdf.get_multi_z_int1)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['epoch'],\n fortran_cdf.get_multi_z_real8,\n epoch=True)\n self._call_multi_fortran_z(names, data_types, rec_nums, 2 * dim_sizes,\n self.cdf_data_types['epoch16'],\n fortran_cdf.get_multi_z_epoch16,\n epoch16=True)\n self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,\n self.cdf_data_types['TT2000'],\n fortran_cdf.get_multi_z_tt2000,\n epoch=True)\n\n # Mark data has been loaded.\n self.data_loaded = True\n\n return\n\n def _call_multi_fortran_z(self, names, data_types, rec_nums,\n dim_sizes, input_type_code, func,\n epoch=False, data_offset=None, epoch16=False):\n \"\"\"Calls fortran functions to load CDF variable data\n\n Parameters\n ----------\n names : list-like\n List of variables names.\n data_types : list-like\n List of all loaded data type codes as used by CDF.\n rec_nums : list-like\n List of record numbers in CDF file. Provided by variable_info.\n dim_sizes : list-like\n List of dimensions as provided by variable_info.\n input_type_code : int\n Specific type code to load.\n func : function\n Fortran function via python interface that will be used for\n actual loading.\n epoch : bool\n Flag indicating type is epoch. Translates things to datetime\n standard. (default=False)\n data_offset :\n Offset value to be applied to data. Required for unsigned\n integers in CDF. (default=None)\n epoch16 : bool\n Flag indicating type is epoch16. Translates things to datetime\n standard. (default=False)\n\n\n \"\"\"\n\n # Isolate input type code variables from total supplied types.\n idx, = np.where(data_types == input_type_code)\n\n if len(idx) > 0:\n # Read all data of a given type at once.\n max_rec = rec_nums[idx].max()\n sub_names = np.array(names)[idx]\n sub_sizes = dim_sizes[idx]\n status, data = func(self.fname, sub_names.tolist(),\n sub_sizes, sub_sizes.sum(), max_rec,\n len(sub_names))\n if status == 0:\n # Account for quirks of CDF data storage for certain types.\n if data_offset is not None:\n data = data.astype(int)\n idx, idy, = np.where(data < 0)\n data[idx, idy] += data_offset\n if epoch:\n # Account for difference in seconds between\n # CDF epoch and python's epoch, leap year in there\n # (datetime(1971,1,2) -\n # datetime(1,1,1)).total_seconds()*1000\n data -= 62167219200000\n data = data.astype('<M8[ms]')\n if epoch16:\n data[0::2, :] -= 62167219200\n data = data[0::2, :] * 1E9 + data[1::2, :] / 1.E3\n data = data.astype('datetime64[ns]')\n sub_sizes /= 2\n\n # All data of a type has been loaded and tweaked as necessary.\n # Parse through returned array to break out the individual\n # variables as appropriate.\n self._process_return_multi_z(data, sub_names, sub_sizes)\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n return\n\n def _process_return_multi_z(self, data, names, dim_sizes):\n \"\"\"Process and attach data from various `fortran_cdf` 'get' functions.\n \"\"\"\n\n d1 = 0\n d2 = 0\n for name, dim_size in zip(names, dim_sizes):\n d2 = d1 + dim_size\n if dim_size == 1:\n self.data[name.rstrip()] = data[d1, :]\n else:\n self.data[name.rstrip()] = data[d1:d2, :]\n d1 += dim_size\n\n return\n\n def _read_all_attribute_info(self):\n \"\"\"Read all attribute properties, g, r, and z attributes\"\"\"\n\n num = copy.deepcopy(self._num_attrs)\n fname = copy.deepcopy(self.fname)\n out = fortran_cdf.inquire_all_attr(fname, num, len(fname))\n status = out[0]\n names = out[1].astype('U')\n scopes = 
out[2]\n max_gentries = out[3]\n max_rentries = out[4]\n max_zentries = out[5]\n attr_nums = out[6]\n\n global_attrs_info = {}\n var_attrs_info = {}\n if status == 0:\n for (name, scope, gentry,\n rentry, zentry, num) in zip(names, scopes, max_gentries,\n max_rentries, max_zentries,\n attr_nums):\n name = ''.join(name)\n name = name.rstrip()\n nug = {}\n nug['scope'] = scope\n nug['max_gentry'] = gentry\n nug['max_rentry'] = rentry\n nug['max_zentry'] = zentry\n nug['attr_num'] = num\n flag = (gentry == 0) & (rentry == 0) & (zentry == 0)\n if not flag:\n if scope == 1:\n global_attrs_info[name] = nug\n elif scope == 2:\n var_attrs_info[name] = nug\n\n self.global_attrs_info = global_attrs_info\n self.var_attrs_info = var_attrs_info\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n return\n\n def _read_all_z_attribute_data(self):\n \"\"\"Read all CDF z-attribute data\"\"\"\n self.meta = {}\n\n # Collect attribute info needed to get more info from\n # fortran routines.\n max_entries = []\n attr_nums = []\n names = []\n attr_names = []\n names = self.var_attrs_info.keys()\n num_z_attrs = len(names)\n exp_attr_nums = []\n for key in names:\n max_entries.append(self.var_attrs_info[key]['max_zentry'])\n attr_nums.append(self.var_attrs_info[key]['attr_num'])\n attr_nums = np.array(attr_nums)\n max_entries = np.array(max_entries)\n\n info = fortran_cdf.z_attr_all_inquire(self.fname, attr_nums,\n num_z_attrs, max_entries,\n self._num_z_vars, len(self.fname))\n\n status = info[0]\n data_types = info[1]\n num_elems = info[2]\n entry_nums = info[3]\n\n if status == 0:\n for i, name in enumerate(names):\n self.var_attrs_info[name]['data_type'] = data_types[i]\n self.var_attrs_info[name]['num_elems'] = num_elems[i]\n self.var_attrs_info[name]['entry_num'] = entry_nums[i]\n exp_attr_nums.extend([self.var_attrs_info[name]['attr_num']]\n * len(entry_nums[i]))\n attr_names.extend([name] * len(entry_nums[i]))\n else:\n raise IOError(fortran_cdf.statusreporter(status))\n\n # All the info is now packed up.\n # Need to break it out to make it easier to load via fortran.\n # Includes:\n # attribute id, entry id (zVariable ID), data_type, num_elems\n data_types = data_types.flatten()\n num_elems = num_elems.flatten()\n entry_nums = entry_nums.flatten()\n attr_nums = np.array(exp_attr_nums)\n\n # Drop everything that isn't valid\n idx, = np.where(entry_nums > 0)\n\n data_types = data_types[idx]\n num_elems = num_elems[idx]\n entry_nums = entry_nums[idx]\n attr_nums = attr_nums[idx]\n attr_names = np.array(attr_names)[idx]\n\n # Grab corresponding variable name for each attribute\n var_names = [self.z_variable_names_by_num[i].rstrip()\n for i in entry_nums]\n\n # The names that go along with this are already set up\n # in `attr_names`.\n # Chunk by data type, grab largest num_elems.\n\n # Get data back, shorten to num_elems, add to structure.\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['real4'],\n fortran_cdf.get_multi_z_attr_real4)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['float'],\n fortran_cdf.get_multi_z_attr_real4)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['real8'],\n fortran_cdf.get_multi_z_attr_real8)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['double'],\n 
fortran_cdf.get_multi_z_attr_real8)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['byte'],\n fortran_cdf.get_multi_z_attr_int1)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['int1'],\n fortran_cdf.get_multi_z_attr_int1)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['uint1'],\n fortran_cdf.get_multi_z_attr_int1,\n data_offset=256)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['int2'],\n fortran_cdf.get_multi_z_attr_int2)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['uint2'],\n fortran_cdf.get_multi_z_attr_int2,\n data_offset=65536)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['int4'],\n fortran_cdf.get_multi_z_attr_int4)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['uint4'],\n fortran_cdf.get_multi_z_attr_int4,\n data_offset=2 ** 32)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['char'],\n fortran_cdf.get_multi_z_attr_char)\n self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n self.cdf_data_types['uchar'],\n fortran_cdf.get_multi_z_attr_char)\n return\n\n def _call_multi_fortran_z_attr(self, names, data_types, num_elems,\n entry_nums, attr_nums, var_names,\n input_type_code, func, data_offset=None):\n \"\"\"Calls Fortran function that reads attribute data.\n\n data_offset translates unsigned into signed.\n If number read in is negative, offset added.\n \"\"\"\n # Isolate input type code variables.\n idx, = np.where(data_types == input_type_code)\n\n if len(idx) > 0:\n # Maximum array dimension.\n max_num = num_elems[idx].max()\n sub_num_elems = num_elems[idx]\n sub_names = np.array(names)[idx]\n sub_var_names = np.array(var_names)[idx]\n\n # zVariable numbers, 'entry' number.\n sub_entry_nums = entry_nums[idx]\n\n # Attribute number.\n sub_attr_nums = attr_nums[idx]\n status, data = func(self.fname, sub_attr_nums, sub_entry_nums,\n len(sub_attr_nums), max_num, len(self.fname))\n if (status == 0).all():\n if data_offset is not None:\n data = data.astype(int)\n idx, idy, = np.where(data < 0)\n data[idx, idy] += data_offset\n self._process_return_multi_z_attr(data, sub_names,\n sub_var_names, sub_num_elems)\n else:\n # Raise the first error.\n idx, = np.where(status != 0)\n raise IOError(fortran_cdf.statusreporter(status[idx][0]))\n return\n\n def _process_return_multi_z_attr(self, data, attr_names, var_names,\n sub_num_elems):\n '''process and attach data from fortran_cdf.get_multi_*'''\n\n for i, (attr_name, var_name, num_e) in enumerate(zip(attr_names,\n var_names,\n sub_num_elems)):\n if var_name not in self.meta.keys():\n self.meta[var_name] = {}\n if num_e == 1:\n self.meta[var_name][attr_name] = data[i, 0]\n else:\n if data[i].dtype == '|S1':\n chars = []\n for d in data[i, :num_e]:\n try:\n chars.append(d.astype('U'))\n except UnicodeDecodeError:\n # Uninterpretable character was encountered.\n # Fill inserted.\n chars.append('*')\n self.meta[var_name][attr_name] = ''.join(chars).rstrip()\n 
else:\n self.meta[var_name][attr_name] = data[i, 0:num_e]\n\n def to_pysat(self, flatten_twod=True, units_label='UNITS',\n name_label='LONG_NAME', fill_label='FILLVAL',\n plot_label='FIELDNAM', min_label='VALIDMIN',\n max_label='VALIDMAX', notes_label='VAR_NOTES',\n desc_label='CATDESC', axis_label='LABLAXIS'):\n \"\"\"Export loaded CDF data into data, meta for pysat module.\n\n Parameters\n ----------\n flatten_twod : bool (True)\n If True, then two dimensional data is flattened across\n columns. Name mangling is used to group data, first column\n is 'name', last column is 'name_end'. In between numbers are\n appended 'name_1', 'name_2', etc. All data for a given 2D array\n may be accessed via, data.ix[:,'item':'item_end']\n If False, then 2D data is stored as a series of DataFrames,\n indexed by Epoch. data.ix[0, 'item']\n units_label : str\n Identifier within metadata for units. Defults to CDAWab standard.\n (default='UNITS')\n name_label : str\n Identifier within metadata for variable name, not normally present\n within CDAWeb files. If not, will use values from the variable name\n in the file. (default='LONG_NAME')\n fill_label : str\n Identifier within metadata for Fill Values. Defults to CDAWab\n standard. (default='FILLVAL')\n plot_label : str\n Identifier within metadata for variable name used when plotting.\n Defults to CDAWab standard. (default='FIELDNAM')\n min_label : str\n Identifier within metadata for minimim variable value.\n Defults to CDAWab standard. (default='VALIDMIN')\n max_label : str\n Identifier within metadata for maximum variable value.\n Defults to CDAWab standard. (default='VALIDMAX')\n notes_label : str\n Identifier within metadata for notes. Defults to CDAWab standard.\n (default='VAR_NOTES')\n desc_label : str\n Identifier within metadata for a variable description.\n Defults to CDAWab standard. (default='CATDESC')\n axis_label : str\n Identifier within metadata for axis name used when plotting.\n Defults to CDAWab standard. 
(default='LABLAXIS')\n\n\n Returns\n -------\n data : pandas.DataFrame, pysat.Meta\n Data suitable for attachment to a pysat.Instrument object.\n meta : pysat.Meta\n pysat Metadata class suitable for attachment to a pysat.Instrument\n object.\n\n Note\n ----\n The *_labels should be set to the values in the file, if present.\n Note that once the meta object returned from this function is attached\n to a pysat.Instrument object then the *_labels on the Instrument\n are assigned to the newly attached Meta object.\n\n The pysat Meta object will use data with labels that match the patterns\n in *_labels even if the case does not match.\n\n \"\"\"\n\n # Copy data.\n cdata = self.data.copy()\n\n # Create a dictionary of the labels for use in initializing\n # the Metadata.\n labels = {'units': (units_label, str), 'name': (name_label, str),\n 'notes': (notes_label, str), 'desc': (desc_label, str),\n 'plot': (plot_label, str), 'axis': (axis_label, str),\n 'scale': ('scale', str), 'min_val': (min_label, float),\n 'max_val': (max_label, float),\n 'fill_val': (fill_label, float)}\n\n # Create pysat.Meta object using data above\n # and utilize the attribute labels provided by the user.\n meta = pysat.Meta(pandas.DataFrame.from_dict(self.meta,\n orient='index'),\n labels=labels)\n\n # Account for different possible cases for Epoch, epoch, EPOCH, epOch.\n lower_names = [name.lower() for name in meta.keys()]\n for name, true_name in zip(lower_names, meta.keys()):\n if name == 'epoch':\n meta.data.rename(index={true_name: 'Epoch'}, inplace=True)\n epoch = cdata.pop(true_name)\n cdata['Epoch'] = epoch\n\n # Ready to format data, iterate over all of the data names\n # and put into a pandas DataFrame.\n two_d_data = []\n drop_list = []\n for name in cdata.keys():\n temp = np.shape(cdata[name])\n\n # Treat 2 dimensional data differently.\n if len(temp) == 2:\n if not flatten_twod:\n # Put 2D data into a Frame at each time,\n # remove data from dict when adding to the DataFrame.\n frame = pandas.DataFrame(cdata[name].flatten(),\n columns=[name])\n drop_list.append(name)\n\n step = temp[0]\n new_list = []\n new_index = np.arange(step)\n for i in np.arange(len(epoch)):\n new_list.append(frame.iloc[i * step:(i + 1) * step, :])\n new_list[-1].index = new_index\n\n new_frame = pandas.Series(new_list, index=epoch, name=name)\n two_d_data.append(new_frame)\n\n else:\n # Flatten 2D into series of 1D columns.\n new_names = [name + '_{i}'.format(i=i)\n for i in np.arange(temp[0] - 2)]\n new_names.append(name + '_end')\n new_names.insert(0, name)\n\n # Remove data from dict when adding to the DataFrame.\n drop_list.append(name)\n frame = pandas.DataFrame(cdata[name].T,\n index=epoch,\n columns=new_names)\n two_d_data.append(frame)\n\n for name in drop_list:\n _ = cdata.pop(name)\n\n # All of the data left over is 1D, add as Series\n data = pandas.DataFrame(cdata, index=epoch)\n two_d_data.append(data)\n data = pandas.concat(two_d_data, axis=1)\n data.drop('Epoch', axis=1, inplace=True)\n return data, meta\n\n\nclass chameleon(object):\n \"\"\"Provides multiple access mechanisms for larger CDF object.\n\n Supports spacepy access pattern along with pysatCDF native\n data access pattern.\n\n \"\"\"\n\n def __init__(self, fname, name, data, attr, info):\n self.fname = fname\n self.data = data\n self.attrs = attr\n self.name = name\n self.info = info\n\n def __getitem__(self, key):\n if key is Ellipsis:\n return self.data\n else:\n return self.data[key]\n\n def __repr__(self):\n out = 'CDF filename : ' + self.fname + '\\n'\n 
out += 'CDF variable name: ' + self.name + '\\n'\n for key in self.info.keys():\n out += key + \" : \" + str(self.info[key]) + '\\n'\n\n return out\n"
]
| [
[
"pandas.concat",
"pandas.Series",
"numpy.arange",
"pandas.DataFrame",
"numpy.shape",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.where"
]
]
|
Near32/comaze_gym | [
"296ae8295ffac5222f085e3482d0b976ea987d66"
]
| [
"comaze_gym/metrics/message_policy.py"
]
| [
"from typing import List, Dict, Optional\n\nimport torch\nimport torch.nn as nn\n\nclass MessagePolicy(nn.Module):\n def __init__(self, model:nn.Module):\n super(MessagePolicy, self).__init__()\n self.model = model\n\n def parameters(self):\n return self.model.parameters()\n\n def save_inner_state(self):\n raise NotImplementedError\n\n def restore_inner_state(self):\n raise NotImplementedError\n\n def clone(self, training=False):\n return MessagePolicy(model=self.model.clone(training=training))\n\n def forward(self, x:object):\n \"\"\"\n :param x:\n Object representing the observation of the current agent.\n e.g.: the object can be a kwargs argument containing\n expected argument to the model.\n :return log_a:\n torch.Tensor of logits over actions \n (as a Discrete OpenAI's action space).\n \"\"\"\n raise NotImplementedError\n\n #def reset(self, batch_size:int=None):\n def reset(self, batch_size:int, training:Optional[bool]=False):\n \"\"\"\n Call at the beginning of each episode.\n \"\"\"\n raise NotImplementedError\n\n\nfrom comaze_gym.utils import RuleBasedAgentWrapper\n\nclass RuleBasedMessagePolicy(MessagePolicy):\n def __init__(\n self, \n wrapped_rule_based_agent:RuleBasedAgentWrapper,\n combined_action_space:bool = False):\n \"\"\"\n \n :param combined_action_space:\n If True, then the message and actions performed\n by the current agent are treated as belonging to\n the same OpenAI's Discrete action space of size \n n= #messages * #actions.\n Else, n = # actions : directional actions.\n \"\"\"\n super(RuleBasedMessagePolicy, self).__init__(\n model=wrapped_rule_based_agent\n )\n self.combined_action_space = combined_action_space\n \n def clone(self, training=False):\n return RuleBasedMessagePolicy(\n wrapped_rule_based_agent=self.model.clone(training=training), \n combined_action_space=self.combined_action_space\n )\n \n def reset(self, batch_size:int, training:Optional[bool]=False):\n self.model.set_nbr_actor(batch_size)\n\n def get_nbr_actor(self):\n return self.model.get_nbr_actor()\n\n def save_inner_state(self):\n self.saved_inner_state = self.model.get_rnn_states()\n\n def restore_inner_state(self):\n self.model.set_rnn_states(self.saved_inner_state)\n \n def forward(self, x:object):\n \"\"\"\n :param x:\n Object representing the observation of the current agent.\n e.g.: the object can be a kwargs argument containing\n expected argument to the model.\n\n Here, x:Dict containing the keys:\n -'state': torch.Tensor containing the environment state.\n -'infos': Dict containing the entry 'abstract_repr' that is\n actually used by the :param model:RuleBasedAgentWrapper.\n \n :return log_m:\n torch.Tensor of logits over messages \n (as a Discrete OpenAI's action space).\n\n Here, depending on :attr combined_action_space:,\n we either marginalized over possible actions or not.\n \"\"\"\n actions_idx = self.model.take_action(**x)\n # batch_size x 1\n\n batch_size = actions_idx.shape[0]\n self.action_space_dim = self.model.action_space_dim \n \n # giving some entropy...\n #p_m = torch.ones((batch_size, self.action_space_dim)) #.to(actions_idx.device)\n p_m = torch.zeros((batch_size, self.action_space_dim)) #.to(actions_idx.device)\n\n for bidx in range(batch_size):\n p_m[bidx, int(actions_idx[bidx])] = 10.0\n\n if self.combined_action_space:\n return p_m.log_softmax(dim=-1)\n\n # Otherwise, we sum over the messages dimension (excluding the NOOP action):\n self.vocab_size = (self.action_space_dim-1)//5\n # There are 5 possible directional actions:\n p_m = 
p_m[...,:-1].reshape((batch_size, 5, self.vocab_size)).sum(dim=1)\n # batch_size x vocab_size\n\n return p_m.log_softmax(dim=1)\n \n\n "
]
| [
[
"torch.zeros"
]
]
|
tacohen125/chachies | [
"fb850757414408e7bff1342edd785db578502429"
]
| [
"examples/Images/chachifuncs_sepcd.py"
]
| [
"import glob \nfrom math import isclose \nimport numpy as np\nimport os \nimport pandas as pd\nfrom pandas import ExcelWriter\nimport requests\nimport scipy.io\nimport scipy.signal\n\n################################\n### OVERALL Wrapper Function ###\n################################\n\ndef get_all_data(rootdir, path_to_raw_data_folder):\n '''Gets all raw data from the rootdir (ie 'data/') and specified folder (path_to_raw_data_folder), i.e. 'Source_Data' (which is within , and then \n 1. separates it into raw cycles and puts them in a folder (data/Separated_Cycles/)\n 2. cleans those separated cycles and puts them in a folder (data/Clean_Separated_Cycles/)\n 3. recombines the cleaned, separated cycles and saves those data sets in a folder (data/Clean_Whole_Sets/)\n These folders do not have to have existed previously. '''\n if not os.path.exists(rootdir):\n print('The specified rootdir does not exist.')\n if not os.path.exists(rootdir+'Separated_Cycles/'):\n os.makedirs(rootdir+'Separated_Cycles/')\n if not os.path.exists(rootdir+'Clean_Separated_Cycles/'):\n os.makedirs(rootdir + 'Clean_Separated_Cycles/')\n if not os.path.exists(rootdir + 'Clean_Whole_Sets/'):\n os.makedirs(rootdir + 'Clean_Whole_Sets/')\n load_sep_cycles(rootdir + path_to_raw_data_folder, rootdir+ 'Separated_Cycles/')\n get_clean_cycles(rootdir + 'Separated_Cycles/', rootdir +'Clean_Separated_Cycles/')\n get_clean_sets(rootdir +'Clean_Separated_Cycles/', rootdir+'Clean_Whole_Sets/')\n return \n\n############################\n### Sub - Wrapper Functions\n############################\n\ndef load_sep_cycles(getdata_filepath, savedata_filepath):\n \"\"\"Get data from a specified filepath, separates out data into cycles and saves those cycles as .xlsx files in specified filepath (must be an existing folder)\"\"\"\n dfdict = get_data(getdata_filepath)\n for key in dfdict:\n all_cycles_df = dfdict[key]\n cycle_dict = sep_cycles(all_cycles_df)\n battname = key \n save_sep_cycles_xlsx(cycle_dict, battname, savedata_filepath) \n print('All data separated into cycles and saved in folder \"data/Separated_Cycles\". ')\n return \n\ndef clean_calc_sep_smooth(dataframe, windowlength, polyorder):\n \"\"\"Takes one cycle dataframe, calculates dq/dv, cleans the data, separates out charge and discharge, and applies sav-golay filter. 
Returns two dataframes, one charge and one discharge.\n Windowlength and polyorder are for the sav-golay filter.\"\"\"\n df1 = calc_dv_dqdv(dataframe)\n raw_charge = df1[df1['Current(A)'] > 0]\n raw_charge = raw_charge.reset_index(drop = True)\n raw_discharge = df1[df1['Current(A)'] < 0]\n raw_discharge = raw_discharge.reset_index(drop = True)\n clean_charge2 = drop_0_dv(raw_charge)\n #clean_charge2 = clean_charge2.reset_index(drop = True)\n #clean_charge2 = clean_charge2.drop(index = 0)\n #want to delete first datapoint in charge row here instead?\n clean_discharge2 = drop_0_dv(raw_discharge)\n clean_charge2 = clean_charge2.sort_values(['Voltage(V)'], ascending = True)\n clean_discharge2 = clean_discharge2.sort_values(['Voltage(V)'], ascending = False)\n #clean_discharge2 = clean_discharge2[:-1]\n cleandf2 = clean_charge2.append(clean_discharge2, ignore_index = True)\n #cleandf2 = cleandf2.sort_values(['Voltage(V)'], ascending = True)\n cleandf2 = cleandf2.reset_index(drop = True)\n charge, discharge = sep_char_dis(cleandf2)\n if len(discharge) > windowlength:\n smooth_discharge = my_savgolay(discharge, windowlength, polyorder)\n else:\n discharge['Smoothed_dQ/dV'] = discharge['dQ/dV']\n smooth_discharge = discharge\n if len(charge) > windowlength: \n smooth_charge = my_savgolay(charge, windowlength, polyorder)\n else:\n charge['Smoothed_dQ/dV'] = charge['dQ/dV']\n smooth_charge = charge \n return smooth_charge, smooth_discharge\n\ndef get_clean_cycles(import_filepath, save_filepath): \n \"\"\"Imports all separated out cycles in given path and cleans them and saves them in the specified filepath\"\"\"\n rootdir = import_filepath\n file_list = [f for f in glob.glob(os.path.join(rootdir,'*.xlsx'))] #iterate through dir to get excel files \n d = {} #initiate dict for data storage\n count = 0\n for file in file_list:\n count += 1\n name = os.path.split(file)[1].split('.')[0]\n data = pd.read_excel(file)\n charge, discharge = clean_calc_sep_smooth(data, 9, 3)\n clean_data = discharge.append(charge, ignore_index = True)\n \n #clean_data = clean_data.sort_values(['Voltage'], ascending = True)\n clean_data = clean_data.reset_index(drop = True)\n #clean_data = my_savgolay(clean_data1, 21, 3)\n #####################################################################################3\n clean_cycle = {name : clean_data}\n d.update(clean_cycle)\n # print(\"adding file to dictionary\" + str(count) + ' ' + str(name))\n for key in d:\n clean_cycle_df = d[key]\n cyclename = key \n writer = ExcelWriter(save_filepath + cyclename + 'Clean'+ '.xlsx')\n clean_cycle_df.to_excel(writer)\n writer.save() \n print('All cycles cleaned and saved in folder \"data/Clean_Separated_Cycles\".')\n return \n\ndef get_clean_sets(import_filepath, save_filepath): \n \"\"\"Imports all clean cycles of data from import path and appends them into complete sets of battery data, saved into save_filepath\"\"\"\n rootdir = import_filepath\n file_list = [f for f in glob.glob(os.path.join(rootdir,'*.xlsx'))] #iterate through dir to get excel files \n d = {} #initiate dict for data storage\n count = 0\n list_bats = [] \n for file in file_list:\n count += 1\n name = os.path.split(file)[1].split('.')[0]\n batname = name.split('-')[0]\n if batname not in list_bats:\n list_bats.append(batname)\n else: None \n \n set_dict = {}\n \n for i in range(len(list_bats)): \n batID = list_bats[i] \n setdf = pd.DataFrame()\n for file in file_list:\n name = os.path.split(file)[1].split('.')[0]\n batname = name.split('-')[0]\n if batname == batID:\n df = 
pd.read_excel(file)\n setdf = setdf.append(df, ignore_index=True)\n else:\n None \n setdf = setdf.sort_values(['Voltage(V)'], ascending = True)\n setdf = setdf.reset_index(drop = True)\n newset = {batID : setdf}\n set_dict.update(newset) \n \n for key, value in set_dict.items():\n writer = ExcelWriter(save_filepath + key + 'CleanSet'+'.xlsx')\n value.to_excel(writer)\n writer.save() \n \n print('All clean cycles recombined and saved in folder \"data/Clean_Whole_Sets\".')\n return\n############################\n# Component Functions\n############################\n\ndef get_data(filepath): \n \"\"\"Imports all data in given path\"\"\"\n assert type(filepath) == str, 'Input must be a string'\n rootdir = filepath\n file_list = [f for f in glob.glob(os.path.join(rootdir,'*.xlsx'))] #iterate through dir to get excel files \n \n d = {} #initiate dict for data storage\n count = 0\n for file in file_list:\n count += 1\n name = os.path.split(file)[1].split('.')[0]\n data = pd.read_excel(file,1)\n new_set = {name : data}\n d.update(new_set)\n # print(\"adding file \" + str(count) + ' ' + str(name))\n return d\n### ADD UNIT TEST:There are 23 files in the CS2 directory, so we should have 23 entries in the dictionary - add unit test for this, super EASY check \n\n#separate out dataframes into cycles\ndef sep_cycles(dataframe):\n \"\"\"This function separates out the cycles in the battery dataframe by grouping by the 'Cycle_Index' column, and putting them in a dictionary. \"\"\"\n assert type(dataframe) == pd.DataFrame, 'Input must be a dataframe' \n gb = dataframe.groupby(by = ['Cycle_Index'])\n cycle_dict = dict(iter(gb))\n return cycle_dict\n\ndef save_sep_cycles_xlsx(cycle_dict, battname, path_to_folder):\n \"\"\"This saves the separated out cycles into different excel files, beginning with the battery name. Battname and path to folder must be strings.\"\"\"\n assert type(cycle_dict) == dict, 'First entry must be a dictionary'\n assert type(battname) == str, 'Second entry must be a string'\n assert type(path_to_folder) == str, 'Path to output must be a string'\n for i in range(1, len(cycle_dict)+1):\n cycle_dict[i]['Battery_Label'] = battname\n for i in range(1,len(cycle_dict)+1):\n writer = ExcelWriter(path_to_folder + battname + '-'+'Cycle' + str(i) + '.xlsx')\n cycle_dict[i].to_excel(writer)\n writer.save()\n return \n\n\ndef calc_dv_dqdv(cycle_df):\n \"\"\"This function calculates the dv and the dq/dv for a dataframe.\"\"\"\n cycle_df = cycle_df.reset_index(drop = True)\n cycle_df['dV'] = None \n cycle_df['Discharge_dQ'] = None\n cycle_df['Charge_dQ'] = None \n cycle_df['Discharge_dQ/dV'] = None\n cycle_df['Charge_dQ/dV'] = None \n for i in range(1,len(cycle_df)): \n cycle_df.loc[i, ('dV')] = cycle_df.loc[i, ('Voltage(V)')] - cycle_df.loc[i-1, ('Voltage(V)')] \n cycle_df.loc[i, ('Discharge_dQ')] = cycle_df.loc[i, ('Discharge_Capacity(Ah)')] - cycle_df.loc[i-1, ('Discharge_Capacity(Ah)')]\n cycle_df.loc[i, ('Charge_dQ')] = cycle_df.loc[i, ('Charge_Capacity(Ah)')] - cycle_df.loc[i-1, ('Charge_Capacity(Ah)')]\n #calculate dq/dv based off of discharge capacity - might change this later so user can choose to use charge or discharge cap. \n cycle_df['Discharge_dQ/dV'] = cycle_df['Discharge_dQ']/cycle_df['dV']\n cycle_df['Charge_dQ/dV'] = cycle_df['Charge_dQ']/cycle_df['dV']\n return cycle_df\n\n\ndef drop_0_dv(cycle_df_dv): \n '''Drop rows where dv=0 (or about 0) in a dataframe that has already had dv calculated. 
Then recalculate dv and calculate dq/dv'''\n #this will clean up the data points around V = 4.2V (since they are holding it at 4.2V for a while).\n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n \n cycle_df_dv['dv_close_to_zero'] = None\n \n\n\n for i in range(1, len(cycle_df_dv)):\n \t#if (cycle_df_dv.loc[i, ('dV/dt(V/s)')] == 0 or isclose(cycle_df_dv.loc[i, ('Current(A)')], 0, abs_tol = 10**-3) or isclose(cycle_df_dv.loc[i, ('Voltage(V)')], 4.2, abs_tol = 10**-3)):\n \t#\tcycle_df_dv = cycle_df_dv.drop(index = i)\n \tif isclose(cycle_df_dv.loc[i, ('Current(A)')], 0, abs_tol = 10**-3):\n \t\tcycle_df_dv = cycle_df_dv.drop(index = i)\n\n cycle_df_dv = cycle_df_dv.reset_index(drop = True) \n switch_cd_index = np.where(np.diff(np.sign(cycle_df_dv['Current(A)'])))\n for i in switch_cd_index:\n cycle_df_dv = cycle_df_dv.drop(cycle_df_dv.index[i+1]) \n\n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n\n for i in range(1, len(cycle_df_dv)):\n if isclose(cycle_df_dv.loc[i, ('dV')], 0, abs_tol = 10**-3): #was -3.5 before\n cycle_df_dv.loc[i,('dv_close_to_zero')] = False\n else:\n cycle_df_dv.loc[i,('dv_close_to_zero')]= True \n \n \n while (False in cycle_df_dv['dv_close_to_zero'].values or cycle_df_dv['dV'].max() > 0.7 or cycle_df_dv['dV'].min() < -0.7): \n \n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n \n for i in range(1, len(cycle_df_dv)):\n if isclose(cycle_df_dv.loc[i, ('dV')], 0, abs_tol = 10**-3): \n cycle_df_dv = cycle_df_dv.drop(index = i)\n #if i-1 in cycle_df_dv.index:\n #\tcycle_df_dv = cycle_df_dv.drop(index = i-1)\n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n\n separate_dis_char = np.where(np.diff(np.sign(cycle_df_dv['Current(A)'])))\n \n for i in range(1, len(cycle_df_dv)):\n if (cycle_df_dv.loc[i,('dV')] > 0.7 or cycle_df_dv.loc[i,('dV')] < -0.7):\n cycle_df_dv = cycle_df_dv.drop(index = i) \n \n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n \n for i in range(1, len(cycle_df_dv)): \n cycle_df_dv.loc[i, ('dV')] = cycle_df_dv.loc[i, ('Voltage(V)')] - cycle_df_dv.loc[i-1, ('Voltage(V)')] \n if isclose(cycle_df_dv.loc[i, ('dV')], 0, abs_tol = 10**-3):\n cycle_df_dv.loc[i,('dv_close_to_zero')] = False\n else:\n cycle_df_dv.loc[i,('dv_close_to_zero')]= True\n \n cycle_df_dv = cycle_df_dv.reset_index(drop = True) \n \n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n \n #recalculating dv and dq's after dropping rows\n for i in range(1, len(cycle_df_dv)): \n cycle_df_dv.loc[i, ('dV')] = cycle_df_dv.loc[i, ('Voltage(V)')] - cycle_df_dv.loc[i-1, ('Voltage(V)')]\n cycle_df_dv.loc[i, ('Discharge_dQ')] = cycle_df_dv.loc[i, ('Discharge_Capacity(Ah)')] - cycle_df_dv.loc[i-1, ('Discharge_Capacity(Ah)')]\n cycle_df_dv.loc[i, ('Charge_dQ')] = cycle_df_dv.loc[i, ('Charge_Capacity(Ah)')] - cycle_df_dv.loc[i-1, ('Charge_Capacity(Ah)')]\n #recalculate dq/dv \n cycle_df_dv['Discharge_dQ/dV'] = cycle_df_dv['Discharge_dQ']/cycle_df_dv['dV']\n cycle_df_dv['Charge_dQ/dV'] = cycle_df_dv['Charge_dQ']/cycle_df_dv['dV']\n cycle_df_dv = cycle_df_dv.dropna(subset=['Discharge_dQ/dV'])\n cycle_df_dv = cycle_df_dv.dropna(subset=['Charge_dQ/dV'])\n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n # cycle_df_dv = cycle_df_dv[:-1]\n \n \n\n\n cycle_df_dv = cycle_df_dv.reset_index(drop = True)\n return cycle_df_dv \n\ndef sep_char_dis(df_dqdv):\n '''Takes a dataframe of one cycle with calculated dq/dv and separates into charge and discharge differential capacity curves'''\n charge = df_dqdv[df_dqdv['Current(A)'] > 0]\n charge.is_copy = None\n charge = charge.reset_index(drop = 
True)\n charge['dQ/dV'] = charge['Charge_dQ/dV']\n for i in range(1, len(charge)):\n if charge.loc[i, ('dQ/dV')] == 0: \n charge = charge.drop(index = i)\n charge = charge.reset_index(drop = True)\n #charge = charge.iloc[1:]\n charge = charge.reset_index(drop = True)\n discharge = df_dqdv[df_dqdv['Current(A)'] < 0] \n discharge.is_copy = None \n discharge['dQ/dV'] = discharge['Discharge_dQ/dV']\n discharge = discharge.reset_index(drop = True)\n for i in range(1, len(discharge)):\n if discharge.loc[i, ('dQ/dV')] == 0: \n discharge = discharge.drop(index = i)\n discharge = discharge.reset_index(drop = True)\n #discharge = discharge.iloc[:-1]\n #discharge = discharge.iloc[2:]\n discharge = discharge.reset_index(drop = True)\n \n return charge, discharge\n \ndef my_savgolay(dataframe, windowlength, polyorder):\n \"\"\"Takes battery dataframe with a dQ/dV column and applies a sav_golay filter to it, returning the dataframe with a new column called Smoothed_dQ/dV\"\"\"\n unfilt = pd.concat([dataframe['dQ/dV']])\n unfiltar = unfilt.values\n #converts into an array \n dataframe['Smoothed_dQ/dV'] = scipy.signal.savgol_filter(unfiltar, windowlength, polyorder)\n #had windowlength = 21 and polyorder = 3 before\n return dataframe\n\n\n\n\n\n\n\n"
]
| [
[
"pandas.concat",
"pandas.read_excel",
"pandas.DataFrame",
"numpy.sign",
"pandas.ExcelWriter"
]
]
|
dajebbar/Reproductible-Jupyter-Workflow | [
"180a5b800c79127507fa66a744d8e5600573afce"
]
| [
"jupyterworkflow/tests/test_data.py"
]
| [
"from jupyterworkflow.data import check_fremont_data\nimport pandas as pd\nimport numpy as np\n\n\ndef test_fremont_data():\n data = check_fremont_data()\n assert all(data.columns == ['Total', 'East', 'West'])\n assert isinstance(data.index, pd.DatetimeIndex)\n assert len(np.unique(data.index.time) == 24)\n"
]
| [
[
"numpy.unique"
]
]
|
MakeBigBigMoney/czsc | [
"8450c8912904b1d66a5c6e78d42c1b7d4b3d1777"
]
| [
"examples/third_buy_use_tdxfile.py"
]
| [
"# coding: utf-8\n\nimport pandas as pd\nfrom datetime import datetime\nfrom typing import List\nfrom czsc.analyze import CZSC, RawBar\nfrom czsc.enum import Signals\nimport struct\nimport os\n\n\nTDX_DIR = r\"D:\\new_jyplug\" # 首先要设置通达信的安装目录\n\n# 从通达信目录读入数据\ndef get_data_from_tdxfile(stock_code, type) -> List[RawBar]:\n '''\n stock_code:股票代码 600667\n type:市场代码,sh沪市,sz深市\n '''\n bars = []\n filepath = TDX_DIR + r'\\vipdoc\\\\' + type + r'\\lday\\sh' + stock_code + '.day'\n with open(filepath, 'rb') as f:\n while True:\n stock_date = f.read(4)\n stock_open = f.read(4)\n stock_high = f.read(4)\n stock_low = f.read(4)\n stock_close = f.read(4)\n stock_amount = f.read(4)\n stock_vol = f.read(4)\n stock_reservation = f.read(4)\n if not stock_date:\n break\n stock_date = struct.unpack(\"l\", stock_date) # 4字节 如20091229\n stock_open = struct.unpack(\"l\", stock_open) # 开盘价*100\n stock_high = struct.unpack(\"l\", stock_high) # 最高价*100\n stock_low = struct.unpack(\"l\", stock_low) # 最低价*100\n stock_close = struct.unpack(\"l\", stock_close) # 收盘价*100\n stock_amount = struct.unpack(\"f\", stock_amount) # 成交额\n stock_vol = struct.unpack(\"l\", stock_vol) # 成交量\n stock_reservation = struct.unpack(\"l\", stock_reservation) # 保留值\n date_format = datetime.strptime(str(stock_date[0]), '%Y%M%d') # 格式化日期\n date_format = date_format.strftime('%Y-%M-%d')\n\n bar = RawBar(symbol=stock_code, dt=pd.to_datetime(date_format), open=stock_open[0] / 100,\n close=stock_close[0] / 100.0, high=stock_high[0] / 100.0, low=stock_low[0] / 100.0,\n vol=stock_vol[0])\n bars.append(bar)\n return bars\n\n\ndef is_third_buy(stock_code, type):\n bars = get_data_from_tdxfile(stock_code, type)\n c = CZSC(bars, freq=\"日线\")\n if c.signals['倒1形态'] in [Signals.LI0.value]:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n # 找出沪市6开头的,中三买的票\n rootdir = TDX_DIR + r\"\\vipdoc\\sh\\lday\"\n list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n for i in range(0, len(list)):\n scode=list[i][2:8]\n if scode.startswith(\"6\"):\n if is_third_buy(scode,\"sh\"):\n print(\"{} - 日线三买\".format(scode))\n\n # 找出深圳中0开头的三买的票\n rootdir = TDX_DIR + r\"\\vipdoc\\sz\\lday\"\n list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n for i in range(0, len(list)):\n scode=list[i][2:8]\n if scode.startswith(\"0\"):\n if is_third_buy(scode,\"sz\"):\n print(\"{} - 日线三买\".format(scode))\n"
]
| [
[
"pandas.to_datetime"
]
]
|
an99990/SeMask-Segmentation | [
"786f395fab4e156970628134cb49eb3547d7287b"
]
| [
"SeMask-FPN/mmseg/models/segmentors/base.py"
]
| [
"import logging\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom mmcv.runner import auto_fp16\nimport os\nimport matplotlib.pyplot as plt\n\nignore_label= 255\n\nid_to_trainid = {-1: -1, 0: ignore_label, 1: ignore_label, 2: ignore_label, \n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label, \n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4, \n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5, \n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14, \n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\n\n\ndef id2trainId(label, id_to_trainid, reverse=False):\n label_copy = label.copy()\n if reverse:\n for v, k in id_to_trainid.items():\n label_copy[label == k] = v\n else:\n for k, v in id_to_trainid.items():\n label_copy[label == k] = v\n return label_copy\n\nclass BaseSegmentor(nn.Module):\n \"\"\"Base class for segmentors.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BaseSegmentor, self).__init__()\n self.fp16_enabled = False\n\n @property\n def with_neck(self):\n \"\"\"bool: whether the segmentor has neck\"\"\"\n return hasattr(self, 'neck') and self.neck is not None\n\n @property\n def with_auxiliary_head(self):\n \"\"\"bool: whether the segmentor has auxiliary head\"\"\"\n return hasattr(self,\n 'auxiliary_head') and self.auxiliary_head is not None\n\n @property\n def with_decode_head(self):\n \"\"\"bool: whether the segmentor has decode head\"\"\"\n return hasattr(self, 'decode_head') and self.decode_head is not None\n\n @abstractmethod\n def extract_feat(self, imgs):\n \"\"\"Placeholder for extract features from images.\"\"\"\n pass\n\n @abstractmethod\n def encode_decode(self, img, img_metas):\n \"\"\"Placeholder for encode images with backbone and decode into a\n semantic segmentation map of the same size as input.\"\"\"\n pass\n\n @abstractmethod\n def forward_train(self, imgs, img_metas, **kwargs):\n \"\"\"Placeholder for Forward function for training.\"\"\"\n pass\n\n @abstractmethod\n def simple_test(self, img, img_meta, **kwargs):\n \"\"\"Placeholder for single image test.\"\"\"\n pass\n\n @abstractmethod\n def aug_test(self, imgs, img_metas, **kwargs):\n \"\"\"Placeholder for augmentation test.\"\"\"\n pass\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in segmentor.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n if pretrained is not None:\n logger = logging.getLogger()\n logger.info(f'load model from: {pretrained}')\n\n def forward_test(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n imgs (List[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains all images in the batch.\n img_metas (List[List[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) 
and the inner list indicates\n images in a batch.\n \"\"\"\n for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError(f'{name} must be a list, but got '\n f'{type(var)}')\n\n num_augs = len(imgs)\n if num_augs != len(img_metas):\n raise ValueError(f'num of augmentations ({len(imgs)}) != '\n f'num of image meta ({len(img_metas)})')\n # all images in the same aug batch all of the same ori_shape and pad\n # shape\n for img_meta in img_metas:\n ori_shapes = [_['ori_shape'] for _ in img_meta]\n assert all(shape == ori_shapes[0] for shape in ori_shapes)\n img_shapes = [_['img_shape'] for _ in img_meta]\n assert all(shape == img_shapes[0] for shape in img_shapes)\n pad_shapes = [_['pad_shape'] for _ in img_meta]\n assert all(shape == pad_shapes[0] for shape in pad_shapes)\n\n if num_augs == 1:\n return self.simple_test(imgs[0], img_metas[0], **kwargs)\n else:\n return self.aug_test(imgs, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('img', ))\n def forward(self, img, img_metas, return_loss=True, **kwargs):\n \"\"\"Calls either :func:`forward_train` or :func:`forward_test` depending\n on whether ``return_loss`` is ``True``.\n\n Note this setting will change the expected inputs. When\n ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor\n and List[dict]), and when ``resturn_loss=False``, img and img_meta\n should be double nested (i.e. List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n \"\"\"\n if return_loss:\n return self.forward_train(img, img_metas, **kwargs)\n else:\n return self.forward_test(img, img_metas, **kwargs)\n\n def train_step(self, data_batch, optimizer, **kwargs):\n \"\"\"The iteration step during training.\n\n This method defines an iteration step during training, except for the\n back propagation and optimizer updating, which are done in an optimizer\n hook. Note that in some complicated cases or models, the whole process\n including back propagation and optimizer updating is also defined in\n this method, such as GAN.\n\n Args:\n data (dict): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``,\n ``num_samples``.\n ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n ``log_vars`` contains all the variables to be sent to the\n logger.\n ``num_samples`` indicates the batch size (when the model is\n DDP, it means the batch size on each GPU), which is used for\n averaging the logs.\n \"\"\"\n losses = self(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(data_batch['img'].data))\n\n return outputs\n\n def val_step(self, data_batch, **kwargs):\n \"\"\"The iteration step during validation.\n\n This method shares the same signature as :func:`train_step`, but used\n during val epochs. 
Note that the evaluation after training epochs is\n not implemented with this method, but an evaluation hook.\n \"\"\"\n output = self(**data_batch, **kwargs)\n return output\n\n @staticmethod\n def _parse_losses(losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary information.\n\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor\n which may be a weighted sum of all losses, log_vars contains\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def show_result(self,\n i,\n img,\n result,\n palette=None,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (Tensor): The semantic segmentation results to draw over\n `img`.\n palette (list[list[int]]] | np.ndarray | None): The palette of\n segmentation map. If None is given, random palette will be\n generated. 
Default: None\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n img (Tensor): Only if not `show` or `out_file`\n \"\"\"\n \n assert len(self.CLASSES) in [19, 150, 171]\n \n img = mmcv.imread(img)\n img = img.copy()\n h, w = img.shape[:2]\n seg = result[0]\n seg = mmcv.imresize(seg, (w, h), interpolation='nearest')\n if palette is None:\n if self.PALETTE is None:\n palette = np.random.randint(\n 0, 255, size=(len(self.CLASSES), 3))\n else:\n palette = self.PALETTE\n palette = np.array(palette)\n assert palette.shape[0] == len(self.CLASSES)\n assert palette.shape[1] == 3\n assert len(palette.shape) == 2\n color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)\n for label, color in enumerate(palette):\n color_seg[seg == label, :] = color\n # convert to BGR\n color_seg = color_seg[..., ::-1]\n\n pred = color_seg.copy()\n color_seg = img * 0.5 + color_seg * 0.5\n img = img.astype(np.uint8)\n color_seg = color_seg.astype(np.uint8)\n pred = pred.astype(np.uint8)\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n\n save_file_pred = os.path.join(out_file[0], str(i) + '_PRED.png')\n save_file_img = os.path.join(out_file[0], str(i) + '_IMG.png')\n save_file_gt = os.path.join(out_file[0], str(i) + '_GT.png')\n save_file_overlap = os.path.join(out_file[0], str(i) + '_OVERLAP.png')\n\n if len(self.CLASSES) == 19:\n gt_file = out_file[1].replace('_leftImg8bit.png', '_gtFine_labelIds.png')\n gt_file = gt_file.replace('/leftImg8bit/', '/gtFine/') \n elif len(self.CLASSES) == 150:\n gt_file = out_file[1].replace('/images/', '/annotations/')\n gt_file = gt_file.replace('.jpg', '.png')\n elif len(self.CLASSES) == 171:\n gt_file = out_file[1].replace('/images/', '/annotations/')\n gt_file = gt_file.replace('.jpg', '_labelTrainIds.png')\n \n gt = mmcv.imread(gt_file, flag='grayscale')\n gt = mmcv.imresize(gt, (w, h), interpolation='nearest')\n \n if len(self.CLASSES) == 19:\n gt = id2trainId(gt, id_to_trainid)\n elif len(self.CLASSES) == 150:\n gt = gt - 1\n elif len(self.CLASSES) == 171:\n gt = gt - 1\n \n color_gt = np.zeros((gt.shape[0], gt.shape[1], 3), dtype=np.uint8)\n for label, color in enumerate(palette):\n color_gt[gt == label, :] = color\n # convert to BGR\n color_gt = color_gt[..., ::-1]\n\n color_gt = color_gt.astype(np.uint8)\n\n if show:\n mmcv.imshow(color_seg, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(color_seg, save_file_overlap)\n mmcv.imwrite(img, save_file_img)\n mmcv.imwrite(color_gt, save_file_gt)\n mmcv.imwrite(pred, save_file_pred)\n\n if not (show or out_file):\n warnings.warn('show==False and out_file is not specified, only '\n 'result image will be returned')\n return img\n \n def grid_maps(self, feats):\n c = feats.shape[1]\n \n gh = int(np.sqrt(c))\n gw = c // gh\n idx = gw * gh\n\n max_num = torch.max(feats[:, :idx]).item()\n min_num = torch.min(feats[:, :idx]).item()\n feats = feats[:,:idx].cpu() * 255 / (max_num - min_num) \n feats = np.asarray(feats, dtype=np.float32)\n feats = np.rint(feats).clip(0, 255).astype(np.uint8)\n\n _N, C, H, W = feats.shape\n\n feats = feats.reshape(gh, gw, 1, H, W)\n feats = feats.transpose(0, 3, 1, 4, 2)\n feats = feats.reshape(gh * H, gw * W, 1)\n\n return feats[:, :, 0], str(H)\n\n def save_maps(self,\n i,\n feat_maps,\n sem_maps,\n out_file=None):\n \n \n for ft in 
feat_maps:\n ft, dim = self.grid_maps(ft)\n save_file_ft = os.path.join(out_file, str(i) + f'_{dim}_FEAT.png')\n plt.imsave(save_file_ft, ft, cmap=plt.cm.viridis)\n\n for sem_ft in sem_maps:\n sem_ft, dim = self.grid_maps(sem_ft)\n save_file_ft = os.path.join(out_file, str(i) + f'_{dim}_SEM.png')\n plt.imsave(save_file_ft, sem_ft, cmap=plt.cm.viridis)\n\n"
]
| [
[
"matplotlib.pyplot.imsave",
"numpy.sqrt",
"torch.max",
"numpy.asarray",
"torch.min",
"numpy.rint",
"torch.distributed.is_initialized",
"torch.distributed.is_available",
"torch.distributed.get_world_size",
"numpy.array",
"numpy.zeros"
]
]
|
QuinnQiao/pytorch-cnn-visualizations | [
"bcbaf39899a976eb80ff2cc2f078c1445f2fa6a4"
]
| [
"src/misc_functions.py"
]
| [
"\"\"\"\nCreated on Thu Oct 21 11:09:09 2017\n\n@author: Utku Ozbulak - github.com/utkuozbulak\n\"\"\"\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.cm as mpl_color_map\n\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision import models\n\n\ndef convert_to_grayscale(im_as_arr):\n \"\"\"\n Converts 3d image to grayscale\n\n Args:\n im_as_arr (numpy arr): RGB image with shape (D,W,H)\n\n returns:\n grayscale_im (numpy_arr): Grayscale image with shape (1,W,D)\n \"\"\"\n grayscale_im = np.sum(np.abs(im_as_arr), axis=0)\n im_max = np.percentile(grayscale_im, 99)\n im_min = np.min(grayscale_im)\n grayscale_im = (np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1))\n grayscale_im = np.expand_dims(grayscale_im, axis=0)\n return grayscale_im\n\n\ndef save_gradient_images(gradient, file_name):\n \"\"\"\n Exports the original gradient image\n\n Args:\n gradient (np arr): Numpy array of the gradient with shape (3, 224, 224)\n file_name (str): File name to be exported\n \"\"\"\n if not os.path.exists('../results'):\n os.makedirs('../results')\n # Normalize\n gradient = gradient - gradient.min()\n gradient /= gradient.max()\n # Save image\n path_to_file = os.path.join('../results', file_name + '.jpg')\n save_image(gradient, path_to_file)\n\n\ndef save_class_activation_images(org_img, activation_map, file_name):\n \"\"\"\n Saves cam activation map and activation map on the original image\n\n Args:\n org_img (PIL img): Original image\n activation_map (numpy arr): Activation map (grayscale) 0-255\n file_name (str): File name of the exported image\n \"\"\"\n if not os.path.exists('../results'):\n os.makedirs('../results')\n # Grayscale activation map\n heatmap, heatmap_on_image = apply_colormap_on_image(org_img, activation_map, 'rainbow')\n # Save colored heatmap\n path_to_file = os.path.join('../results', file_name+'_Cam_Heatmap.png')\n save_image(heatmap, path_to_file)\n # Save heatmap on iamge\n path_to_file = os.path.join('../results', file_name+'_Cam_On_Image.png')\n save_image(heatmap_on_image, path_to_file)\n # SAve grayscale heatmap\n path_to_file = os.path.join('../results', file_name+'_Cam_Grayscale.png')\n save_image(activation_map, path_to_file)\n\n\ndef apply_colormap_on_image(org_im, activation, colormap_name):\n \"\"\"\n Apply heatmap on image\n Args:\n org_img (PIL img): Original image\n activation_map (numpy arr): Activation map (grayscale) 0-255\n colormap_name (str): Name of the colormap\n \"\"\"\n # Get colormap\n color_map = mpl_color_map.get_cmap(colormap_name)\n no_trans_heatmap = color_map(activation)\n # Change alpha channel in colormap to make sure original image is displayed\n heatmap = copy.copy(no_trans_heatmap)\n heatmap[:, :, 3] = 0.4\n heatmap = Image.fromarray((heatmap*255).astype(np.uint8))\n no_trans_heatmap = Image.fromarray((no_trans_heatmap*255).astype(np.uint8))\n\n # Apply heatmap on iamge\n heatmap_on_image = Image.new(\"RGBA\", org_im.size)\n heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))\n heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)\n return no_trans_heatmap, heatmap_on_image\n\n\ndef format_np_output(np_arr):\n \"\"\"\n This is a (kind of) bandaid fix to streamline saving procedure.\n It converts all the outputs to the same format which is 3xWxH\n with using sucecssive if clauses.\n Args:\n im_as_arr (Numpy array): Matrix of shape 1xWxH or WxH or 3xWxH\n \"\"\"\n # Phase/Case 1: The np arr only has 2 dimensions\n # Result: Add a dimension at 
the beginning\n if len(np_arr.shape) == 2:\n np_arr = np.expand_dims(np_arr, axis=0)\n # Phase/Case 2: Np arr has only 1 channel (assuming first dim is channel)\n # Result: Repeat first channel and convert 1xWxH to 3xWxH\n if np_arr.shape[0] == 1:\n np_arr = np.repeat(np_arr, 3, axis=0)\n # Phase/Case 3: Np arr is of shape 3xWxH\n # Result: Convert it to WxHx3 in order to make it saveable by PIL\n if np_arr.shape[0] == 3:\n np_arr = np_arr.transpose(1, 2, 0)\n # Phase/Case 4: NP arr is normalized between 0-1\n # Result: Multiply with 255 and change type to make it saveable by PIL\n if np.max(np_arr) <= 1:\n np_arr = (np_arr*255).astype(np.uint8)\n return np_arr\n\n\ndef save_image(im, path):\n \"\"\"\n Saves a numpy matrix or PIL image as an image\n Args:\n im_as_arr (Numpy array): Matrix of shape DxWxH\n path (str): Path to the image\n \"\"\"\n if isinstance(im, (np.ndarray, np.generic)):\n im = format_np_output(im)\n im = Image.fromarray(im)\n im.save(path)\n\n\ndef preprocess_image(pil_im, resize_im=True):\n \"\"\"\n Processes image for CNNs\n\n Args:\n PIL_img (PIL_img): Image to process\n resize_im (bool): Resize to 224 or not\n returns:\n im_as_var (torch variable): Variable that contains processed float tensor\n \"\"\"\n # mean and std list for channels (Imagenet)\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n # Resize image\n if resize_im:\n pil_im.thumbnail((224, 224))\n im_as_arr = np.float32(pil_im)\n im_as_arr = im_as_arr.transpose(2, 0, 1) # Convert array to D,W,H\n # Normalize the channels\n for channel, _ in enumerate(im_as_arr):\n im_as_arr[channel] /= 255\n im_as_arr[channel] -= mean[channel]\n im_as_arr[channel] /= std[channel]\n # Convert to float tensor\n im_as_ten = torch.from_numpy(im_as_arr).float()\n # Add one more channel to the beginning. 
Tensor shape = 1,3,224,224\n im_as_ten.unsqueeze_(0)\n # Convert to Pytorch variable\n im_as_var = Variable(im_as_ten, requires_grad=True)\n return im_as_var\n\n\ndef recreate_image(im_as_var):\n \"\"\"\n Recreates images from a torch variable, sort of reverse preprocessing\n Args:\n im_as_var (torch variable): Image to recreate\n returns:\n recreated_im (numpy arr): Recreated image in array\n \"\"\"\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n recreated_im = copy.copy(im_as_var.data.numpy()[0])\n for c in range(3):\n recreated_im[c] /= reverse_std[c]\n recreated_im[c] -= reverse_mean[c]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im).transpose(1, 2, 0)\n return recreated_im\n\n\ndef get_positive_negative_saliency(gradient):\n \"\"\"\n Generates positive and negative saliency maps based on the gradient\n Args:\n gradient (numpy arr): Gradient of the operation to visualize\n\n returns:\n pos_saliency ( )\n \"\"\"\n pos_saliency = (np.maximum(0, gradient) / gradient.max())\n neg_saliency = (np.maximum(0, -gradient) / -gradient.min())\n return pos_saliency, neg_saliency\n\n\ndef get_example_params(example_index, example_network=None):\n \"\"\"\n Gets used variables for almost all visualizations, like the image, model etc.\n\n Args:\n example_index (int): Image id to use from examples\n\n returns:\n original_image (numpy arr): Original image read from the file\n prep_img (numpy_arr): Processed image\n target_class (int): Target class for the image\n file_name_to_export (string): File name to export the visualizations\n pretrained_model(Pytorch model): Model to use for the operations\n \"\"\"\n # Pick one of the examples\n example_list = (('../input_images/snake.jpg', 56),\n ('../input_images/cat_dog.png', 243),\n ('../input_images/spider.png', 72))\n img_path = example_list[example_index][0]\n target_class = example_list[example_index][1]\n file_name_to_export = img_path[img_path.rfind('/')+1:img_path.rfind('.')]\n # Read image\n original_image = Image.open(img_path).convert('RGB')\n # Process image\n prep_img = preprocess_image(original_image)\n # Define model\n if example_network is None:\n pretrained_model = models.alexnet(pretrained=True)\n elif example_network == 'vgg16':\n pretrained_model = models.vgg16(pretrained=True)\n elif example_network == 'resnet50':\n pretrained_model = models.resnet50(pretrained=True)\n else:\n assert 0, 'Unsupported network'\n return (original_image,\n prep_img,\n target_class,\n file_name_to_export,\n pretrained_model)\n"
]
| [
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.abs",
"numpy.clip",
"numpy.min",
"numpy.uint8",
"torch.from_numpy",
"numpy.percentile",
"numpy.round",
"numpy.max",
"numpy.float32",
"matplotlib.cm.get_cmap",
"numpy.repeat",
"torch.autograd.Variable"
]
]
|
dgrinwald93/pytorch_bnns | [
"09bf7504f30373229579f15e847f1a7f87cf6ef0"
]
| [
"scripts/train_mcdo_places365.py"
]
| [
"import argparse\r\nimport os\r\nimport sys\r\nimport random\r\nimport json\r\nimport shutil\r\nimport time\r\nimport warnings\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.distributed as dist\r\nimport torch.optim\r\nimport torch.multiprocessing as mp\r\nimport torch.utils.data\r\nimport torch.utils.data.distributed\r\nimport torchvision.transforms as transforms\r\nimport torchvision.datasets as datasets\r\nimport torchvision.models as models\r\n\r\nfrom pbnn.datasets import get_datasets\r\nfrom pbnn.models.mcdo_resnets import mcdo_resnet18, mcdo_resnet152, mcdo_resnet50\r\n\r\nbest_acc1 = True\r\n# Load config\r\nconf_path = './confs/training_mcdo_conf.json'\r\nwith open(conf_path, 'r') as j:\r\n xp_conf = json.loads(j.read())['resnet18_mcdo']\r\n\r\n### XP Setup ###\r\ntrain_flag = bool(xp_conf[\"train_flag\"])\r\nprefix = xp_conf[\"prefix\"]\r\nds = xp_conf[\"ds\"]\r\n\r\nif ds == 'imagenet':\r\n num_classes=1000\r\nelif ds == 'imagenet_mini':\r\n num_classes=10\r\nelif ds == 'places365':\r\n num_classes=365\r\nelse:\r\n num_classes=0\r\n\r\nprint_freq = xp_conf[\"print_freq\"]\r\nepochs = xp_conf[\"epochs\"]\r\nseed = xp_conf[\"seed\"]\r\nif seed is not \"\":\r\n seed=seed\r\nelse:\r\n seed=None\r\nlr = xp_conf[\"lr\"]\r\nbs = xp_conf[\"bs\"]\r\nmomentum = xp_conf[\"momentum\"]\r\nweight_decay = xp_conf[\"weight_decay\"]\r\np = xp_conf[\"p\"]\r\nworkers = xp_conf[\"workers\"]\r\n\r\n\r\n# Setup distributed training\r\ndist_backend = 'nccl'\r\nworld_size = 1\r\nmp_str = 'store_false'\r\n### CONFIG ###\r\n\r\ndata_path = f'/home/dgrinwald/datasets/dissect/places/'\r\nfile_path = f'models/{ds}/{prefix}_bs_{bs}_lr_{lr}_dropout_{p}_wd_{weight_decay}_mo_{momentum}_dist_training/'\r\n\r\nif not os.path.exists(file_path):\r\n os.makedirs(file_path)\r\n\r\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\r\n\r\nparser.add_argument('--data', default=data_path,\r\n help='path to dataset')\r\n\r\nparser.add_argument('-j', '--workers', default=workers, type=int, metavar='N',\r\n help='number of data loading workers (default: 4)')\r\n\r\nparser.add_argument('--epochs', default=epochs, type=int, metavar='N',\r\n help='number of total epochs to run')\r\n\r\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\r\n help='manual epoch number (useful on restarts)')\r\n\r\nparser.add_argument('-b', '--batch-size', default=bs, type=int,\r\n metavar='N',\r\n help='mini-batch size (default: 256), this is the total '\r\n 'batch size of all GPUs on the current node when '\r\n 'using Data Parallel or Distributed Data Parallel')\r\n\r\nparser.add_argument('--lr', '--learning-rate', default=lr, type=float,\r\n metavar='LR', help='initial learning rate', dest='lr')\r\n\r\nparser.add_argument('--momentum', default=momentum, type=float, metavar='M',\r\n help='momentum')\r\n\r\nparser.add_argument('--wd', '--weight-decay', default=weight_decay, type=float,\r\n metavar='W', help='weight decay (default: 1e-4)',\r\n dest='weight_decay')\r\n\r\nparser.add_argument('-pf', '--print-freq', default=print_freq, type=int,\r\n metavar='N', help='print frequency (default: 10)')\r\n\r\nparser.add_argument('-p', '--p', default=p, type=float,\r\n help='Dropout rate')\r\n\r\nparser.add_argument('-nc', '--num_classes', default=num_classes, type=int,\r\n help='Number of output classes')\r\n\r\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\r\n help='path to latest checkpoint (default: 
none)')\r\n\r\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\r\n help='evaluate model on validation set')\r\n\r\nparser.add_argument('--world-size', default=world_size, type=int,\r\n help='number of nodes for distributed training')\r\n\r\nparser.add_argument('--dist-backend', default=dist_backend, type=str,\r\n help='distributed backend')\r\n\r\nparser.add_argument('--file_path', default=file_path, type=str,\r\n help='file path for outputs')\r\n\r\nparser.add_argument('--seed', default=seed, type=int,\r\n help='seed for initializing training. ')\r\n\r\nparser.add_argument('--gpu', default=None, type=int,\r\n help='GPU id to use.')\r\n\r\nparser.add_argument('--multiprocessing-distributed', action=mp_str,\r\n help='Use multi-processing distributed training to launch '\r\n 'N processes per node, which has N GPUs. This is the '\r\n 'fastest way to use PyTorch for either single node or '\r\n 'multi node data parallel training')\r\n\r\nbest_acc1 = 0\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n\r\n if args.seed is not None:\r\n random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n cudnn.deterministic = True\r\n warnings.warn('You have chosen to seed training. '\r\n 'This will turn on the CUDNN deterministic setting, '\r\n 'which can slow down your training considerably! '\r\n 'You may see unexpected behavior when restarting '\r\n 'from checkpoints.')\r\n\r\n if args.gpu is not None:\r\n warnings.warn('You have chosen a specific GPU. This will completely '\r\n 'disable data parallelism.')\r\n\r\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\r\n\r\n ngpus_per_node = torch.cuda.device_count()\r\n print(f'Number of GPUs: {ngpus_per_node}')\r\n\r\n if args.multiprocessing_distributed:\r\n # Since we have ngpus_per_node processes per node, the total world_size\r\n # needs to be adjusted accordingly\r\n args.world_size = ngpus_per_node * args.world_size\r\n # Use torch.multiprocessing.spawn to launch distributed processes: the\r\n # main_worker process function\r\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\r\n else:\r\n # Simply call main_worker function\r\n main_worker(args.gpu, ngpus_per_node, args)\r\n\r\ndef setup(rank, world_size, dist_backend):\r\n os.environ['MASTER_ADDR'] = 'localhost'\r\n os.environ['MASTER_PORT'] = '12355'\r\n\r\n # initialize the process group\r\n dist.init_process_group(dist_backend, rank=rank, world_size=world_size)\r\n\r\ndef cleanup():\r\n dist.destroy_process_group()\r\n\r\ndef main_worker(gpu, ngpus_per_node, args):\r\n\r\n global best_acc1\r\n args.gpu = gpu\r\n\r\n print(f'GPU: {args.gpu}')\r\n\r\n if args.gpu is not None:\r\n print(\"Use GPU: {} for training\".format(args.gpu))\r\n\r\n if args.distributed:\r\n if args.multiprocessing_distributed:\r\n setup(gpu, ngpus_per_node, args.dist_backend)\r\n\r\n #model = resnet18_mcdo(p=args.p, num_classes=args.num_classes)\r\n #model = mcdo_resnet152(p=args.p, num_classes=args.num_classes)\r\n model = mcdo_resnet50(p=args.p, num_classes=args.num_classes)\r\n\r\n if not torch.cuda.is_available():\r\n print('using CPU, this will be slow')\r\n elif args.distributed:\r\n # For multiprocessing distributed, DistributedDataParallel constructor\r\n # should always set the single device scope, otherwise,\r\n # DistributedDataParallel will use all available devices.\r\n if args.gpu is not None:\r\n torch.cuda.set_device(args.gpu)\r\n model.cuda(args.gpu)\r\n # When using a single GPU per process and per\r\n # 
DistributedDataParallel, we need to divide the batch size\r\n # ourselves based on the total number of GPUs we have\r\n args.batch_size = int(args.batch_size / ngpus_per_node)\r\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\r\n else:\r\n model.cuda()\r\n # DistributedDataParallel will divide and allocate batch_size to all\r\n # available GPUs if device_ids are not set\r\n model = torch.nn.parallel.DistributedDataParallel(model)\r\n elif args.gpu is not None:\r\n torch.cuda.set_device(args.gpu)\r\n model = model.cuda(args.gpu)\r\n else:\r\n model = torch.nn.DataParallel(model).cuda()\r\n\r\n # define loss function (criterion) and optimizer\r\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\r\n\r\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\r\n momentum=args.momentum,\r\n weight_decay=args.weight_decay)\r\n\r\n # optionally resume from a checkpoint\r\n if args.resume:\r\n if os.path.isfile(args.resume):\r\n print(\"=> loading checkpoint '{}'\".format(args.resume))\r\n if args.gpu is None:\r\n checkpoint = torch.load(args.resume)\r\n else:\r\n # Map model to be loaded to specified single gpu.\r\n loc = 'cuda:{}'.format(args.gpu)\r\n checkpoint = torch.load(args.resume, map_location=loc)\r\n args.start_epoch = checkpoint['epoch']\r\n best_acc1 = checkpoint['best_acc1']\r\n if args.gpu is not None:\r\n # best_acc1 may be from a checkpoint from a different GPU\r\n best_acc1 = best_acc1.to(args.gpu)\r\n model.load_state_dict(checkpoint['state_dict'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n print(\"=> loaded checkpoint '{}' (epoch {})\"\r\n .format(args.resume, checkpoint['epoch']))\r\n else:\r\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\r\n\r\n cudnn.benchmark = True\r\n\r\n # Data loading code\r\n traindir = os.path.join(args.data, 'train')\r\n valdir = os.path.join(args.data, 'val')\r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n\r\n train_dataset = datasets.ImageFolder(\r\n traindir,\r\n transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalize,\r\n ]))\r\n\r\n if args.distributed:\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\r\n else:\r\n train_sampler = None\r\n\r\n train_loader = torch.utils.data.DataLoader(\r\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\r\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\r\n\r\n val_loader = torch.utils.data.DataLoader(\r\n datasets.ImageFolder(valdir, transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n normalize,\r\n ])),\r\n batch_size=args.batch_size, shuffle=False,\r\n num_workers=args.workers, pin_memory=True)\r\n\r\n if args.evaluate:\r\n validate(val_loader, model, criterion, args)\r\n return\r\n\r\n for epoch in range(args.start_epoch, args.epochs):\r\n if args.distributed:\r\n train_sampler.set_epoch(epoch)\r\n adjust_learning_rate(optimizer, epoch, args)\r\n\r\n # train for one epoch\r\n train(train_loader, model, criterion, optimizer, epoch, args)\r\n\r\n # evaluate on validation set\r\n acc1 = validate(val_loader, model, criterion, args)\r\n\r\n # remember best acc@1 and save checkpoint\r\n is_best = acc1 > best_acc1\r\n best_acc1 = max(acc1, best_acc1)\r\n\r\n if not args.multiprocessing_distributed 
or (args.multiprocessing_distributed\r\n and args.gpu == 0):\r\n \r\n print(f'GPU {args.gpu} saving model ... ')\r\n save_checkpoint({\r\n 'epoch': epoch + 1,\r\n 'state_dict': model.state_dict(),\r\n 'best_acc1': best_acc1,\r\n 'optimizer' : optimizer.state_dict(),\r\n }, is_best, file_path)\r\n\r\n cleanup()\r\n\r\n\r\ndef train(train_loader, model, criterion, optimizer, epoch, args):\r\n batch_time = AverageMeter('Time', ':6.3f')\r\n data_time = AverageMeter('Data', ':6.3f')\r\n losses = AverageMeter('Loss', ':.4e')\r\n top1 = AverageMeter('Acc@1', ':6.2f')\r\n top5 = AverageMeter('Acc@5', ':6.2f')\r\n progress = ProgressMeter(\r\n len(train_loader),\r\n [batch_time, data_time, losses, top1, top5],\r\n prefix=\"Epoch: [{}]\".format(epoch))\r\n\r\n # switch to train mode\r\n model.train()\r\n\r\n end = time.time()\r\n for i, (images, target) in enumerate(train_loader):\r\n # measure data loading time\r\n data_time.update(time.time() - end)\r\n\r\n if args.gpu is not None:\r\n images = images.cuda(args.gpu, non_blocking=True)\r\n if torch.cuda.is_available():\r\n target = target.cuda(args.gpu, non_blocking=True)\r\n\r\n # compute output\r\n output = model(images)\r\n loss = criterion(output, target)\r\n\r\n # measure accuracy and record loss\r\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\r\n losses.update(loss.item(), images.size(0))\r\n top1.update(acc1[0], images.size(0))\r\n top5.update(acc5[0], images.size(0))\r\n\r\n # compute gradient and do SGD step\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # measure elapsed time\r\n batch_time.update(time.time() - end)\r\n end = time.time()\r\n\r\n if i % args.print_freq == 0:\r\n progress.display(i)\r\n\r\n\r\ndef validate(val_loader, model, criterion, args):\r\n batch_time = AverageMeter('Time', ':6.3f')\r\n losses = AverageMeter('Loss', ':.4e')\r\n top1 = AverageMeter('Acc@1', ':6.2f')\r\n top5 = AverageMeter('Acc@5', ':6.2f')\r\n progress = ProgressMeter(\r\n len(val_loader),\r\n [batch_time, losses, top1, top5],\r\n prefix='Test: ')\r\n\r\n # switch to evaluate mode\r\n model.eval()\r\n\r\n with torch.no_grad():\r\n end = time.time()\r\n for i, (images, target) in enumerate(val_loader):\r\n if args.gpu is not None:\r\n images = images.cuda(args.gpu, non_blocking=True)\r\n if torch.cuda.is_available():\r\n target = target.cuda(args.gpu, non_blocking=True)\r\n\r\n # compute output\r\n output = model(images)\r\n loss = criterion(output, target)\r\n\r\n # measure accuracy and record loss\r\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\r\n losses.update(loss.item(), images.size(0))\r\n top1.update(acc1[0], images.size(0))\r\n top5.update(acc5[0], images.size(0))\r\n\r\n # measure elapsed time\r\n batch_time.update(time.time() - end)\r\n end = time.time()\r\n\r\n if i % args.print_freq == 0:\r\n progress.display(i)\r\n\r\n # TODO: this should also be done with the ProgressMeter\r\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\r\n .format(top1=top1, top5=top5))\r\n\r\n return top1.avg\r\n\r\n\r\ndef save_checkpoint(state, is_best, file_path=''):\r\n torch.save(state, file_path+'model_checkpoint.pth.tar')\r\n if is_best:\r\n shutil.copyfile(file_path+'model_checkpoint.pth.tar', file_path+'model_best.pth.tar')\r\n\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self, name, fmt=':f'):\r\n self.name = name\r\n self.fmt = fmt\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n 
self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n def __str__(self):\r\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\r\n return fmtstr.format(**self.__dict__)\r\n\r\n\r\nclass ProgressMeter(object):\r\n def __init__(self, num_batches, meters, prefix=\"\"):\r\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\r\n self.meters = meters\r\n self.prefix = prefix\r\n\r\n def display(self, batch):\r\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\r\n entries += [str(meter) for meter in self.meters]\r\n print('\\t'.join(entries))\r\n\r\n def _get_batch_fmtstr(self, num_batches):\r\n num_digits = len(str(num_batches // 1))\r\n fmt = '{:' + str(num_digits) + 'd}'\r\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch, args):\r\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n\r\ndef accuracy(output, target, topk=(1,)):\r\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\r\n with torch.no_grad():\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
]
| [
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"torch.distributed.destroy_process_group",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
]
|
MadElf1337/Enquiry_bot | [
"707edcde0bd6e4f35611626faebe1a894f6d0b4b"
]
| [
"predict_bot.py"
]
| [
"import nltk\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport pickle\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nimport json\r\nimport random\r\n\r\nintents = json.loads(open('intents.json', encoding=\"utf8\").read())\r\nwords = pickle.load(open('words.pkl', 'rb'))\r\nclasses = pickle.load(open('classes.pkl', 'rb'))\r\nlemmatizer = WordNetLemmatizer()\r\nmodel = load_model('chatbot_model.h5')\r\n\r\nsentence = \" \" #using an arbitrary variable as an example and for further usage in functions\r\n\r\n\r\ndef clean_up_sentence(sentence):\r\n # tokenize the pattern - split words into array\r\n sentence_words = nltk.word_tokenize(sentence)\r\n # stem each word - create short form for word\r\n sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]\r\n return sentence_words\r\n\r\n\r\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\r\n\r\ndef bow(sentence, words, show_details=True):\r\n # tokenize the pattern\r\n sentence_words = clean_up_sentence(sentence)\r\n # bag of words - matrix of N words, vocabulary matrix\r\n bag = [0] * len(words)\r\n for s in sentence_words:\r\n for i, w in enumerate(words):\r\n if w == s:\r\n # assign 1 if current word is in the vocabulary position\r\n bag[i] = 1\r\n if show_details:\r\n print(\"found in bag: %s\" % w)\r\n return (np.array(bag))\r\n\r\n\r\ndef predict_class(sentence, model):\r\n # filter out predictions below a threshold\r\n p = bow(sentence, words, show_details=False)\r\n res = model.predict(np.array([p]))[0]\r\n ERROR_THRESHOLD = 0.25\r\n results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]\r\n # sort by strength of probability\r\n results.sort(key=lambda x: x[1], reverse=True)\r\n return_list = []\r\n for r in results:\r\n return_list.append({\"intent\": classes[r[0]], \"probability\": str(r[1])})\r\n return return_list\r\n\r\n\r\ndef getResponse(ints, intents_json):\r\n tag = ints[0]['intent']\r\n list_of_intents = intents_json['intents']\r\n for i in list_of_intents:\r\n if (i['tag'] == tag):\r\n result = random.choice(i['responses'])\r\n break\r\n return result\r\n\r\n\r\ndef chatbot_response(msg):\r\n ints = predict_class(msg, model)\r\n res = getResponse(ints, intents)\r\n return res\r\n\r\n\r\nbot_name = \"\" #add any random bot name here\r\nimport tkinter as tk\r\n\r\n\r\nclass Chat:\r\n def __init__(self):\r\n self.window = tk.Tk()\r\n self._setup_window()\r\n\r\n def _setup_window(self):\r\n self.window.title(\"enquiry_bot\")\r\n self.window.resizable(width=False, height=False)\r\n _width, _height = 400, 550\r\n self.window.configure(width=_width, height=_height, bg='grey')\r\n\r\n # Center the window\r\n screen_width = self.window.winfo_screenwidth()\r\n screen_height = self.window.winfo_screenheight()\r\n # For left-align\r\n left = (screen_width / 2) - (_width / 2)\r\n # For right-align\r\n top = (screen_height / 2) - (_height / 2)\r\n # For top and bottom\r\n self.window.geometry('%dx%d+%d+%d' % (_width, _height,\r\n left, top))\r\n\r\n top_label = tk.Label(self.window, bg='grey', fg='black',\r\n text='MMCOE Chat Bot', pady=6, font='12', )\r\n top_label.place(relwidth=1)\r\n\r\n divide_line = tk.Label(self.window, width=400, bg='green')\r\n divide_line.place(relwidth=1, rely=0.06, relheight=0.012)\r\n\r\n # text instance variable\r\n self.text_widget = tk.Text(self.window, width=20, height=2,\r\n bg='grey13', padx=5, pady=5, fg='white', wrap='word',\r\n font='Courier 12')\r\n self.text_widget.place(relheight=0.745, relwidth=0.97, 
rely=0.07)\r\n self.text_widget.configure(state=tk.DISABLED, cursor='arrow')\r\n\r\n scrollbar = tk.Scrollbar(self.window)\r\n scrollbar.place(relheight=0.744, relx=0.97, relwidth=0.03, rely=0.07)\r\n scrollbar.configure(command=self.text_widget.yview)\r\n self.text_widget.config(yscrollcommand=scrollbar.set)\r\n\r\n bottom_label = tk.Label(self.window, bg='grey', height=80)\r\n bottom_label.place(relwidth=1, rely=0.825)\r\n\r\n # message instance variable\r\n self.message_entry = tk.Entry(bottom_label, bg='white', font='12')\r\n self.message_entry.place(relwidth=0.75, relheight=0.05, rely=0.012,\r\n relx=0.011)\r\n self.message_entry.focus() #focus app window post launch\r\n self.message_entry.bind(\"<Return>\", self._on_return_pressed)\r\n\r\n send_button = tk.Button(bottom_label, text='Send', width=20,\r\n bg='green', command=lambda: self._on_return_pressed(None),\r\n font='12', )\r\n send_button.place(relx=0.78, rely=0.012, relheight=0.05, relwidth=0.20)\r\n\r\n def _on_return_pressed(self, event=None):\r\n global _message\r\n _message = self.message_entry.get()\r\n self._insert_message(_message, 'You')\r\n # self.window.after(1500, self._insert_answer) # delay the answer\r\n\r\n def _insert_message(self, _message, sender):\r\n if not _message:\r\n return\r\n\r\n self.message_entry.delete(0, tk.END)\r\n msg1 = f\"{sender}: {_message}\\n\\n\"\r\n self.text_widget.configure(state=tk.NORMAL)\r\n self.text_widget.insert(tk.END, msg1)\r\n self.text_widget.configure(state=tk.DISABLED)\r\n\r\n msg2 = f\"{bot_name}: {chatbot_response(_message)}\\n\\n\"\r\n self.text_widget.configure(state=tk.NORMAL)\r\n self.text_widget.insert(tk.END, msg2, 'bot')\r\n self.text_widget.configure(state=tk.DISABLED)\r\n\r\n self.text_widget.see(tk.END)\r\n\r\n def run(self):\r\n self.window.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n chat = Chat()\r\n chat.run()\r\n\r\n\r\n"
]
| [
[
"numpy.array"
]
]
|
research-manuscripts/MA_Felix_Rittler | [
"b5f7305e7a0c4bbbfe500df86ffce620e8a6822a"
]
| [
"autoencoder/test_performance.py"
]
| [
"import torch\nfrom autoencoder import Autoencoder\nimport time\nimport numpy as np\n\n# Begin of variables that can be edited by the user\ntrained_models = [\n (Autoencoder.Autoencoder1(), \"trained_autoencoders/a1.pt\"),\n (Autoencoder.Autoencoder2(), \"trained_autoencoders/a2.pt\"),\n (Autoencoder.Autoencoder3(), \"trained_autoencoders/a3.pt\"),\n (Autoencoder.Autoencoder4(), \"trained_autoencoders/a4.pt\")\n]\n# End of variables that can be edited by the user\n\ndummy_input = torch.randn(1, 3, 935, 900, dtype=torch.float)\ntorch.save(dummy_input, 'performance_test_tensor.pt')\n\n# load device and push to device\ndevice = 'cuda:0'\ndummy_input = dummy_input.to(device)\nprint(device)\n\nfor (model, autoencoder_path) in trained_models:\n print(autoencoder_path)\n # Load trained autoencoder\n model.load_state_dict(torch.load(autoencoder_path, device))\n model.to(device)\n dummy_input = dummy_input.to(device)\n\n f = open(\"performance_test_results.txt\", \"a\")\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n repetitions = 300\n timings_gpu = np.zeros((repetitions,1))\n\n # GPU-WARM-UP\n for _ in range(10):\n _ = model(dummy_input)\n\n print(\"Warm-Up Complete\")\n # MEASURE PERFORMANCE\n with torch.no_grad():\n for rep in range(repetitions):\n starter.record()\n _ = model(dummy_input)\n ender.record()\n # WAIT FOR GPU SYNC\n torch.cuda.synchronize()\n curr_time = starter.elapsed_time(ender)\n print(curr_time)\n timings_gpu[rep] = curr_time\n mean_syn = np.sum(timings_gpu) / repetitions\n std_syn = np.std(timings_gpu)\n print(mean_syn)\n f.write(\"Autoencoder {} on {}: Mean: {}, Std: {}\\n\".format(autoencoder_path, device, mean_syn, std_syn))\n\n device = 'cpu'\n dummy_input = dummy_input.to(device)\n model.to(device)\n\n timings_cpu = np.zeros((repetitions,1))\n with torch.no_grad():\n for rep in range(repetitions):\n startTime = time.time()\n _ = model(dummy_input)\n executionTime = (time.time() - startTime)\n timings_cpu[rep] = executionTime\n\n mean_syn = np.sum(timings_cpu) / repetitions\n std_syn = np.std(timings_cpu)\n print(mean_syn)\n f.write(\"Autoencoder {} on {}: Mean: {}, Std: {}\\n\".format(autoencoder_path, device, mean_syn, std_syn))\n f.close()\n"
]
| [
[
"torch.cuda.synchronize",
"torch.load",
"torch.randn",
"torch.cuda.Event",
"numpy.std",
"torch.no_grad",
"numpy.zeros",
"numpy.sum",
"torch.save"
]
]
|
FoolMasque/LBPH-FaceNet-Face-Recognition | [
"6bf1c9a0b010f1528ce478cf7951b56e50aee429"
]
| [
"devtube/LBPHRecognizer/faces-train.py"
]
| [
"import cv2\nimport os\nimport numpy as np\nfrom PIL import Image\nimport pickle\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nimage_dir = os.path.join(BASE_DIR, \"images\")\n\nface_cascade =cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\ncurrent_id = 0\nlabel_ids = {}\ny_labels = []\nx_train = []\n\nfor root, dirs, files in os.walk(image_dir):\n\tfor file in files:\n\t\tif file.endswith(\"png\") or file.endswith(\"jpg\"):\n\t\t\tpath = os.path.join(root, file)\n\t\t\tlabel = os.path.basename(os.path.dirname(path)).replace(\" \", \"-\").lower()\n\t\t\t#print(label, path)\n\t\t\tif not label in label_ids:\n\t\t\t\tlabel_ids[label] = current_id\n\t\t\t\tcurrent_id += 1\n\t\t\tid_ = label_ids[label]\n\t\t\t#print(label_ids)\n\n\n\t\t\tpil_image = Image.open(path).convert(\"L\")\n\t\t\tsize = (550,550)\n\t\t\tfinal_image = pil_image.resize(size, Image.ANTIALIAS)\n\t\t\timage_array = np.array(final_image, \"uint8\")\n\t\t\t#print(image_array)\n\t\t\t#检测图像中的脸\n\t\t\tfaces = face_cascade.detectMultiScale(image_array,scaleFactor=1.5,minNeighbors=5)\n\n\t\t\tfor(x, y, w, h) in faces:\n\t\t\t\troi = image_array[y:y+h, x:x+w]\n\t\t\t\tx_train.append(roi)\n\t\t\t\ty_labels.append(id_)\n\n\n#print(y_labels)\n#print(x_train)\n\nwith open(\"labels.pickle\", 'wb') as file:\n\tpickle.dump(label_ids, file)\n\nrecognizer.train(x_train, np.array(y_labels))\nrecognizer.save(\"trainner.yml\")"
]
| [
[
"numpy.array"
]
]
|
Preeti240/DoctorBOT | [
"16242441ae8de79542747d0d20ad40cc9aa0300b"
]
| [
"Question_Maker.py"
]
| [
"import pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nimport prediction2\n\ndef fillter1(s):\n nt_data = pd.read_csv(\"Scraped-Data/test2.csv\", encoding =\"ISO-8859-1\",)\n df = pd.DataFrame(nt_data)\n \n try: \n df_filtered = df[ (df[s[0]]== 1) & (df[s[1]]== 1) ]\n mylist=df_filtered.columns[(df_filtered == 1).iloc[0]]\n mylist2=[]\n mylist2 = mylist.values.tolist()\n mylist3=list(set(mylist2)-set(s))\n l=len(mylist3)\n for i in range(l):\n mylist3[i] = mylist3[i].replace('Â', '') \n mylist3[i] = mylist3[i].replace('Ã', '') \n mylist3[i] = mylist3[i].replace('\\xa0', ' ')\n mylist3[i] = mylist3[i].replace('\\x82', '')\n if not mylist3:\n return s,[]\n else:\n return mylist3,[]\n except: \n df_filtered = df[(df[s[0]]== 1)]\n mylist2=[]\n mylist=df_filtered.columns[(df_filtered == 1).iloc[0]] \n mylist2 = mylist.values.tolist()\n mylist3=list(set(mylist2)-set(s))\n \n s1=mylist3[0]\n df_filtered = df[(df[s[1]]== 1)]\n \n mylist4=[]\n mylist=df_filtered.columns[(df_filtered == 1).iloc[0]] \n mylist4 = mylist.values.tolist()\n mylist5=list(set(mylist4)-set(s))\n \n s2=mylist5[0]\n mylist6=[]\n mylist7=[]\n mylist6.append(s1)\n mylist6.append(s[0])\n \n mylist7.append(s2)\n mylist7.append(s[1])\n \n return mylist6,mylist7\n\n\ndef fillter3(s):\n nt_data = pd.read_csv(\"Scraped-Data/test2.csv\", encoding =\"ISO-8859-1\",)\n df = pd.DataFrame(nt_data)\n df_filtered = df[(df[s[0]]== 1)]\n mylist=df_filtered.columns[(df_filtered == 1).iloc[0]]\n mylist2=[]\n mylist2 = mylist.values.tolist()\n mylist3=list(set(mylist2)-set(s))\n l=len(mylist3)\n for i in range(l):\n mylist3[i]= mylist3[i].replace('Â', '')\n mylist3[i]= mylist3[i].replace('Ã', '')\n mylist3[i]= mylist3[i].replace('\\xa0', ' ')\n mylist3[i]= mylist3[i].replace('\\x82', '')\n if not mylist3:\n return s,[]\n else: \n return mylist3,[]\n\n\n\ndef fillter(mylist):\n try:\n mylist3 = fillter1(mylist)\n return mylist3\n except:\n prediction2.predict(mylist)\n return fillter3(mylist)\n"
]
| [
[
"pandas.read_csv",
"pandas.DataFrame"
]
]
|
csachs/mfisp | [
"90251933aa4b15d910ef076550d111c075d358be"
]
| [
"mfisp/boxcrop/__main__.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nautomatically registers, derotates and box-crops images,\nby Christian C. Sachs, uses molyso's registration functionality ( https://github.com/modsim/molyso )\nhttps://dx.doi.org/10.1371/journal.pone.0163453\nand a boxdetection routine from https://github.com/csachs/mfisp-boxdetection\n\"\"\"\n\nfrom molyso.imageio.imagestack import MultiImageStack\ntry:\n from molyso.imageio.imagestack_ometiff import OMETiffStack\n from molyso.imageio.imagestack_czi import CziStack\n from molyso.imageio.imagestack_nd2 import ND2Stack\nexcept ImportError:\n pass\n\nimport argparse\nimport numpy\n\nfrom tifffile import TiffWriter\n\nfrom molyso.generic.registration import translation_2x1d, shift_image\nfrom molyso.generic.rotation import find_rotation, rotate_image\n\nfrom mfisp_boxdetection import find_box\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"boxcrop tool [using techniques of https://dx.doi.org/10.1371/journal.pone.0163453 ]\"\n )\n\n parser.add_argument(\"input\", type=str, help=\"input file name\")\n parser.add_argument(\"--output\", type=str, default=\"\", help=\"output file name\")\n parser.add_argument(\"--channel\", type=int, default=0, help=\"bright field channel\")\n\n args = parser.parse_args()\n if args.output == '':\n args.output = args.input + '_registered-%04d.tif'\n\n ims = MultiImageStack.open(args.input)\n\n mp = ims.get_meta('multipoints')\n\n channels = ims.get_meta('channels')\n\n bright_field_channel = args.channel\n\n for p in range(mp):\n first = ims.get_image(t=0, pos=p, channel=bright_field_channel)\n\n angle = find_rotation(first)\n\n rotated = rotate_image(first, angle)\n\n top, bottom, left, right = find_box(rotated, subsample=1, debug=False)\n\n first_box = rotated[top:bottom, left:right]\n\n buffer = numpy.zeros((channels,) + first_box.shape, dtype=first.dtype)\n\n try:\n output_name = args.output % p\n except TypeError:\n output_name = args.output\n\n with TiffWriter(output_name, imagej=True) as tiff:\n for t in range(ims.get_meta('timepoints')):\n\n current = ims.get_image(t=t, pos=p, channel=bright_field_channel)\n shift, = translation_2x1d(first, current)\n\n for c in range(channels):\n if c == bright_field_channel:\n shifted = shift_image(current, shift, background='blank')\n else:\n shifted = shift_image(ims.get_image(t=t, pos=p, channel=c), shift, background='blank')\n\n rotated = rotate_image(shifted, angle)\n\n buffer[c, :, :] = rotated[top:bottom, left:right]\n\n tiff.save(buffer)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.zeros"
]
]
|
borg-project/cargo | [
"79e5ac3a6f267dcdc2179fc1a7c49504bafb6e0f"
]
| [
"src/python/cargo/plot.py"
]
| [
"\"\"\"\n@author: Bryan Silverthorn <[email protected]>\n\"\"\"\n\ndef get_color_list(n, saturation = 1.0, value = 0.75):\n \"\"\"\n Get an optimally-spaced list of (RGB) color values.\n \"\"\"\n\n import numpy\n\n from matplotlib.colors import hsv_to_rgb\n\n hsv_colors = numpy.empty((1, n, 3))\n\n hsv_colors[:, :, 0] = numpy.r_[0.0:1.0 - 1.0 / n:complex(0, n)]\n hsv_colors[:, :, 1] = 1.0\n hsv_colors[:, :, 2] = 0.75\n\n (rgb_colors,) = hsv_to_rgb(hsv_colors)\n\n return rgb_colors\n\n"
]
| [
[
"matplotlib.colors.hsv_to_rgb",
"numpy.empty"
]
]
|
evelynpurse/CTA_summart | [
"90bc2b59f45d0e9d02b4c2b5986b21d7781c3908"
]
| [
"check_vol/check_vol.py"
]
| [
"import pandas as pd\nimport numpy as np\n\n#####if 20-day volume avg >10000,can be traded in the next period\n\nfuture_price=pd.read_csv(\"../data_extraction/future_price.csv\")\nfuture_info=pd.read_csv(\"../data_extraction/future_info.csv\")\ncombined=future_price.set_index(['order_book_id'])[['date','volume']].join(future_info.set_index('order_book_id')['underlying_symbol'])\ncombined=combined.reset_index()\ncombined=combined.groupby(by=['date','underlying_symbol'])['volume'].sum()\ncombined=combined.reset_index()\ncombined=combined.set_index(['underlying_symbol','date'])\ncombined=combined.unstack(level=0)\ncombined.columns=combined.columns.droplevel(0)\n\n#rolling 20d mean\ncheck_vol=combined.rolling(20).mean()\n#if 20d volume mean>10000, set value=1\ncheck_vol[check_vol>=10000]=1\ncheck_vol[check_vol!=1]=0\ncheck_vol=check_vol.reset_index()\ncheck_vol.to_csv(\"check_vol.csv\",index=None)"
]
| [
[
"pandas.read_csv"
]
]
|
inspo13/ML-Reserve | [
"26b583f58135fcabb65e815c85760c5eea70f2a0"
]
| [
"Noise Suppression/app.py"
]
| [
"# Importing project dependencies\r\n\r\nimport streamlit as st\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy.io.wavfile import write\r\nimport util_functions as ufs\r\nimport time\r\n\r\n# Setting config option for deployment\r\n\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\nst.title('Noise-Suppressor')\r\nst.subheader('Removes background-noise from audio samples')\r\n\r\n# UI design\r\n\r\nnav_choice = st.sidebar.radio('Navigation', ['Home'], index=0)\r\n\r\n_param_dict = {} # Used for getting plot related information\r\n_path_to_model = 'utils/models/auto_encoders_for_noise_removal_production.h5' # Path to pre-trained model\r\n_targe_file = 'utils/outputs/preds.wav' # target file for storing model.output\r\n\r\nif nav_choice == 'Home':\r\n st.image('utils/images/header.jpg', width=450, height=500)\r\n\r\n st.info('Upload your audio sample below')\r\n audio_sample = st.file_uploader('Audio Sample', ['wav']) # Get audio sample as an input from users\r\n if audio_sample:\r\n try:\r\n prog = st.progress(0)\r\n model = ufs.load_model(_path_to_model) # call to the utility module to cache the model\r\n audio = tf.audio.decode_wav(audio_sample.read(), desired_channels=1)\r\n # decoding audio waveform by using tf.audio.decode_wav as a mono sound wave\r\n _param_dict.update({'audio_sample': audio.audio})\r\n flag = 1\r\n for i in range(100):\r\n time.sleep(0.001)\r\n prog.progress(i + 1)\r\n st.info('Uploaded audio sample')\r\n st.audio(audio_sample)\r\n with st.spinner('Wait for it...'):\r\n time.sleep(1)\r\n preds = model.predict(tf.expand_dims(audio.audio, 0)) # using this EagerTensor to suppress te noie\r\n preds = tf.reshape(preds, (-1, 1))\r\n _param_dict.update({'predicted_outcomes': preds})\r\n preds = np.array(preds)\r\n write(_targe_file, 44100, preds) # writing the output file to play\r\n st.success('Audio after noise removal')\r\n st.audio(_targe_file)\r\n\r\n # Visual Representation of model's prediction using sync plots\r\n\r\n prediction_stats = st.checkbox('Prediction Plots')\r\n noise_rem = st.checkbox('Noise Removal Plots')\r\n if noise_rem:\r\n fig, axes = plt.subplots(2, 1, figsize=(10, 6))\r\n axes[0].plot(np.arange(len(_param_dict['audio_sample'])), _param_dict['audio_sample'], c='r')\r\n axes[0].set_title('Original audio sample')\r\n axes[1].plot(np.arange(len(_param_dict['predicted_outcomes'])), _param_dict['predicted_outcomes'],\r\n c='b')\r\n axes[1].set_title('Noise suppressed audio output')\r\n st.pyplot()\r\n\r\n if prediction_stats:\r\n plt.figure(figsize=(10, 6))\r\n plt.plot(np.arange(len(_param_dict['audio_sample'])), _param_dict['audio_sample'], c='r',\r\n label='Original audio sample')\r\n plt.plot(np.arange(len(_param_dict['predicted_outcomes'])), _param_dict['predicted_outcomes'], c='b',\r\n label='Noise suppressed audio output')\r\n plt.legend()\r\n st.pyplot()\r\n\r\n except Exception as e:\r\n print(e, type(e))\r\n"
]
| [
[
"matplotlib.pyplot.legend",
"scipy.io.wavfile.write",
"tensorflow.reshape",
"matplotlib.pyplot.subplots",
"tensorflow.expand_dims",
"numpy.array",
"matplotlib.pyplot.figure"
]
]
|
felipevicens/tng-probes | [
"ec27bf95a975f20edf0f17660d9c1b07dc0609ef"
]
| [
"probe_moongen/plot-bat-csv.py"
]
| [
"import plotly\n# from plotly.graph_objs import Scatter, Layout\nimport pandas as pd\nimport numpy as np\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport socket\nimport os\nimport time\n\n#UDP_IP = \"10.10.1.15\"\n#UDP_PORT = 55555\n#sock = socket.socket(socket.AF_INET, # Internet\n# socket.SOCK_DGRAM) # UDP\n#sock.bind((UDP_IP, UDP_PORT))\n\n\n#while True:\n#data, addr = sock.recvfrom(2048) # buffer size is 1024 bytes\n\n\n# df = pd.read_csv('line_data.csv')\ndf = pd.read_csv(\"/home/localadmin/MoonGen/histogram.csv\", header=None, skipinitialspace=True, delimiter=',', encoding=\"utf-8-sig\")\ndf.head()\n\n\n\ntrace1 = go.Histogram(\n #x=df['Latency'], y=df['Packets'], # Data\n x=df[0], y=df[1],\n # xbins=dict(\n # start=2000,\n # end=2200,\n # size=30\n # ),\n name='Latency Histo' # Additional options\n )\n#trace2 = go.Scatter(\n# x=df['Time'], y=df['Avg RX Packet Rate'], # Data\n# mode='lines', name='Avg RX Packet Rate Name' # Additional options\n# )\n\n\n\nlayout = go.Layout(title='Forwarding Delay',xaxis=dict(title='Latency (nsec)',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),yaxis=dict(title='# of Packets',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),plot_bgcolor='rgb(230, 230,230)')\n\n#fig = go.Figure(data=[trace1,trace2], layout=layout)\nfig = go.Figure(data=[trace1], layout=layout)\n\n# Plot data in the notebook\n# py.iplot(fig, filename='simple-plot-from-csv')\n\nconfig={'showLink': False}\n\nplotly.offline.plot( { \"data\": [trace1], \"layout\": layout }, config=config, auto_open=False, filename=\"/var/www/html/latest.html\")"
]
| [
[
"pandas.read_csv"
]
]
|
Tony607/keras_mixup_generator | [
"a510b3f7f909055b64a308bb8e94dc9e0d66e29c"
]
| [
"mixup_generator.py"
]
| [
"import numpy as np\n\n\nclass MixupImageDataGenerator():\n def __init__(self, generator, directory, batch_size, img_height, img_width, alpha=0.2, subset=None):\n \"\"\"Constructor for mixup image data generator.\n\n Arguments:\n generator {object} -- An instance of Keras ImageDataGenerator.\n directory {str} -- Image directory.\n batch_size {int} -- Batch size.\n img_height {int} -- Image height in pixels.\n img_width {int} -- Image width in pixels.\n\n Keyword Arguments:\n alpha {float} -- Mixup beta distribution alpha parameter. (default: {0.2})\n subset {str} -- 'training' or 'validation' if validation_split is specified in\n `generator` (ImageDataGenerator).(default: {None})\n \"\"\"\n\n self.batch_index = 0\n self.batch_size = batch_size\n self.alpha = alpha\n\n # First iterator yielding tuples of (x, y)\n self.generator1 = generator.flow_from_directory(directory,\n target_size=(\n img_height, img_width),\n class_mode=\"categorical\",\n batch_size=batch_size,\n shuffle=True,\n subset=subset)\n\n # Second iterator yielding tuples of (x, y)\n self.generator2 = generator.flow_from_directory(directory,\n target_size=(\n img_height, img_width),\n class_mode=\"categorical\",\n batch_size=batch_size,\n shuffle=True,\n subset=subset)\n\n # Number of images across all classes in image directory.\n self.n = self.generator1.samples\n\n def reset_index(self):\n \"\"\"Reset the generator indexes array.\n \"\"\"\n\n self.generator1._set_index_array()\n self.generator2._set_index_array()\n\n def on_epoch_end(self):\n self.reset_index()\n\n def reset(self):\n self.batch_index = 0\n\n def __len__(self):\n # round up\n return (self.n + self.batch_size - 1) // self.batch_size\n\n def get_steps_per_epoch(self):\n \"\"\"Get number of steps per epoch based on batch size and\n number of images.\n\n Returns:\n int -- steps per epoch.\n \"\"\"\n\n return self.n // self.batch_size\n\n def __next__(self):\n \"\"\"Get next batch input/output pair.\n\n Returns:\n tuple -- batch of input/output pair, (inputs, outputs).\n \"\"\"\n\n if self.batch_index == 0:\n self.reset_index()\n\n current_index = (self.batch_index * self.batch_size) % self.n\n if self.n > current_index + self.batch_size:\n self.batch_index += 1\n else:\n self.batch_index = 0\n\n # random sample the lambda value from beta distribution.\n l = np.random.beta(self.alpha, self.alpha, self.batch_size)\n\n X_l = l.reshape(self.batch_size, 1, 1, 1)\n y_l = l.reshape(self.batch_size, 1)\n\n # Get a pair of inputs and outputs from two iterators.\n X1, y1 = self.generator1.next()\n X2, y2 = self.generator2.next()\n\n # Perform the mixup.\n X = X1 * X_l + X2 * (1 - X_l)\n y = y1 * y_l + y2 * (1 - y_l)\n return X, y\n\n def __iter__(self):\n while True:\n yield next(self)\n"
]
| [
[
"numpy.random.beta"
]
]
|
Nitin-Mane/MMDET | [
"7410b25f27c200719482955cb4a8a1c381e67e04"
]
| [
"mmdet/datasets/imagenet.py"
]
| [
"import numpy as np\n\nfrom .custom import CustomDataset\nfrom .custom_pair import CustomPairDataset\nfrom .custom_block import CustomBlockDataset\nfrom .registry import DATASETS\n\n\[email protected]_module\nclass ImageNetDETVIDDataset(CustomDataset):\n\n CLASSES = ('airplane','antelope','bear','bicycle','bird','bus',\n 'car','cattle','dog','domestic_cat','elephant','fox',\n 'giant_panda','hamster','horse','lion','lizard','monkey',\n 'motorcycle','rabbit','red_panda','sheep','snake','squirrel',\n 'tiger','train','turtle','watercraft','whale','zebra')\n \n def __init__(self,*args,**kargs):\n super().__init__(*args,**kargs)\n self.img_ids = list(range(len(self.img_infos)))\n self.cat_ids = list(range(len(self.CLASSES)))\n\n def get_ann_info(self, idx):\n ann = self.img_infos[idx]['ann']\n # modify type if necessary.\n if not isinstance(ann['bboxes'],np.ndarray):\n ann['bboxes'] = np.array(ann['bboxes'], dtype=np.float32).reshape(-1, 4)\n if not isinstance(ann['labels'], np.ndarray):\n ann['labels'] = np.array(ann['labels'], dtype=np.int64)#.reshape(-1, 1)\n self.img_infos[idx]['ann']=ann\n return ann\n\n\[email protected]_module\nclass ImageNetVIDBlockDataset(CustomBlockDataset):\n CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',\n 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox',\n 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',\n 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel',\n 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')\n\n def __init__(self, *args, **kargs):\n super().__init__(*args, **kargs)\n self.img_ids = list(range(len(self.img_infos)))\n self.cat_ids = list(range(len(self.CLASSES)))\n\n def get_ann_info(self, idx):\n ann = self.img_infos[idx]['ann']\n # modify type if necessary.\n if not isinstance(ann['bboxes'], np.ndarray):\n ann['bboxes'] = np.array(ann['bboxes'], dtype=np.float32).reshape(-1, 4)\n if not isinstance(ann['labels'], np.ndarray):\n ann['labels'] = np.array(ann['labels'], dtype=np.int64) # .reshape(-1, 1)\n self.img_infos[idx]['ann'] = ann\n return ann\n\n\[email protected]_module\nclass ImageNetVIDPairDataset(CustomPairDataset):\n CLASSES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',\n 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox',\n 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',\n 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel',\n 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')\n\n def __init__(self, *args, **kargs):\n super().__init__(*args, **kargs)\n self.img_ids = list(range(len(self.img_infos)))\n self.cat_ids = list(range(len(self.CLASSES)))\n\n def get_ann_info(self, idx):\n ann1 = self.img_infos[idx]['ann1']\n ann2 = self.img_infos[idx]['ann2']\n # modify type if necessary.\n if not isinstance(ann1['bboxes'], np.ndarray):\n ann1['bboxes'] = np.array(ann1['bboxes'], dtype=np.float32).reshape(-1, 4)\n if not isinstance(ann1['labels'], np.ndarray):\n ann1['labels'] = np.array(ann1['labels'], dtype=np.int64)\n if not isinstance(ann1['trackids'], np.ndarray):\n ann1['trackids'] = np.array(ann1['trackids'], dtype=np.int64)\n self.img_infos[idx]['ann1'] = ann1\n\n if not isinstance(ann2['bboxes'], np.ndarray):\n ann2['bboxes'] = np.array(ann2['bboxes'], dtype=np.float32).reshape(-1, 4)\n if not isinstance(ann2['labels'], np.ndarray):\n ann2['labels'] = np.array(ann2['labels'], dtype=np.int64)\n if not isinstance(ann2['trackids'], np.ndarray):\n ann2['trackids'] = np.array(ann2['trackids'], dtype=np.int64)\n 
self.img_infos[idx]['ann2'] = ann2\n return ann1, ann2"
]
| [
[
"numpy.array"
]
]
|
YuJiang01/n2nnmn | [
"f0d751313ca756fe40ece1a7bbb0205ab899adf8"
]
| [
"exp_clevr/train_clevr_rl_gt_layout.py"
]
| [
"from __future__ import absolute_import, division, print_function\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu_id', type=int, default=0)\nparser.add_argument(\n '--pretrained_model',\n default='./exp_clevr/tfmodel/clevr_gt_layout/00050000')\nargs = parser.parse_args()\n\ngpu_id = args.gpu_id # set GPU id to use\nimport os; os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n\nimport numpy as np\nimport tensorflow as tf\n# Start the session BEFORE importing tensorflow_fold\n# to avoid taking up all GPU memory\nsess = tf.Session(config=tf.ConfigProto(\n gpu_options=tf.GPUOptions(allow_growth=True),\n allow_soft_placement=False, log_device_placement=False))\n\nfrom models_clevr.nmn3_assembler import Assembler\nfrom models_clevr.nmn3_model import NMN3Model\nfrom util.clevr_train.data_reader import DataReader\n\n# Module parameters\nH_feat = 10\nW_feat = 15\nD_feat = 512\nembed_dim_txt = 300\nembed_dim_nmn = 300\nlstm_dim = 512\nnum_layers = 2\nencoder_dropout = False\ndecoder_dropout = False\ndecoder_sampling = True\nT_encoder = 45\nT_decoder = 10\nN = 64\nprune_filter_module = True\n\n# Training parameters\ninvalid_expr_loss = 0.5 # loss value when the layout is invalid\nlambda_entropy = 0.005\nweight_decay = 5e-6\nbaseline_decay = 0.99\nmax_grad_l2_norm = 10\nmax_iter = 80000\nsnapshot_interval = 10000\nexp_name = \"clevr_rl_gt_layout\"\npretrained_model = args.pretrained_model\nsnapshot_dir = './exp_clevr/tfmodel/%s/' % exp_name\n\n# Log params\nlog_interval = 20\nlog_dir = './exp_clevr/tb/%s/' % exp_name\n\n# Data files\nvocab_question_file = './exp_clevr/data/vocabulary_clevr.txt'\nvocab_layout_file = './exp_clevr/data/vocabulary_layout.txt'\nvocab_answer_file = './exp_clevr/data/answers_clevr.txt'\n\nimdb_file_trn = './exp_clevr/data/imdb/imdb_trn.npy'\nimdb_file_tst = './exp_clevr/data/imdb/imdb_val.npy'\n\nassembler = Assembler(vocab_layout_file)\n\ndata_reader_trn = DataReader(imdb_file_trn, shuffle=True, one_pass=False,\n batch_size=N,\n T_encoder=T_encoder,\n T_decoder=T_decoder,\n assembler=assembler,\n vocab_question_file=vocab_question_file,\n vocab_answer_file=vocab_answer_file,\n prune_filter_module=prune_filter_module)\n\nnum_vocab_txt = data_reader_trn.batch_loader.vocab_dict.num_vocab\nnum_vocab_nmn = len(assembler.module_names)\nnum_choices = data_reader_trn.batch_loader.answer_dict.num_vocab\n\n# Network inputs\ninput_seq_batch = tf.placeholder(tf.int32, [None, None])\nseq_length_batch = tf.placeholder(tf.int32, [None])\nimage_feat_batch = tf.placeholder(tf.float32, [None, H_feat, W_feat, D_feat])\nexpr_validity_batch = tf.placeholder(tf.bool, [None])\nanswer_label_batch = tf.placeholder(tf.int32, [None])\n\n# The model for training\nnmn3_model_trn = NMN3Model(\n image_feat_batch, input_seq_batch,\n seq_length_batch, T_decoder=T_decoder,\n num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,\n num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,\n lstm_dim=lstm_dim, num_layers=num_layers,\n assembler=assembler,\n encoder_dropout=encoder_dropout,\n decoder_dropout=decoder_dropout,\n decoder_sampling=decoder_sampling,\n num_choices=num_choices)\n\nfinetune_lr = 1e-4 # 1/10 of the default 1e-3 for adam\n\ncompiler = nmn3_model_trn.compiler\nscores = nmn3_model_trn.scores\nlog_seq_prob = nmn3_model_trn.log_seq_prob\n\n# Loss function\nsoftmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=scores, labels=answer_label_batch)\n# The final per-sample loss, which is vqa loss for valid expr\n# and 
invalid_expr_loss for invalid expr\nfinal_loss_per_sample = tf.where(expr_validity_batch,\n softmax_loss_per_sample,\n tf.ones_like(softmax_loss_per_sample) * invalid_expr_loss)\n\n# Totoal training loss:\n# loss = E[ (C - b) * \\diff[log(p(x))] + \\diff[C] ]\n# (where C = -R is the cost/loss; b is baseline)\navg_sample_loss = tf.reduce_mean(final_loss_per_sample)\nbaseline = tf.Variable(invalid_expr_loss, trainable=False, dtype=tf.float32)\nbaseline_update_op = tf.assign_add(baseline,\n (1-baseline_decay) * (avg_sample_loss-baseline))\npolicy_gradient_loss = tf.reduce_mean(\n tf.stop_gradient(final_loss_per_sample-baseline)*log_seq_prob)\n\ntotal_training_loss = policy_gradient_loss + avg_sample_loss\ntotal_loss = tf.add_n([total_training_loss,\n lambda_entropy * nmn3_model_trn.entropy_reg,\n weight_decay * nmn3_model_trn.l2_reg])\n\n# Train with Adam\nsolver = tf.train.AdamOptimizer(learning_rate=finetune_lr)\ngradients = solver.compute_gradients(total_loss)\n\n# Clip gradient by L2 norm\n# gradients = gradients_part1+gradients_part2\ngradients = [(tf.clip_by_norm(g, max_grad_l2_norm), v)\n for g, v in gradients]\nsolver_op = solver.apply_gradients(gradients)\n\n# Training operation\n# Partial-run can't fetch training operations\n# some workaround to make partial-run work\nwith tf.control_dependencies([solver_op, baseline_update_op]):\n train_step = tf.constant(0)\n\n# Write summary to TensorBoard\nos.makedirs(log_dir, exist_ok=True)\nlog_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())\n\nloss_ph = tf.placeholder(tf.float32, [])\nentropy_ph = tf.placeholder(tf.float32, [])\naccuracy_ph = tf.placeholder(tf.float32, [])\nbaseline_ph = tf.placeholder(tf.float32, [])\nvalidity_ph = tf.placeholder(tf.float32, [])\nsummary_trn = []\nsummary_trn.append(tf.summary.scalar(\"avg_sample_loss\", loss_ph))\nsummary_trn.append(tf.summary.scalar(\"entropy\", entropy_ph))\nsummary_trn.append(tf.summary.scalar(\"avg_accuracy\", accuracy_ph))\n# summary_trn.append(tf.summary.scalar(\"baseline\", baseline_ph))\nsummary_trn.append(tf.summary.scalar(\"validity\", validity_ph))\nlog_step_trn = tf.summary.merge(summary_trn)\n\ntst_answer_accuracy_ph = tf.placeholder(tf.float32, [])\ntst_layout_accuracy_ph = tf.placeholder(tf.float32, [])\ntst_layout_validity_ph = tf.placeholder(tf.float32, [])\nsummary_tst = []\nsummary_tst.append(tf.summary.scalar(\"test_answer_accuracy\", tst_answer_accuracy_ph))\nsummary_tst.append(tf.summary.scalar(\"test_layout_accuracy\", tst_layout_accuracy_ph))\nsummary_tst.append(tf.summary.scalar(\"test_layout_validity\", tst_layout_validity_ph))\nlog_step_tst = tf.summary.merge(summary_tst)\n\nos.makedirs(snapshot_dir, exist_ok=True)\nsnapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots\nsess.run(tf.global_variables_initializer())\n\n# Load previous model\nsnapshot_loader = tf.train.Saver([v for v in tf.global_variables() if v != baseline])\nsnapshot_loader.restore(sess, pretrained_model)\n\ndef run_training(max_iter, dataset_trn):\n avg_accuracy = 0\n accuracy_decay = 0.99\n for n_iter, batch in enumerate(dataset_trn.batches()):\n if n_iter >= max_iter:\n break\n # set up input and output tensors\n h = sess.partial_run_setup(\n [nmn3_model_trn.predicted_tokens, nmn3_model_trn.entropy_reg,\n scores, avg_sample_loss, train_step],\n [input_seq_batch, seq_length_batch, image_feat_batch,\n compiler.loom_input_tensor, expr_validity_batch,\n answer_label_batch])\n\n # Part 0 & 1: Run Convnet and generate module layout\n tokens, entropy_reg_val = 
sess.partial_run(h,\n (nmn3_model_trn.predicted_tokens, nmn3_model_trn.entropy_reg),\n feed_dict={input_seq_batch: batch['input_seq_batch'],\n seq_length_batch: batch['seq_length_batch'],\n image_feat_batch: batch['image_feat_batch']})\n # Assemble the layout tokens into network structure\n expr_list, expr_validity_array = assembler.assemble(tokens)\n # all exprs should be valid (as it's in the decoder)\n assert(np.all(expr_validity_array))\n\n labels = batch['answer_label_batch']\n # Build TensorFlow Fold input for NMN\n expr_feed = compiler.build_feed_dict(expr_list)\n expr_feed[expr_validity_batch] = expr_validity_array\n expr_feed[answer_label_batch] = labels\n\n # Part 2: Run NMN and learning steps\n scores_val, avg_sample_loss_val, _ = sess.partial_run(\n h, (scores, avg_sample_loss, train_step), feed_dict=expr_feed)\n\n # compute accuracy\n predictions = np.argmax(scores_val, axis=1)\n accuracy = np.mean(np.logical_and(expr_validity_array,\n predictions == labels))\n avg_accuracy += (1-accuracy_decay) * (accuracy-avg_accuracy)\n validity = np.mean(expr_validity_array)\n\n # Add to TensorBoard summary\n if (n_iter+1) % log_interval == 0 or (n_iter+1) == max_iter:\n print(\"iter = %d\\n\\tloss = %f, accuracy (cur) = %f, \"\n \"accuracy (avg) = %f, entropy = %f, validity = %f\" %\n (n_iter+1, avg_sample_loss_val, accuracy,\n avg_accuracy, -entropy_reg_val, validity))\n summary = sess.run(log_step_trn, {\n loss_ph: avg_sample_loss_val,\n entropy_ph: -entropy_reg_val,\n accuracy_ph: avg_accuracy,\n # baseline_ph: sess.run(baseline),\n validity_ph: validity})\n log_writer.add_summary(summary, n_iter+1)\n\n # Save snapshot\n if (n_iter+1) % snapshot_interval == 0 or (n_iter+1) == max_iter:\n snapshot_file = os.path.join(snapshot_dir, \"%08d\" % (n_iter+1))\n snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)\n print('snapshot saved to ' + snapshot_file)\n\nrun_training(max_iter, data_reader_trn)\n"
]
| [
[
"tensorflow.control_dependencies",
"tensorflow.global_variables",
"numpy.all",
"numpy.mean",
"tensorflow.GPUOptions",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.assign_add",
"tensorflow.Variable",
"tensorflow.stop_gradient",
"tensorflow.clip_by_norm",
"numpy.argmax",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.logical_and",
"tensorflow.summary.merge",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.ones_like"
]
]
|
bgraille/pylbm | [
"fd4419933e05b85be364232fddedfcb4f7275e1f"
]
| [
"demo/2D/lid_driven_cavity_2.py"
]
| [
"# Authors:\n# Loic Gouarin <[email protected]>\n# Benjamin Graille <[email protected]>\n#\n# License: BSD 3 clause\n\n\"\"\"\nSimulate the lid driven cavity\n\ndt rho + dx qx + dy qy = 0\ndt qx + dx (qx^2/rho + c^2 rho) + dy (qx*qy/rho) = 0\ndt qy + dx (qx*qy/rho) + dy (qy^2/rho + c^2 rho) = 0\n\"\"\"\n\nimport numpy as np\nimport sympy as sp\nimport matplotlib.pyplot as plt\nimport pylbm\n\n# pylint: disable=redefined-outer-name\n\nX, Y = sp.symbols('X, Y')\nRHO, QX, QY = sp.symbols('rho, qx, qy')\nLA = sp.symbols('lambda', constants=True)\n\n\n# pylint: disable=unused-argument\ndef bc_up(f, m, x, y, rho_o, driven_velocity):\n \"\"\"\n boundary values on the top bound\n \"\"\"\n m[RHO] = rho_o\n m[QX] = rho_o * driven_velocity\n m[QY] = 0.\n\n\ndef vorticity(sol):\n \"\"\"\n compute the vorticity of the solution\n \"\"\"\n qx_n = sol.m[QX] / sol.m[RHO]\n qy_n = sol.m[QY] / sol.m[RHO]\n vort = np.abs(\n qx_n[1:-1, 2:] - qx_n[1:-1, :-2] -\n qy_n[2:, 1:-1] + qy_n[:-2, 1:-1]\n )\n return vort\n\n\ndef flow_lines(sol, nlines, time_length, scale=0.5):\n \"\"\"\n compute the flow lines of the solution\n\n Parameters\n ----------\n sol : :py:class:`Simulation<pylbm.simulation.Simulation>`\n the solution given by pylbm\n nlines : int (number of flow lines)\n time_length : double (time during which we follow the lines)\n scale : double (velocity scale (default 0.5))\n\n Returns\n -------\n list\n lines\n \"\"\"\n u_x = sol.m[QX] / sol.m[RHO]\n u_y = sol.m[QY] / sol.m[RHO]\n # if scale is None:\n # scale = max(np.linalg.norm(u_x, np.inf), np.linalg.norm(u_y, np.inf))\n lines = []\n xmin, xmax = sol.domain.geom.bounds[0]\n ymin, ymax = sol.domain.geom.bounds[1]\n dx = sol.domain.dx\n nx, ny = sol.domain.shape_in\n for _ in range(nlines):\n # begin a new line\n cont = True # boolean to continue the line\n x = xmin + (xmax-xmin) * np.random.rand()\n y = ymin + (ymax-ymin) * np.random.rand()\n line_x, line_y = [x], [y]\n t = 0\n while cont:\n i, j = int((x-xmin)/(xmax-xmin)*nx), int((y-ymin)/(ymax-ymin)*ny)\n uxij, uyij = u_x[i, j], u_y[i, j]\n if uxij == 0 and uyij == 0:\n cont = False\n else:\n dt = dx*scale / np.sqrt(uxij**2+uyij**2)\n x += uxij*dt\n y += uyij*dt\n t += dt\n if x < xmin or x >= xmax or y < ymin or y >= ymax:\n cont = False\n else:\n line_x.append(x)\n line_y.append(y)\n if t >= time_length:\n cont = False\n lines.append([np.array(line_x), np.array(line_y)])\n return lines\n\n\n# pylint: disable=invalid-name\ndef run(space_step,\n final_time,\n generator=\"cython\",\n sorder=None,\n with_plot=True):\n \"\"\"\n Parameters\n ----------\n\n space_step: double\n spatial step\n\n final_time: double\n final time\n\n generator: string\n pylbm generator\n\n sorder: list\n storage order\n\n with_plot: boolean\n if True plot the solution otherwise just compute the solution\n\n\n Returns\n -------\n\n sol\n <class 'pylbm.simulation.Simulation'>\n\n \"\"\"\n # parameters\n scheme_name = 'Geier'\n xmin, xmax, ymin, ymax = 0., 1., 0., 1. # bounds of the domain\n la = 1. # velocity of the scheme\n rho_o = 1. 
# reference value of the mass\n driven_velocity = 0.05 # boundary value of the velocity\n mu = 5.e-6 # bulk viscosity\n zeta = 100*mu # shear viscosity\n\n def moments_choice(scheme_name, mu, zeta):\n if scheme_name == 'dHumiere':\n dummy = 1./rho_o\n QX2 = dummy*QX**2\n QY2 = dummy*QY**2\n Q2 = QX2+QY2\n QXY = dummy*QX*QY\n polynomials = [\n 1,\n X, Y,\n 3*(X**2+Y**2)-4*LA**2,\n 0.5*(9*(X**2+Y**2)**2-21*(X**2+Y**2)*LA**2+8*LA**4),\n 3*X*(X**2+Y**2)-5*X*LA**2, 3*Y*(X**2+Y**2)-5*Y*LA**2,\n X**2-Y**2, X*Y\n ]\n equilibrium = [\n RHO,\n QX, QY,\n -2*RHO*LA**2 + 3*Q2,\n RHO*LA**2 - 3*Q2,\n -QX*LA**2, -QY*LA**2,\n QX2 - QY2, QXY\n ]\n dummy = 3.0/(la*rho_o*space_step)\n sigma_1 = dummy*zeta\n sigma_2 = dummy*mu\n s_1 = 1/(.5+sigma_1)\n s_2 = 1/(.5+sigma_2)\n\n if scheme_name == 'Geier':\n UX, UY = QX/RHO, QY/RHO\n RHOU2 = RHO * (UX**2 + UY**2)\n polynomials = [\n 1, X, Y,\n X**2 + Y**2,\n X*Y**2,\n Y*X**2,\n X**2*Y**2,\n X**2 - Y**2,\n X*Y,\n ]\n equilibrium = [\n RHO, QX, QY,\n RHOU2 + 2/3*RHO*LA**2,\n QX*(LA**2/3+UY**2),\n QY*(LA**2/3+UX**2),\n RHO*(LA**2/3+UX**2)*(LA**2/3+UY**2),\n RHO*(UX**2 - UY**2),\n RHO*UX*UY,\n ]\n dummy = 3.0/(la*rho_o*space_step)\n sigma_1 = dummy*(zeta - 2*mu/3)\n sigma_2 = dummy*mu\n s_1 = 1/(.5+sigma_1)\n s_2 = 1/(.5+sigma_2)\n\n if scheme_name == 'Lallemand':\n dummy = 1./rho_o\n QX2 = dummy*QX**2\n QY2 = dummy*QY**2\n Q2 = QX2+QY2\n QXY = dummy*QX*QY\n polynomials = [\n 1, X, Y,\n X**2 + Y**2,\n X*(X**2+Y**2),\n Y*(X**2+Y**2),\n (X**2+Y**2)**2,\n X**2 - Y**2,\n X*Y,\n ]\n equilibrium = [\n RHO,\n QX, QY,\n Q2+2/3*LA**2*RHO,\n 4/3*QX*LA**2,\n 4/3*QY*LA**2,\n ((21*Q2+6*RHO*LA**2)*LA**2 - (6*Q2-2*RHO*LA**2))/9,\n QX2-QY2,\n QXY,\n ]\n dummy = 3.0/(la*rho_o*space_step)\n sigma_1 = dummy*zeta\n sigma_2 = dummy*mu\n s_1 = 1/(.5+sigma_1)\n s_2 = 1/(.5+sigma_2)\n\n s = [0., 0., 0., s_1, s_1, s_1, s_1, s_2, s_2]\n return polynomials, equilibrium, s\n\n polynomials, equilibrium, s = moments_choice(scheme_name, mu, zeta)\n\n simu_cfg = {\n 'parameters': {LA: la},\n 'box': {\n 'x': [xmin, xmax],\n 'y': [ymin, ymax],\n 'label': [0, 0, 0, 1]\n },\n 'space_step': space_step,\n 'scheme_velocity': LA,\n 'schemes': [\n {\n 'velocities': list(range(9)),\n 'polynomials': polynomials,\n 'relaxation_parameters': s,\n 'equilibrium': equilibrium,\n 'conserved_moments': [RHO, QX, QY],\n },\n ],\n 'init': {RHO: rho_o,\n QX: 0.,\n QY: 0.},\n 'boundary_conditions': {\n 0: {'method': {0: pylbm.bc.BouzidiBounceBack}},\n 1: {\n 'method': {0: pylbm.bc.BouzidiBounceBack},\n 'value': (bc_up, (rho_o, driven_velocity))\n }\n },\n 'generator': generator,\n 'relative_velocity': [QX/RHO, QY/RHO],\n # 'show_code': True,\n }\n\n sol = pylbm.Simulation(simu_cfg, sorder=sorder)\n while sol.t < final_time:\n sol.one_time_step()\n\n viewer = pylbm.viewer.matplotlib_viewer\n fig = viewer.Fig()\n\n axe = fig[0]\n axe.grid(visible=False)\n axe.xaxis_set_visible(False)\n axe.yaxis_set_visible(False)\n axe.SurfaceImage(\n vorticity(sol),\n cmap='jet', clim=[0, .1], alpha=0.25,\n )\n lines = flow_lines(sol, 10, 2)\n for linek in lines:\n axe.CurveLine(linek[0], linek[1], alpha=1)\n\n plt.show()\n return sol\n\nif __name__ == '__main__':\n # pylint: disable=invalid-name\n space_step = 1./128\n final_time = 100\n run(space_step, final_time)\n"
]
| [
[
"numpy.abs",
"numpy.sqrt",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show"
]
]
|
rddy/ReQueST | [
"fffe8e60eae1550843da3e32ff1a0e81ef35938a"
]
| [
"rqst/envs.py"
]
| [
"# python3\n# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Four domains for evaluation:\n - 2D contextual bandit\n - MNIST classification\n - 2D navigation\n - Car Racing\n\"\"\"\n\nfrom __future__ import division\n\nfrom copy import deepcopy\nimport pickle\nimport types\nimport uuid\nimport os\nimport sys\n\nfrom sklearn import neighbors\nfrom gym.envs.box2d.car_dynamics import Car\nfrom gym import spaces\nimport numpy as np\nimport gym\nimport tensorflow as tf\nimport scipy\n\nfrom rqst.reward_models import RewardModel\nfrom rqst import utils\n\nsys.path.append(utils.wm_dir)\nimport model as carracing_model\n\n\nclass Env(gym.Env):\n\n def make_noisy_expert_policy(self, eps=0.5):\n return utils.make_noisy_expert_policy(\n self.make_expert_policy(), self.action_space, eps=eps)\n\n def prob_succ(self, obses):\n return np.zeros(len(obses))\n\n def prob_crash(self, obses):\n return np.zeros(len(obses))\n\n\nclass BanditEnv(Env):\n \"\"\"Contextual bandit with 2D features and binary actions\"\"\"\n\n def __init__(self, domain_shift=False):\n\n self.domain_shift = domain_shift\n self.n_obs_dim = 2\n self.n_act_dim = 2\n self.observation_space = spaces.Box(-np.ones(self.n_obs_dim),\n np.ones(self.n_obs_dim))\n self.action_space = spaces.Box(-utils.inf * np.ones(self.n_act_dim),\n utils.inf * np.ones(self.n_act_dim))\n self.name = 'bandit'\n self.max_ep_len = 1\n self.expert_policy = self.make_expert_policy()\n self.subopt_policy = self.make_noisy_expert_policy(eps=0.5)\n self.default_init_obs = np.zeros(self.n_obs_dim)\n self.rew_classes = None\n self.only_terminal_reward = True\n\n self.absorbing_state = np.zeros(self.n_obs_dim)\n self.raw_absorbing_state = np.zeros(self.n_obs_dim)\n\n self.w = np.array([0., 0.5])\n\n def sample_context(self):\n context = np.random.random(self.n_obs_dim)\n if self.domain_shift:\n context = context * 2 - 1\n return context\n\n def reward_func(self, obses, acts, next_obses, eps=1e-6):\n act_labels = np.array(\n [self.expert_policy(obses[i, :]) for i in range(obses.shape[0])])\n act_labels -= scipy.misc.logsumexp(act_labels, axis=1, keepdims=True)\n acts -= scipy.misc.logsumexp(acts, axis=1, keepdims=True)\n xent = -(acts * np.exp(act_labels)).sum(axis=1)\n return -xent\n\n def step(self, action):\n action_label = self.expert_policy(self.prev_obs)\n succ = np.argmax(action) == np.argmax(action_label)\n\n obs = self.raw_absorbing_state\n r = self.reward_func(self.prev_obs[np.newaxis, :], action[np.newaxis, :],\n obs[np.newaxis, :])[0]\n done = True\n info = {'succ': succ, 'crash': False}\n\n return obs, r, done, info\n\n def reset(self):\n self.prev_obs = self.sample_context()\n return self.prev_obs\n\n def make_expert_policy(self):\n\n def policy(obs):\n a = 1 if self.w.dot(obs) > 0 else 0\n return utils.onehot_encode(a, self.n_act_dim) * utils.inf\n\n return policy\n\n\nclass CLFBanditEnv(BanditEnv):\n \"\"\"Turn supervised classification problem into contextual bandit\"\"\"\n\n def __init__(self, dataset, 
domain_shift=False):\n\n self.dataset = dataset\n self.domain_shift = domain_shift\n\n self.n_z_dim = self.dataset['n_z_dim']\n self.n_obs_dim = self.n_z_dim\n self.n_act_dim = self.dataset['n_classes']\n self.observation_space = spaces.Box(self.dataset['feat_lows'],\n self.dataset['feat_highs'])\n self.action_space = spaces.Box(-utils.inf * np.ones(self.n_act_dim),\n utils.inf * np.ones(self.n_act_dim))\n self.name = 'clfbandit'\n self.max_ep_len = 1\n self.make_expert_policy = lambda: self.dataset['make_expert_policy'](\n self.dataset)\n\n self.expert_policy = None # initializing here is expensive, so just initialize as needed\n self.subopt_policy = None\n\n self.default_init_obs = None\n self.rew_classes = None\n self.only_terminal_reward = True\n\n self.raw_absorbing_state = np.zeros(self.dataset['feat_shape'])\n self.absorbing_state = np.zeros(self.n_obs_dim)\n\n def sample_context(self):\n idxes = self.dataset['val_idxes'] if self.domain_shift else self.dataset[\n 'train_idxes']\n idx = np.random.choice(idxes)\n return self.dataset['feats'][idx, :]\n\n def set_expert_policy(self, encoder):\n self.expert_policy = self.dataset['make_expert_policy'](self.dataset,\n encoder)\n self.subopt_policy = None # initializing here is expensive, so just initialize as needed\n\n\nclass PointMassEnv(Env):\n \"\"\"Navigate to target while avoiding trap\n Not allowed to go out of bounds (unit square)\n Episode terminates upon reaching target or trap\n Continuous actions (velocity vector)\n 2D position observations\n \"\"\"\n\n def __init__(self,\n max_ep_len=1000,\n goal_dist_thresh=0.15,\n trap_dist_thresh=0.15,\n succ_rew_bonus=1.,\n crash_rew_penalty=-10.,\n max_speed=0.01,\n goal=None,\n trap=None,\n init_pos=None):\n \"\"\"\n Args:\n init_pos: a np.array with dimensions (2)\n None -> random initialization for each episode\n \"\"\"\n\n if goal is None:\n goal = np.array([0.5, 0.5])\n\n if trap is None:\n trap = np.array([0.25, 0.25])\n\n self.max_ep_len = max_ep_len\n self.goal_dist_thresh = goal_dist_thresh\n self.trap_dist_thresh = trap_dist_thresh\n self.succ_rew_bonus = succ_rew_bonus\n self.crash_rew_penalty = crash_rew_penalty\n self.max_speed = max_speed\n self.init_pos = init_pos\n self.goal = goal\n self.trap = trap\n\n # non-overlapping target/trap\n if np.linalg.norm(self.goal - self.trap) < 2 * self.goal_dist_thresh:\n raise ValueError\n\n self.n_act_dim = 2 # angle, speed\n self.n_obs_dim = 2 # position\n self.observation_space = spaces.Box(\n np.zeros(self.n_obs_dim), np.ones(self.n_obs_dim))\n self.action_space = spaces.Box(\n np.zeros(2), np.array([2 * np.pi, self.max_speed]))\n self.name = 'pointmass'\n self.expert_policy = self.make_expert_policy()\n self.subopt_policy = self.make_noisy_expert_policy(eps=0.75)\n self.default_init_obs = init_pos if init_pos is not None else np.zeros(2)\n self.rew_classes = np.array(\n [self.crash_rew_penalty, 0., self.succ_rew_bonus])\n self.only_terminal_reward = True\n\n self.pos = None\n\n def prob_succ(self, obses):\n at_goal = np.linalg.norm(obses - self.goal, axis=1) <= self.goal_dist_thresh\n return at_goal.astype(float)\n\n def prob_crash(self, obses):\n at_trap = np.linalg.norm(obses - self.trap, axis=1) <= self.trap_dist_thresh\n return at_trap.astype(float)\n\n def reward_func(self, obses, acts, next_obses):\n r = self.succ_rew_bonus * self.prob_succ(next_obses)\n r += self.crash_rew_penalty * self.prob_crash(next_obses)\n return r\n\n def obs(self):\n return self.pos\n\n def cart_to_polar(self, v):\n return np.array([np.arctan2(v[1], 
v[0]), np.linalg.norm(v)])\n\n def normalize_ang(self, a):\n return (2 * np.pi - abs(a) % (2 * np.pi)) if a < 0 else (abs(a) %\n (2 * np.pi))\n\n def normalize_polar(self, v):\n return np.array([self.normalize_ang(v[0]), min(v[1], self.max_speed)])\n\n def polar_to_cart(self, v):\n return v[1] * np.array([np.cos(v[0]), np.sin(v[0])])\n\n def step(self, action):\n action = self.polar_to_cart(self.normalize_polar(action))\n if (self.pos + action >= 0).all() and (self.pos + action <\n 1).all(): # stay in bounds\n self.pos += action\n\n self.succ = np.linalg.norm(self.pos - self.goal) <= self.goal_dist_thresh\n self.crash = np.linalg.norm(self.pos - self.trap) <= self.trap_dist_thresh\n\n self.timestep += 1\n\n obs = self.obs()\n r = self.reward_func(self.prev_obs[np.newaxis, :], action[np.newaxis, :],\n obs[np.newaxis, :])[0]\n done = self.succ or self.crash\n info = {'goal': self.goal, 'succ': self.succ, 'crash': self.crash}\n\n self.prev_obs = obs\n\n return obs, r, done, info\n\n def reset(self):\n self.pos = np.random.random(2) if self.init_pos is None else deepcopy(\n self.init_pos)\n self.prev_obs = self.obs()\n self.timestep = 0\n return self.prev_obs\n\n def make_expert_policy(self, noise=0, safety_margin=0.05):\n \"\"\"Expert goes directly to target, swings around trap if necessary\"\"\"\n\n def policy(obs):\n u = self.goal - obs\n w = self.cart_to_polar(u)\n v = self.trap - obs\n p = v.dot(u)\n x = obs + u / np.linalg.norm(u) * p\n if p > 0 and np.linalg.norm(\n v) < self.trap_dist_thresh + safety_margin and np.linalg.norm(\n x - self.trap) < self.trap_dist_thresh + safety_margin:\n w[0] = self.cart_to_polar(v)[0] + 0.5 * np.pi\n w[0] += np.random.normal(0, 1) * noise\n return w\n\n return policy\n\n\ndef make_bandit_env():\n return BanditEnv(domain_shift=False)\n\n\ndef make_bandit_trans_env(env):\n return BanditEnv(domain_shift=True)\n\n\ndef make_mnist_expert_policy(dataset, encoder=None):\n obses = dataset['feats']\n if encoder is not None:\n obses = encoder.encode_batch_frames(obses)\n else:\n obses = obses.reshape((obses.shape[0], obses.shape[1] * obses.shape[2]))\n\n clf_path = os.path.join(utils.clfbandit_data_dir,\n 'clf-%d.pkl' % obses.shape[1])\n if os.path.exists(clf_path):\n with open(clf_path, 'rb') as f:\n clf = pickle.load(f)\n else:\n clf = neighbors.KNeighborsClassifier(n_neighbors=4, weights='distance', n_jobs=1)\n clf.fit(obses, dataset['labels'])\n with open(clf_path, 'wb') as f:\n pickle.dump(clf, f, pickle.HIGHEST_PROTOCOL)\n\n log_proba_of_obs = {\n tuple(obses[i].ravel()):\n (utils.onehot_encode(label, dataset['n_classes']) * utils.inf)\n for i, label in enumerate(dataset['labels'])\n }\n\n def policy(obs):\n if len(obs.shape) == 3 and encoder is not None:\n obs = encoder.encode_frame(obs)\n flat_obs = obs.ravel()\n try:\n return log_proba_of_obs[tuple(flat_obs)]\n except KeyError:\n return np.log(1e-9 + clf.predict_proba(flat_obs[np.newaxis, :])[0, :])\n\n return policy\n\n\ndef make_mnist_dataset(verbose=False):\n load = lambda fname: np.load(os.path.join(utils.mnist_dir, fname))\n\n def load_imgs(X):\n X = X.T\n d = int(np.sqrt(X.shape[1]))\n X = X.reshape((X.shape[0], d, d))\n return X\n\n load_labels = lambda X: X.T.ravel().astype(int)\n X = load('mnist.npz')\n train_imgs = load_imgs(X['train'])\n train_labels = load_labels(X['train_labels'])\n test_imgs = load_imgs(X['test'])\n test_labels = load_labels(X['test_labels'])\n\n imgs = np.concatenate((train_imgs, test_imgs), axis=0)\n labels = np.concatenate((train_labels, test_labels))\n\n n_classes = 
len(np.unique(labels))\n\n feats = imgs[:, :, :, np.newaxis] / 255.\n\n feat_shape = feats.shape[1:]\n feat_lows = np.zeros(feat_shape)\n feat_highs = np.ones(feat_shape)\n\n # uncomment to do a train-val split with no domain shift\n #train_idxes = list(range(train_labels.size))\n #val_idxes = list(range(train_labels.size, train_labels.size + test_labels.size))\n\n # domain shift\n train_label_distrn = np.zeros(n_classes)\n n_src_classes = n_classes // 2\n train_label_distrn[:n_src_classes] = 0.\n train_label_distrn[n_src_classes:] = 1.\n\n train_idxes = []\n val_idxes = []\n\n idxes = np.arange(0, train_labels.size + test_labels.size, 1)\n idxes_of_label = [[] for _ in range(n_classes)]\n # assume np.unique(labels) = [0, 1, 2, ..., n_classes-1]\n # assume labels is balanced\n for idx, label in enumerate(labels):\n idxes_of_label[label].append(idx)\n for label, frac in enumerate(train_label_distrn):\n label_idxes = idxes_of_label[label]\n n_train_ex_with_label = int(frac * len(label_idxes))\n train_idxes.extend(label_idxes[:n_train_ex_with_label])\n val_idxes.extend(label_idxes[n_train_ex_with_label:])\n\n if verbose:\n train_distrn = np.zeros(n_classes)\n val_distrn = np.zeros(n_classes)\n for idx in train_idxes:\n train_distrn[labels[idx]] += 1\n for idx in val_idxes:\n val_distrn[labels[idx]] += 1\n train_distrn /= train_distrn.sum()\n val_distrn /= val_distrn.sum()\n print('Training label distribution: ' + str(train_distrn))\n print('Validation label distribution: ' + str(val_distrn))\n print('Number of training examples: %d' % len(train_idxes))\n print('Number of validation examples: %d' % len(val_idxes))\n\n dataset = {\n 'feat_shape': feat_shape,\n 'n_z_dim': 8,\n 'n_classes': n_classes,\n 'feat_lows': feat_lows,\n 'feat_highs': feat_highs,\n 'feats': feats,\n 'labels': labels,\n 'train_idxes': train_idxes,\n 'val_idxes': val_idxes,\n 'make_expert_policy': make_mnist_expert_policy\n }\n\n return dataset\n\n\ndef make_clfbandit_env(**make_dataset_kwargs):\n dataset = make_mnist_dataset(**make_dataset_kwargs)\n return CLFBanditEnv(dataset, domain_shift=False)\n\n\ndef make_clfbandit_trans_env(env):\n trans_env = deepcopy(env)\n trans_env.domain_shift = True\n return trans_env\n\n\ndef make_pointmass_env(goal=None, trap=None, init_pos=None):\n\n if goal is None:\n goal = np.array([0.3, 0.3])\n\n if trap is None:\n trap = np.array([0.7, 0.7])\n\n env = PointMassEnv(goal=goal, trap=trap, init_pos=init_pos)\n return env\n\n\ndef make_pointmass_trans_env(env, trans_init_pos=None):\n \"\"\"make transfer env for reward_models.evaluate_reward_model\n \"\"\"\n if trans_init_pos is None:\n trans_init_pos = np.array([0.99, 0.99])\n\n trans_env = PointMassEnv(\n goal=env.goal, trap=env.trap, init_pos=trans_init_pos)\n return trans_env\n\ndef make_carracing_rew(sess,\n env,\n sketch_data=None,\n reward_init_kwargs=None,\n reward_train_kwargs=None):\n \"\"\"carracing doesn't have a reward function that can be evaluated on encoded frames,\n so we train a ground-truth reward model by doing supervised learning on ground-truth rewards\n Args:\n sketch_data:\n None -> try to load trained reward model\n else -> train reward model on sketch_data\n \"\"\"\n if reward_init_kwargs is None:\n reward_init_kwargs = {\n 'n_rew_nets_in_ensemble':\n 4,\n 'n_layers':\n 1,\n 'layer_size':\n 256,\n 'scope':\n str(uuid.uuid4()) if sketch_data is not None else None,\n 'scope_file':\n os.path.join(utils.carracing_data_dir, 'true_rew_scope.pkl'),\n 'tf_file':\n os.path.join(utils.carracing_data_dir, 'true_rew.tf'),\n 
'rew_func_input':\n \"s'\",\n 'use_discrete_rewards':\n True\n }\n\n if reward_train_kwargs is None:\n reward_train_kwargs = {\n 'demo_coeff': 1,\n 'sketch_coeff': 1,\n 'iterations': 5000,\n 'ftol': 1e-4,\n 'batch_size': 32,\n 'learning_rate': 1e-3,\n 'val_update_freq': 100,\n 'verbose': True\n }\n\n true_reward_model = RewardModel(sess, env, **reward_init_kwargs)\n\n if sketch_data is not None:\n true_reward_model.train(\n demo_data=None,\n sketch_data=sketch_data,\n pref_data=None,\n **reward_train_kwargs)\n true_reward_model.save()\n else:\n true_reward_model.load()\n\n # reward_func(traj, act_seq) = array of rewards (one rew per timestep)\n prob_succ = lambda obses: utils.normalize_logits(\n true_reward_model.compute_raw_of_transes(None, None, obses))[:, 2]\n prob_crash = lambda obses: utils.normalize_logits(\n true_reward_model.compute_raw_of_transes(None, None, obses))[:, 0]\n\n def reward_func(*args):\n raws = true_reward_model.compute_raw_of_transes(*args)\n disc_rews = np.argmax(raws, axis=1)\n return [env.rew_classes[r] for r in disc_rews]\n\n return {\n 'prob_succ': prob_succ,\n 'prob_crash': prob_crash,\n 'reward_func': reward_func\n }\n\ndef make_carracing_env(sess,\n load_reward=False,\n n_z_dim=32,\n rnn_size=256,\n succ_rew_bonus=10.,\n crash_rew_penalty=-1.):\n \"\"\"\n Args:\n n_z_dim: number of latent features for encoded frames\n rnn_size: size of hidden layer in mdn-rnn dynamics model\n \"\"\"\n\n env = gym.make('CarRacing-v0')\n env.n_act_dim = 3 # steer, gas, brake\n env.max_ep_len = 1000\n env.name = 'carracing'\n env.default_init_obs = None\n env.succ_rew_bonus = succ_rew_bonus\n env.crash_rew_penalty = crash_rew_penalty\n env.rew_classes = np.array([env.crash_rew_penalty, 0., env.succ_rew_bonus])\n env.only_terminal_reward = False\n\n env.n_z_dim = n_z_dim\n env.rnn_size = rnn_size\n # concatenate latents with hidden states from mdnrnn dynamics model\n env.n_obs_dim = n_z_dim + 2 * rnn_size\n if load_reward:\n data = make_carracing_rew(sess, env)\n env.__dict__.update(data)\n\n filename = os.path.join(utils.wm_dir, 'log', 'carracing.cma.16.64.best.json')\n expert_model = carracing_model.make_model()\n expert_model.load_model(filename)\n\n def encode_obs(obs):\n if len(obs.shape) == 3:\n return expert_model.encode_obs(obs)[0]\n else:\n expert_model.state = tf.nn.rnn_cell.LSTMStateTuple(\n c=obs[env.n_z_dim:env.n_z_dim + env.rnn_size][np.newaxis, :],\n h=obs[-env.rnn_size:][np.newaxis, :])\n return obs[:env.n_z_dim]\n\n env.expert_policy = lambda obs: expert_model.get_action(encode_obs(obs))\n env.subopt_policy = utils.LaggyPolicy(env.expert_policy, lag_prob=0.75)\n\n return env\n\ndef make_carracing_trans_env(*args, init_rotation=None, **kwargs):\n \"\"\"Make transfer env for rqst.reward_models.evaluate_reward_model\n \"\"\"\n if init_rotation is None:\n init_rotation = 0.25 * np.pi\n\n trans_env = make_carracing_env(*args, **kwargs)\n\n # monkeypatch init rotation\n trans_env.orig_reset = trans_env.reset\n\n def reset(self, *args, **kwargs):\n self.orig_reset(*args, **kwargs)\n # rotate init car state\n self.car = Car(self.world, self.track[0][1] + init_rotation,\n *self.track[0][2:4])\n return self.step(None)[0]\n\n trans_env.reset = types.MethodType(reset, trans_env)\n\n trans_env.default_init_obs = None\n\n return trans_env\n"
]
| [
[
"numpy.sqrt",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"numpy.concatenate",
"numpy.arctan2",
"numpy.exp",
"numpy.unique",
"numpy.arange",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.sin",
"numpy.argmax",
"numpy.zeros",
"numpy.random.choice",
"numpy.array",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"numpy.random.normal",
"scipy.misc.logsumexp"
]
]
|