repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
clifu/SNR | [
"8f049a60efc0ab51fd277c7d0b70ab3a8b78615f"
]
| [
"Z2/confusionMatrix.py"
]
| [
"from prerequisites import *\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport re\nimport seaborn as sns\nimport pandas as pd\n\ndef print_confusion_matrix(_test_generator, _test_steps, feature='learning_rate_'):\n y_preds = dict()\n files_list = [f'results/{f}' for f in os.listdir(RESULTS_DIR) if isfile(join(RESULTS_DIR, f)) and 'h5' in f]\n for file_name in files_list:\n research = re.search(rf'({feature}.*)\\.h5', file_name)\n if research is None:\n continue\n name = research.group(1)\n model = models.load_model(file_name)\n Y_pred = model.predict_generator(_test_generator, _test_steps)\n y_preds[name] = np.argmax(Y_pred, axis=1)\n\n del model\n K.clear_session()\n gc.collect()\n\n # print Confusion Matrix\n classes = np.arange(class_count)\n for name, y_pred in y_preds.items():\n con_mat = confusion_matrix(_test_generator.classes, y_pred)\n print(f'\\n\\n{name}')\n print('Confusion Matrix')\n print(con_mat)\n \n # normalization & heat map\n con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)\n\n con_mat_df = pd.DataFrame(con_mat_norm,\n index = classes, \n columns = classes)\n\n figure = plt.figure(figsize=(8, 8))\n sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n\n\n# get dataset\nbatch_size = 15\nclass_count, _, _, _, _, test_datagen, test_generator = get_data(batch_size)\ntest_steps = len(test_generator.filenames) // batch_size\n\nprint_confusion_matrix(test_generator, test_steps, 'adam_2c_batch_v1_')"
]
| [
[
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.arange",
"numpy.argmax"
]
]
|
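The `confusionMatrix.py` sample above relies on project helpers (`prerequisites`, `get_data`, `RESULTS_DIR`) that are not part of this row. Below is a minimal, self-contained sketch of the APIs the row lists (`numpy.argmax`, `sklearn.metrics.confusion_matrix`, `numpy.arange`, `pandas.DataFrame`); the synthetic labels and probabilities are assumptions standing in for the Keras generator output.

```python
# Minimal sketch: row-normalised confusion matrix rendered as a heat map,
# on synthetic data in place of _test_generator / model.predict_generator.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

rng = np.random.default_rng(0)
class_count = 3
y_true = rng.integers(0, class_count, size=100)   # stand-in for _test_generator.classes
probs = rng.random((100, class_count))            # stand-in for predicted class probabilities
y_pred = np.argmax(probs, axis=1)

con_mat = confusion_matrix(y_true, y_pred)
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
classes = np.arange(class_count)
con_mat_df = pd.DataFrame(con_mat_norm, index=classes, columns=classes)

sns.heatmap(con_mat_df, annot=True, cmap=plt.cm.Blues)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.show()
```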
elifesciences-publications/Antinucci_Dumitrescu_et_al_2020 | [
"b2cf5596536aa423058abeda6f58096b5c58180d"
]
| [
"exponentialFitGetTau.py"
]
| [
"\"\"\"\nCreated on Wed April 17 2019\n@author: oliver.mirat\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\ndef expFunc(x, a, b, c):\n return a * np.exp( -(1/b) * x) + c\n \ndef getExponentialPart(x, y, nbPointsForFit):\n minIndex = y.argmin(axis=0)\n if nbPointsForFit == 0:\n nbPointsForFit = len(y)\n else:\n nbPointsForFit = minIndex + nbPointsForFit\n xExpPart = x[minIndex:nbPointsForFit] \n yExpPart = y[minIndex:nbPointsForFit]\n return [xExpPart, yExpPart]\n\ndef exponentialFitGetTau(x, y, showPlot=0, nbPointsForFit=0):\n \"\"\"\n This function outputs a monoexponential fit to points denoted by x and y. \n It works for downward slopes as it finds the min of y as starting point.\n 3rd term = show plot of monoexponential fit (1) or not (0)\n 4th term = number of points for which to do the fit\n \"\"\"\n [xExpPart, yExpPart] = getExponentialPart(x, y, nbPointsForFit)\n popt, pcov = curve_fit(expFunc, xExpPart, yExpPart, p0=[np.amin(yExpPart), 200, 0])\n if showPlot:\n print('Monoexponential fit is superimposed (red) on raw data (blue)')\n plt.plot(xExpPart, yExpPart)\n plt.plot(xExpPart, expFunc(xExpPart, *popt), 'r-',label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))\n plt.show()\n return popt[1]\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.amin"
]
]
|
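A self-contained sketch of the monoexponential fit in `exponentialFitGetTau.py`, run on a synthetic recovery trace; the time base, the true tau of 150 and the noise level are assumptions. It exercises the row's listed APIs (`numpy.exp`, `numpy.amin`, `matplotlib.pyplot.plot`/`show`) and, like the original, reads tau off as `popt[1]`.

```python
# Minimal sketch: fit a monoexponential to synthetic data and report tau (= b).
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def expFunc(x, a, b, c):
    return a * np.exp(-(1 / b) * x) + c

x = np.arange(0, 1000.0)
rng = np.random.default_rng(0)
y = expFunc(x, -5.0, 150.0, 1.0) + rng.normal(0, 0.05, x.size)  # rises from its minimum at x=0

popt, pcov = curve_fit(expFunc, x, y, p0=[np.amin(y), 200, 0])
print(f"fitted tau = {popt[1]:.1f}")  # popt[1] is the b (tau) term, as in the original

plt.plot(x, y, label='raw data')
plt.plot(x, expFunc(x, *popt), 'r-', label='monoexponential fit')
plt.legend()
plt.show()
```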
TJUsym/TJU_Advanced_CV_Homework | [
"2d85943390e9ba53b80988e0ab8d50aef0cd17da"
]
| [
"mmdet/models/necks/bfp.py"
]
| [
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\n\nfrom ..plugins import NonLocal2D\nfrom ..registry import NECKS\nfrom ..utils import ConvModule\n\n\[email protected]_module\nclass BFP(nn.Module):\n \"\"\"BFP (Balanced Feature Pyrmamids)\n\n BFP takes multi-level features as inputs and gather them into a single one,\n then refine the gathered feature and scatter the refined results to\n multi-level features. This module is used in Libra R-CNN (CVPR 2019), see\n https://arxiv.org/pdf/1904.02701.pdf for details.\n\n Args:\n in_channels (int): Number of input channels (feature maps of all levels\n should have the same channels).\n num_levels (int): Number of input feature levels.\n conv_cfg (dict): The config dict for convolution layers.\n norm_cfg (dict): The config dict for normalization layers.\n refine_level (int): Index of integration and refine level of BSF in\n multi-level features from bottom to top.\n refine_type (str): Type of the refine op, currently support\n [None, 'conv', 'non_local'].\n \"\"\"\n\n def __init__(self,\n in_channels,\n num_levels,\n refine_level=2,\n refine_type=None,\n conv_cfg=None,\n norm_cfg=None):\n super(BFP, self).__init__()\n assert refine_type in [None, 'conv', 'non_local']\n\n self.in_channels = in_channels\n self.num_levels = num_levels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n self.refine_level = refine_level\n self.refine_type = refine_type\n assert 0 <= self.refine_level < self.num_levels\n\n if self.refine_type == 'conv':\n self.refine = ConvModule(\n self.in_channels,\n self.in_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg)\n elif self.refine_type == 'non_local':\n self.refine = NonLocal2D(\n self.in_channels,\n reduction=1,\n use_scale=False,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n def forward(self, inputs):\n assert len(inputs) == self.num_levels\n\n # step 1: gather multi-level features by resize and average\n feats = []\n gather_size = inputs[self.refine_level].size()[2:]\n for i in range(self.num_levels):\n if i < self.refine_level:\n gathered = F.adaptive_max_pool2d(\n inputs[i], output_size=gather_size)\n else:\n gathered = F.interpolate(\n inputs[i], size=gather_size, mode='nearest')\n feats.append(gathered)\n\n bsf = sum(feats) / len(feats)\n\n # step 2: refine gathered features\n if self.refine_type is not None:\n bsf = self.refine(bsf)\n\n # step 3: scatter refined features to multi-levels by a residual path\n outs = []\n for i in range(self.num_levels):\n out_size = inputs[i].size()[2:]\n if i < self.refine_level:\n residual = F.interpolate(bsf, size=out_size, mode='nearest')\n else:\n residual = F.adaptive_max_pool2d(bsf, output_size=out_size)\n outs.append(residual + inputs[i])\n\n return tuple(outs)\n"
]
| [
[
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.functional.interpolate"
]
]
|
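A minimal sketch of the gather step in `bfp.py`, using plain tensors instead of the mmdet module: levels below the refine level are pooled down with `adaptive_max_pool2d`, levels above are upsampled with `interpolate`, and the resized maps are averaged. The level shapes are illustrative assumptions.

```python
# Minimal sketch of BFP's "gather" step on four same-channel feature levels.
import torch
import torch.nn.functional as F

inputs = [torch.randn(1, 16, s, s) for s in (64, 32, 16, 8)]  # 4 pyramid levels, 16 channels each
refine_level = 2
gather_size = inputs[refine_level].size()[2:]                 # (16, 16)

feats = []
for i, x in enumerate(inputs):
    if i < refine_level:
        feats.append(F.adaptive_max_pool2d(x, output_size=gather_size))
    else:
        feats.append(F.interpolate(x, size=gather_size, mode='nearest'))

bsf = sum(feats) / len(feats)   # balanced semantic features, later refined and scattered back
print(bsf.shape)                # torch.Size([1, 16, 16, 16])
```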
ryanQc1216/detr3d_eval | [
"545684053cffb1bf027d43c42b20ed245fcee119"
]
| [
"tools/analysis_tools/flops_tools.py"
]
| [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport torch\nfrom mmcv import Config, DictAction\nimport torch.nn as nn\nimport ptflops\nimport thop\n\n\ndef prepare_inputs(shape_nchw):\n x = torch.zeros(shape_nchw)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride):\n super(Net, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass TestProfile:\n def __init__(self,\n shape_nchw=(1, 128, 32, 32),\n in_channels=128,\n out_channels=128,\n kernel_size=3,\n stride=1):\n self.shape_nchw = shape_nchw\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n print('(n,c,h,w) is :', shape_nchw)\n input_height = shape_nchw[2] - kernel_size + 1\n input_width = shape_nchw[3] - kernel_size + 1\n default_flops = 2*input_height*input_width*(in_channels*kernel_size*kernel_size+1)*out_channels\n default_mflops = default_flops/1e6\n default_macs = default_mflops/2\n print('manual flops is %.4f MFLOPs' % default_mflops)\n print('manual macs is %.4f MFLOPs' % default_macs)\n\n def init_model(self):\n model = Net(self.in_channels, self.out_channels, self.kernel_size, self.stride)\n input_buffer = prepare_inputs(self.shape_nchw)\n return model, input_buffer\n\n def profile_thop(self, show_params=False):\n model, input_buffer = self.init_model()\n with torch.no_grad():\n macs, params = thop.profile(model, inputs=(input_buffer, ), verbose=True)\n from thop import clever_format\n macs, params = clever_format([macs, params], \"%.3f\")\n print('THOP macs : %s' % macs)\n if show_params:\n print('THOP params : %s' % params)\n\n def profile_pt(self, show_params=False):\n model, input_buffer = self.init_model()\n with torch.no_grad():\n macs, params = ptflops.get_model_complexity_info(model,\n input_res=(self.shape_nchw[1], self.shape_nchw[2], self.shape_nchw[3], ),\n as_strings=True,\n print_per_layer_stat=True, verbose=True)\n print('ptflops {:<30} {:<8}'.format('Computational complexity: ', macs))\n if show_params:\n print('ptflops {:<30} {:<8}'.format('Number of parameters: ', params))\n\n\n def profile_pytorch(self):\n model, input_buffer = self.init_model()\n with torch.no_grad():\n with torch.autograd.profiler.profile(with_stack=False, enabled=True, use_cuda=False, record_shapes=True,\n with_flops=True, profile_memory=True) as prof:\n outputs = model(input_buffer)\n print(prof.key_averages(group_by_stack_n=5).table(row_limit=-1))\n\nif __name__ == '__main__':\n test_tool = TestProfile()\n # test_tool.profile_pytorch()\n # test_tool.profile_thop()\n test_tool.profile_pt()\n pass\n"
]
| [
[
"torch.zeros",
"torch.autograd.profiler.profile",
"torch.nn.Conv2d",
"torch.no_grad"
]
]
|
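A minimal sketch of the manual FLOPs/MACs estimate in `flops_tools.py` for its default `(1, 128, 32, 32)` input and 3x3 convolution, cross-checked with the same `thop.profile` call the file uses. The printed figures are approximate, and thop's own MAC count can differ slightly from the hand formula.

```python
# Minimal sketch: hand-counted conv FLOPs/MACs versus thop for one Conv2d.
import torch
import torch.nn as nn
import thop

n, c, h, w = 1, 128, 32, 32
out_channels, kernel_size = 128, 3

out_h = h - kernel_size + 1   # 30 (valid convolution, stride 1)
out_w = w - kernel_size + 1   # 30
flops = 2 * out_h * out_w * (c * kernel_size ** 2 + 1) * out_channels
print('manual flops: %.4f MFLOPs' % (flops / 1e6))   # ~265.65
print('manual macs : %.4f MMACs' % (flops / 2e6))    # ~132.83

conv = nn.Conv2d(c, out_channels, kernel_size, stride=1)
with torch.no_grad():
    macs, params = thop.profile(conv, inputs=(torch.zeros(n, c, h, w),), verbose=False)
print('thop macs   : %.4f MMACs' % (macs / 1e6))
```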
cankunqiu/tensorlayer2 | [
"423283ef96d6db485e431d01e360535d1803f34d"
]
| [
"tensorlayer/layers/convolution/super_resolution.py"
]
| [
"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\nfrom tensorlayer.layers.core import Layer\n\nfrom tensorlayer import logging\n\nfrom tensorlayer.decorators import deprecated_alias\nfrom tensorlayer.decorators import private_method\n\n__all__ = [\n 'SubpixelConv1d',\n 'SubpixelConv2d',\n]\n\n\nclass SubpixelConv2d(Layer):\n \"\"\"It is a 2D sub-pixel up-sampling layer, usually be used\n for Super-Resolution applications, see `SRGAN <https://github.com/tensorlayer/srgan/>`__ for example.\n\n Parameters\n ------------\n scale : int\n The up-scaling ratio, a wrong setting will lead to dimension size error.\n n_out_channel : int or None\n The number of output channels.\n - If None, automatically set n_out_channel == the number of input channels / (scale x scale).\n - The number of input channels == (scale x scale) x The number of output channels.\n act : activation function\n The activation function of this layer.\n name : str\n A unique layer name.\n\n Examples\n ---------\n >>> # examples here just want to tell you how to set the n_out_channel.\n >>> import numpy as np\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> x = np.random.rand(2, 16, 16, 4)\n >>> X = tf.placeholder(\"float32\", shape=(2, 16, 16, 4), name=\"X\")\n >>> net = tl.layers.InputLayer(X, name='input')\n >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d')\n >>> sess = tf.Session()\n >>> y = sess.run(net.outputs, feed_dict={X: x})\n >>> print(x.shape, y.shape)\n (2, 16, 16, 4) (2, 32, 32, 1)\n\n >>> x = np.random.rand(2, 16, 16, 4*10)\n >>> X = tf.placeholder(\"float32\", shape=(2, 16, 16, 4*10), name=\"X\")\n >>> net = tl.layers.InputLayer(X, name='input2')\n >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2')\n >>> y = sess.run(net.outputs, feed_dict={X: x})\n >>> print(x.shape, y.shape)\n (2, 16, 16, 40) (2, 32, 32, 10)\n\n >>> x = np.random.rand(2, 16, 16, 25*10)\n >>> X = tf.placeholder(\"float32\", shape=(2, 16, 16, 25*10), name=\"X\")\n >>> net = tl.layers.InputLayer(X, name='input3')\n >>> net = tl.layers.SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3')\n >>> y = sess.run(net.outputs, feed_dict={X: x})\n >>> print(x.shape, y.shape)\n (2, 16, 16, 250) (2, 80, 80, 10)\n\n References\n ------------\n - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/pdf/1609.05158.pdf>`__\n\n \"\"\"\n\n # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py\n def __init__(self, scale=2, n_out_channel=None, act=None, name=None):#'subpixel_conv2d'):\n\n # super(SubpixelConv2d, self).__init__(prev_layer=prev_layer, act=act, name=name)\n super().__init__(name)\n self.scale = scale\n self.n_out_channel = n_out_channel\n self.act = act\n if n_out_channel is None:\n\n if int(self.inputs.get_shape()[-1]) / (scale**2) % 1 != 0:\n raise Exception(\n \"SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels\"\n )\n\n n_out_channel = int(int(self.inputs.get_shape()[-1]) / (scale**2))\n\n logging.info(\n \"SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s\" %\n (self.name, scale, n_out_channel, self.act.__name__ if self.act is not None else 'No Activation')\n )\n\n def build(self, inputs):\n pass\n\n def forward(self, inputs):\n \"\"\"\n prev_layer : :class:`Layer`\n Previous layer,\n \"\"\"\n # with tf.variable_scope(name):\n # self.outputs = 
self._apply_activation(self._PS(self.inputs, r=scale, n_out_channels=n_out_channel))\n outputs = self.act(self._PS(inputs, r=self.scale, n_out_channels=self.n_out_channel))\n return outputs\n\n @private_method\n def _PS(self, X, r, n_out_channels):\n\n _err_log = \"SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels\"\n\n if n_out_channels >= 1:\n if int(X.get_shape()[-1]) != (r**2) * n_out_channels:\n raise Exception(_err_log)\n # bsize, a, b, c = X.get_shape().as_list()\n # bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim\n # Xs=tf.split(X,r,3) #b*h*w*r*r\n # Xr=tf.concat(Xs,2) #b*h*(r*w)*r\n # X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c\n\n X = tf.depth_to_space(X, r)\n else:\n raise RuntimeError(_err_log)\n\n return X\n\n\nclass SubpixelConv1d(Layer):\n \"\"\"It is a 1D sub-pixel up-sampling layer.\n\n Calls a TensorFlow function that directly implements this functionality.\n We assume input has dim (batch, width, r)\n\n Parameters\n ------------\n scale : int\n The up-scaling ratio, a wrong setting will lead to Dimension size error.\n act : activation function\n The activation function of this layer.\n name : str\n A unique layer name.\n\n Examples\n ----------\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> t_signal = tf.placeholder('float32', [10, 100, 4], name='x')\n >>> n = tl.layers.InputLayer(t_signal, name='in')\n >>> n = tl.layers.SubpixelConv1d(n, scale=2, name='s')\n >>> print(n.outputs.shape)\n (10, 200, 2)\n\n References\n -----------\n `Audio Super Resolution Implementation <https://github.com/kuleshov/audio-super-res/blob/master/src/models/layers/subpixel.py>`__.\n\n \"\"\"\n\n def __init__(self, scale=2, act=None, name=None):#'subpixel_conv1d'):\n\n # super(SubpixelConv1d, self).__init__(prev_layer=prev_layer, act=act, name=name)\n super().__init__(name)\n self.scale = scale\n self.act = act\n logging.info(\n \"SubpixelConv1d %s: scale: %d act: %s\" %\n (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation')\n )\n\n def build(self, inputs):\n pass\n\n def forward(self, inputs):\n \"\"\"\n Parameters\n ------------\n net : :class:`Layer`\n Previous layer with output shape of (batch, width, r).\n \"\"\"\n # with tf.name_scope(name):\n # self.outputs = self._apply_activation(self._PS(self.inputs, r=scale))\n\n outputs = self.act(self._PS(inputs, r=self.scale))\n return outputs\n\n @private_method\n def _PS(self, I, r):\n X = tf.transpose(I, [2, 1, 0]) # (r, w, b)\n X = tf.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b)\n X = tf.transpose(X, [2, 1, 0])\n return X\n"
]
| [
[
"tensorflow.batch_to_space_nd",
"tensorflow.transpose",
"tensorflow.depth_to_space"
]
]
|
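A minimal sketch of the channel bookkeeping behind `SubpixelConv2d`: with scale `r`, the input must carry `r*r` times the output channels, and the depth-to-space op rearranges them into an `r`-times larger spatial grid. This uses eager TF2 and `tf.nn.depth_to_space`, the current spelling of the `tf.depth_to_space` call in the layer above.

```python
# Minimal sketch: (2, 16, 16, 4) with scale 2 becomes (2, 32, 32, 1),
# matching the first docstring example in the row above.
import numpy as np
import tensorflow as tf

r = 2
x = tf.constant(np.random.rand(2, 16, 16, 4), dtype=tf.float32)  # 4 == r*r * 1 output channel
y = tf.nn.depth_to_space(x, block_size=r)
print(x.shape, '->', y.shape)  # (2, 16, 16, 4) -> (2, 32, 32, 1)
```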
wwagner4/kaggle | [
"c006aca6a4922b6fa84b0a2c2e1b344e1e205847"
]
| [
"m5fa/pyspark/analyse/analyse.py"
]
| [
"import os\nimport time\nfrom pathlib import Path\nfrom typing import Tuple, Callable\n\nimport matplotlib.pyplot as plt\nimport pyspark.sql.types as t\nimport pyspark.sql.functions as f\nfrom operator import add\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom pyspark import RDD\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import StringIndexer, OneHotEncoderEstimator, VectorAssembler\nfrom pyspark.sql import DataFrame, SparkSession\n\n\ndef preprocessing(spark: SparkSession, pppath: Path, datadir: Path):\n def prepro(s5: DataFrame) -> DataFrame:\n stages = []\n catvars = ['dept_id', 'item_id', 'store_id', 'wday']\n for v in catvars:\n stages += [StringIndexer(inputCol=v,\n outputCol=f\"i{v}\")]\n stages += [OneHotEncoderEstimator(inputCols=[f\"i{v}\" for v in catvars],\n outputCols=[f\"v{v}\" for v in catvars])]\n stages += [VectorAssembler(inputCols=['vwday', 'vitem_id', 'vdept_id', 'vstore_id', 'flag_ram',\n 'snap', 'dn', 'month', 'year'],\n outputCol='features')]\n\n pip: Pipeline = Pipeline(stages=stages)\n pipm = pip.fit(s5)\n df: DataFrame = pipm.transform(s5)\n return df.drop('idept_id', 'iitem_id', 'istore_id', 'iwday', 'vdept_id', 'vtem_id', 'vstore_id', 'vwday')\n\n print(\"--- preprocessing -----------------------\")\n\n schema = t.StructType([\n t.StructField('year', t.IntegerType(), True),\n t.StructField('month', t.IntegerType(), True),\n t.StructField('dn', t.IntegerType(), True),\n t.StructField('wday', t.IntegerType(), True),\n t.StructField('snap', t.IntegerType(), True),\n t.StructField('dept_id', t.StringType(), True),\n t.StructField('item_id', t.StringType(), True),\n t.StructField('store_id', t.StringType(), True),\n t.StructField('sales', t.DoubleType(), True),\n t.StructField('flag_ram', t.IntegerType(), True),\n t.StructField('Sales_Pred', t.DoubleType(), True)\n ])\n\n csv_path = datadir / \"Sales5_Ab2011_InklPred.csv\"\n print(f\"--- Reading: '{csv_path}'\")\n\n sales5: DataFrame = spark.read.csv(str(csv_path), header='true', schema=schema) \\\n .withColumn(\"label\", f.col('sales'))\n\n ppdf = prepro(sales5)\n print(f\"--- Writing: '{pppath}'\")\n ppdf.write \\\n .format(\"parquet\") \\\n .mode(\"overwrite\") \\\n .save(str(pppath))\n\n\ndef analyse01(spark: SparkSession, pppath: Path, pdatdir: Path):\n def key_top(r: t.Row) -> Tuple:\n k = t.Row(year=r['year'], month=r['month'], wday=r['wday'], store_id=r['store_id'], snap=r['snap'],\n flag_ram=r['flag_ram'], dept_id=r['dept_id'])\n return k, r['label']\n\n def key_wday(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.wday:02d}\"\n return k, top[1]\n\n def key_dept_id(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.dept_id}\"\n return k, top[1]\n\n def key_dept_id_snap(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.dept_id} {topkey.snap}\"\n return k, top[1]\n\n def key_dept_id_flag_ram(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.dept_id} {topkey.flag_ram}\"\n return k, top[1]\n\n def key_store_id(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.store_id}\"\n return k, top[1]\n\n def key_store_id_snap(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.store_id} {topkey.snap}\"\n return k, top[1]\n\n def key_store_id_flag_ram(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.store_id} {topkey.flag_ram}\"\n return k, top[1]\n\n def key_year(top: Tuple) -> Tuple:\n topkey = top[0]\n k = f\"{topkey.year:04d}\"\n return k, top[1]\n\n def key_month(top: Tuple) -> Tuple:\n topkey = top[0]\n k = 
f\"{topkey.month:02d}\"\n return k, top[1]\n\n def plot(top: RDD, name: str, desc: str, fkey: Callable[[Tuple], Tuple], xrot=0):\n ts: RDD = top \\\n .map(fkey) \\\n .groupByKey() \\\n .sortBy(lambda tu: tu[0])\n\n pbase = Path(\"/opt/data\")\n pplot = pbase / \"plot\"\n if not pplot.exists():\n pplot.mkdir()\n\n fig: Figure = plt.figure()\n fig.set_tight_layout(\"True\")\n ax: Axes = fig.add_subplot(1, 1, 1)\n ax.set_title(desc)\n labs = ts.map(lambda tu: tu[0]).collect()\n vals = ts.map(lambda tu: tu[1]).map(list).collect()\n ax.boxplot(vals, labels=labs)\n fig.autofmt_xdate()\n\n pf1 = pplot / f\"box_{name}.png\"\n fig.autofmt_xdate(rotation=xrot)\n fig.savefig(pf1)\n print(f\"wrote to {pf1}\")\n\n print(\"--- analyse01 -----------------------\")\n print(f\"--- Reading: '{pppath}'\")\n df: DataFrame = spark.read \\\n .parquet(str(pppath))\n\n ptop = pdatdir / \"s5_01_top.parquet\"\n if not ptop.exists():\n rddtop: RDD = df.rdd \\\n .filter(lambda r: r[\"label\"] is not None) \\\n .map(key_top) \\\n .reduceByKey(add)\n print(f\"--- Writing: '{ptop}'\")\n rddtop.toDF().write \\\n .format(\"parquet\") \\\n .mode(\"overwrite\") \\\n .save(str(ptop))\n else:\n print(f\"--- Reading: '{ptop}'\")\n rddtop = spark.read.parquet(str(ptop)).rdd\n\n plot(rddtop, \"all_months\", \"Sales for all months\", key_month)\n plot(rddtop, \"all_years\", \"Sales for all years\", key_year)\n plot(rddtop, \"all_stores_snap\", \"Sales for all stores by snap\", key_store_id_snap, xrot=45)\n plot(rddtop, \"all_stores_ram\", \"Sales for all stores by ramadan\", key_store_id_flag_ram, xrot=45)\n plot(rddtop, \"all_stores\", \"Sales for all stores\", key_store_id)\n plot(rddtop, \"all_wday\", \"Sales for all weekdays\", key_wday)\n plot(rddtop, \"all_dept\", \"Sales for all departments\", key_dept_id, xrot=45)\n plot(rddtop, \"all_dept_snap\", \"Sales for all departments by snap\", key_dept_id_snap, xrot=45)\n plot(rddtop, \"all_dept_ram\", \"Sales for all departments by ramadan flag\", key_dept_id_flag_ram, xrot=45)\n\n\ndef run(spark: SparkSession):\n datadir: Path = Path(os.getenv(\"DATADIR\"))\n if datadir is None:\n raise ValueError(\"Environment variable DATADIR must be defined\")\n print(f\"datadir = '{datadir}'\")\n\n ppnam = \"s5_01\"\n pppath = datadir / f\"{ppnam}.parquet\"\n if not pppath.exists():\n preprocessing(spark, pppath, datadir)\n analyse01(spark, pppath, datadir)\n\n\ndef main():\n start = time.time()\n spark = SparkSession.builder \\\n .appName(\"analyse\") \\\n .getOrCreate()\n run(spark)\n end = time.time()\n elapsed = end - start\n elapseds = time.strftime(\"%H:%M:%S\", time.gmtime(elapsed))\n print(f\"------------------------- R E A D Y ------------ {elapseds} --------------------\")\n spark.stop()\n exit(0)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.figure"
]
]
|
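A minimal sketch of the matplotlib half of `plot()` in `analyse.py` (the only API listed for this row): a box plot of per-key value lists with rotated tick labels, using small synthetic groups in place of the grouped and sorted Spark RDD.

```python
# Minimal sketch of the boxplot/figure handling in plot(), without Spark.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
groups = {'01': rng.random(50), '02': rng.random(50) * 2, '03': rng.random(50) * 3}
labs = list(groups.keys())
vals = [list(v) for v in groups.values()]

fig = plt.figure()
fig.set_tight_layout(True)
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Sales for all months')   # desc string, as passed in the calls above
ax.boxplot(vals, labels=labs)
fig.autofmt_xdate(rotation=45)
fig.savefig('box_all_months.png')
print('wrote to box_all_months.png')
```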
sandutsar/incubator-mxnet | [
"3ae7ddfd9c99f40a33c4cb716b3810e5463b810a"
]
| [
"tests/python/gpu/test_operator_gpu.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport sys\nimport os\nimport time\nimport multiprocessing as mp\nimport mxnet as mx\nimport numpy as np\nimport pytest\nimport itertools\nimport scipy.sparse as sps\nimport mxnet.ndarray.sparse as mxsps\nfrom mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose\nfrom mxnet.test_utils import check_symbolic_forward, check_symbolic_backward, discard_stderr\nfrom mxnet.test_utils import default_context, rand_shape_2d, rand_ndarray, same, environment, get_rtc_compile_opts\nfrom mxnet.base import MXNetError\nfrom mxnet import autograd\n\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '../unittest'))\nfrom common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied\nfrom common import run_in_spawned_process\nfrom test_operator import check_sequence_reverse, allclose_function\nfrom test_operator import *\nfrom test_numpy_ndarray import *\nfrom test_numpy_op import *\nfrom test_numpy_interoperability import *\nfrom test_gluon_probability_v2 import *\nfrom test_optimizer import *\nfrom test_random import *\nfrom test_exc_handling import *\nfrom test_sparse_ndarray import *\nfrom test_sparse_operator import *\nfrom test_ndarray import *\nfrom test_subgraph_op import *\nfrom test_gluon_gpu import _test_bulking\nfrom test_contrib_operator import test_multibox_target_op\nfrom test_optimizer import test_adamW\ndel test_custom_op_fork #noqa\n\nset_default_context(mx.gpu(0))\n\ndef check_countsketch(in_dim,out_dim,n):\n data = mx.sym.Variable(\"data\")\n h = mx.sym.Variable(\"h\")\n s = mx.sym.Variable(\"s\")\n sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)\n shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s\n\n arr = [mx.nd.empty(shape[i]) for i in range(3)]\n arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]\n x = np.random.uniform(-10, 10, shape[0])\n arr[0][:] = x #input x\n h = np.random.randint(0, out_dim, shape[1])\n arr[1][:] = h #hash h\n s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])\n arr[2][:] = s #hash s\n locations = {\"data\": x, \"h\": h, \"s\": s}\n a = np.zeros((n,out_dim))\n temp = np.multiply(x, s)\n for num_sample in np.arange(0,n):\n for idx in np.arange(0,in_dim):\n a[num_sample][h[0][idx]] += temp[num_sample][idx]\n check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))\n out_grad = mx.nd.empty((n,out_dim))\n out_grad[:] = np.random.normal(-3, 3, (n,out_dim))\n a = np.zeros((n,in_dim))\n for j in np.arange(0,n):\n for i in np.arange(0,in_dim):\n a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]\n check_symbolic_backward(sym, locations, 
[out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))\n\n\[email protected]\ndef test_countsketch():\n minindim = 40\n maxindim = 100\n minoutdim = 5\n maxoutdim = 30\n maxn = 200\n in_dim = np.random.randint(minindim, maxindim)\n out_dim = np.random.randint(minoutdim, maxoutdim)\n n = np.random.randint(1, maxn)\n check_countsketch(in_dim, out_dim, n)\n\n\ndef check_fft(shape):\n sym = mx.sym.contrib.fft(name='fft', compute_size = 128)\n if len(shape) == 2:\n if shape[1]%2 != 0:\n lst = list(shape)\n lst[1] = lst[1]*2\n shape = tuple(lst)\n shape_old = shape\n if len(shape) == 4:\n if shape[3]%2 != 0:\n lst = list(shape)\n lst[3] = lst[3]*2\n shape = tuple(lst)\n shape_old = shape\n init = [np.random.normal(size=shape, scale=1.0)]\n arr_grad = [mx.nd.empty(shape)]\n ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]\n exe_list = [sym._simple_bind(**ctx) for ctx in ctx_list]\n\n for exe in exe_list:\n for arr, iarr in zip(exe.arg_arrays, init):\n arr[:] = iarr.astype(arr.dtype)\n # forward\n for exe in exe_list:\n exe.forward(is_train=True)\n out1 = [exe.outputs[0].asnumpy() for exe in exe_list]\n out = np.fft.fft(init, n=None, axis=-1, norm=None)\n if len(shape) == 2:\n out = np.reshape(out,(out.shape[1],out.shape[2]))\n out2 = np.append(out.real, out.imag, axis = 1)\n a = np.zeros(out1[0].shape)\n p = 0\n for i in range(out2.shape[1]//2):\n a[:,p] = out2[:,i]\n a[:,p+1] = out2[:,i+out2.shape[1]//2]\n p = p+2\n\n if len(shape) == 4:\n out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))\n out2 = np.append(out.real, out.imag, axis = 1)\n a = np.zeros(out1[0].shape)\n for i in range(out1[0].shape[0]):\n for j in range(out1[0].shape[1]):\n p = 0\n for k in range(out2.shape[3]):\n a[i,j,:,p] = out2[i,j,:,k]\n a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]\n p = p+2\n\n assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)\n\n # backward\n if len(shape) == 2:\n out_grad = mx.nd.empty((shape[0],2*shape[1]))\n out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))\n # out_grad_to_complex\n out_grad_complex = np.zeros(shape,dtype = np.complex64)\n for i in range(0,shape[1]):\n out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]\n out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]\n for exe in exe_list:\n exe.backward([out_grad])\n a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)\n\n if len(shape) == 4:\n out_grad = mx.nd.empty(out1[0].shape)\n out_grad[:] = np.random.normal(-3, 3, out1[0].shape)\n # out_grad_to_complex\n out_grad_complex = np.zeros(shape,dtype = np.complex64)\n for i in range(0,shape[3]):\n out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]\n out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]\n for exe in exe_list:\n exe.backward([out_grad])\n a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)\n\ndef test_fft():\n nrepeat = 2\n maxdim = 10\n for repeat in range(nrepeat):\n for order in [2,4]:\n shape = tuple(np.random.randint(1, maxdim, size=order))\n check_fft(shape)\n\ndef _make_ndarrays(input_list, ctx=mx.gpu(0)):\n return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]\n\ndef check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):\n values_arr = [np.random.rand(*shape).astype(dtype) * 10. 
for shape in shapes]\n mx_vals = _make_ndarrays(values_arr, ctx=ctx)\n sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))\n sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))\n # checks that operator is deterministic\n assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())\n\n ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],\n dtype='float32', ctx=ctx)\n assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)\n\[email protected]\ndef test_multi_sum_sq():\n min_nparam = 100\n max_nparam = 120\n min_dim = 50000\n max_dim = 100000\n max_ndim = 1\n\n dtypes = ['float16','float32', 'float64']\n for ctx in [mx.gpu(0)]:\n for dtype in dtypes:\n nparam = np.random.randint(min_nparam + 1, max_nparam + 1)\n shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]\n low_tol = ctx == mx.cpu(0) and ('float16'in [dtype])\n tol1 = 1e-3 if low_tol else 1e-5\n tol2 = 1e-6 if low_tol else 1e-7\n check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)\n\ndef check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):\n weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]\n grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]\n\n lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.\n wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.\n eta = (np.random.rand() + 0.1)\n eps = (np.random.rand() + 0.1) / 10000.\n\n mx_w = _make_ndarrays(weights_arr, ctx=ctx)\n mx_g = _make_ndarrays(grads_arr, ctx=ctx)\n mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)\n mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)\n\n w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))\n g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))\n\n ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],\n dtype='float32', ctx=ctx)\n ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],\n dtype='float32', ctx=ctx)\n assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)\n assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)\n\n rescale_grad = (np.random.rand() + 0.5) * 100.\n mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,\n rescale_grad=rescale_grad)\n ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)\n ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)\n ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)\n for i in range(ref_w_l2norm.size):\n _w = ref_w_l2norm[i]\n _g = ref_g_l2norm[i]\n if _w > 0.0 and _g > 0.0:\n ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)\n else:\n ref_new_lrs[i] = lrs[i]\n assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)\n\[email protected]\ndef test_fast_lars():\n min_nparam = 50\n max_nparam = 60\n maxdim = 10000\n maxndim = 1\n\n dtypes = ['float16','float32', 'float64']\n for ctx in [mx.cpu(0), mx.gpu(0)]:\n for w_dtype in dtypes:\n for g_dtype in dtypes:\n nparam = np.random.randint(min_nparam + 1, max_nparam + 1)\n shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]\n lowTol = ctx == mx.cpu(0) and ('float16'in [w_dtype, g_dtype])\n tol1 = 1e-3 if lowTol else 1e-5\n tol2 = 1e-6 if lowTol else 1e-7\n check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)\n\ndef check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):\n def 
_flatten_list(nested_list):\n return [item for sublist in nested_list for item in sublist]\n weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]\n grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]\n rescale_grad = (np.random.random() + 1.0)\n mx_w = _make_ndarrays(weights_arr)\n mx_g = _make_ndarrays(grads_arr)\n mx_p_w = _make_ndarrays(weights_arr)\n mx_p_g = _make_ndarrays(grads_arr)\n lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)\n mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))\n wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)\n mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))\n if use_master_weights:\n weights32_arr = [arr.astype('float32') for arr in weights_arr]\n mx_w32 = _make_ndarrays(weights32_arr)\n mx_p_w32 = _make_ndarrays(weights32_arr)\n if momentum is None:\n if use_master_weights:\n mx.nd.multi_mp_sgd_update(\n *_flatten_list(zip(mx_w, mx_g, mx_w32)),\n num_weights=len(shapes), lrs=lrs, wds=wds,\n rescale_grad=rescale_grad, out=mx_w)\n mx.nd.preloaded_multi_mp_sgd_update(\n *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +\n [mx_lrs, mx_wds]), num_weights=len(shapes),\n rescale_grad=rescale_grad, out=mx_p_w)\n else:\n out = mx.nd.multi_sgd_update(\n *_flatten_list(zip(mx_w, mx_g)),\n num_weights=len(shapes), lrs=lrs, wds=wds,\n rescale_grad=rescale_grad, out=mx_w)\n preloaded_out = mx.nd.preloaded_multi_sgd_update(\n *(_flatten_list(zip(mx_p_w, mx_p_g)) +\n [mx_lrs, mx_wds]), num_weights=len(shapes),\n rescale_grad=rescale_grad, out=mx_p_w)\n else:\n if use_master_weights:\n momentums_arr = [np.random.rand(*shape).astype(\"float32\") for shape in shapes]\n mx_m = _make_ndarrays(momentums_arr)\n mx_p_m = _make_ndarrays(momentums_arr)\n out = mx.nd.multi_mp_sgd_mom_update(\n *_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),\n num_weights=len(shapes), lrs=lrs, wds=wds,\n rescale_grad=0.95, momentum=momentum, out=mx_w)\n preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(\n *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +\n [mx_lrs, mx_wds]), num_weights=len(shapes),\n rescale_grad=0.95, momentum=momentum, out=mx_p_w)\n else:\n momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]\n mx_m = _make_ndarrays(momentums_arr)\n mx_p_m = _make_ndarrays(momentums_arr)\n mx.nd.multi_sgd_mom_update(\n *_flatten_list(zip(mx_w, mx_g, mx_m)),\n num_weights=len(shapes), lrs=lrs, wds=wds,\n rescale_grad=0.95, momentum=momentum, out=mx_w)\n mx.nd.preloaded_multi_sgd_mom_update(\n *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +\n [mx_lrs, mx_wds]), num_weights=len(shapes),\n rescale_grad=0.95, momentum=momentum, out=mx_p_w)\n\n def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):\n for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):\n assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)\n if dtype == 'float16':\n rtol = 1e-3\n atol = 1e-2\n else:\n rtol = 1e-5\n atol = 1e-6\n _assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)\n if momentum is not None:\n _assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)\n if use_master_weights:\n _assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)\n\ndef test_preloaded_multi_sgd():\n dtypes = ['float16', 'float32']\n momentums = [None, 0.9]\n min_nparam = 5\n max_nparam = 10\n maxdim = 6\n maxndim = 4\n for dtype in dtypes:\n use_master_weights_list = [False,] if dtype == 'float32' else [True, False]\n for use_master_weights in use_master_weights_list:\n 
for momentum in momentums:\n nparam = np.random.randint(min_nparam + 1, max_nparam + 1)\n shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]\n check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)\n\n\[email protected]\[email protected]\ndef test_batchnorm_with_type():\n ctx_list_v2_2D = [\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},\n ]\n\n ctx_list_v2_1D = [\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},\n ]\n\n ctx_list_v2_3D = [\n {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},\n {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}\n ]\n\n # V2, 2D\n bools = [False, True]\n for fix_gamma, cudnn_off in itertools.product(bools, bools):\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)\n check_consistency(sym, ctx_list_v2_2D)\n\n # V2, 1D\n for fix_gamma, cudnn_off in itertools.product(bools, bools):\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)\n check_consistency(sym, ctx_list_v2_1D)\n\n # V2, 3D\n for fix_gamma, cudnn_off in itertools.product(bools, [True,]):\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)\n check_consistency(sym, ctx_list_v2_3D)\n\n\[email protected]\ndef test_batchnorm_versions():\n def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):\n ctx_list = []\n sym_list = []\n\n # BatchNorm cpu\n if 'batchnorm_cpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm'))\n\n # BatchNorm gpu (organic)\n if 'batchnorm_gpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm', cudnn_off=True))\n\n # BatchNorm gpu cudnn (if cudnn is enabled)\n if 'batchnorm_cudnn' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': 
{'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm', cudnn_off=False))\n\n check_consistency(sym_list, ctx_list)\n\n def test_1d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 20)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',\n 'batchnorm_gpu', 'batchnorm_cudnn'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n def test_2d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 10, 10)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',\n 'batchnorm_gpu', 'batchnorm_cudnn'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n def test_3d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 3, 5, 5)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',\n 'batchnorm_gpu'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n test_1d_batchnorm(True, False)\n test_1d_batchnorm(False, False)\n test_1d_batchnorm(False, True)\n test_1d_batchnorm(True, True)\n\n test_2d_batchnorm(True, False)\n test_2d_batchnorm(False, False)\n test_2d_batchnorm(False, True)\n test_2d_batchnorm(True, True)\n\n test_3d_batchnorm(True, False)\n test_3d_batchnorm(False, False)\n test_3d_batchnorm(False, True)\n test_3d_batchnorm(True, True)\n\n\[email protected](1234)\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\[email protected]\ndef test_convolution_with_type():\n sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')\n\n data = mx.sym.Variable('conv_data')\n w = mx.sym.Variable('conv_weight')\n b = mx.sym.Variable('conv_bias')\n w = mx.sym.transpose(w, axes=(0,2,3,1))\n sym2 = mx.sym.transpose(data, axes=(0,2,3,1))\n sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))\n sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')\n\n sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\n # NHWC\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),\n 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),\n 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}\n ]\n # wider tolerance needed for true-fp16 NCHW test above\n tol = {np.dtype(np.float16): 0.5,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, rtol=tol, atol=tol)\n # test ability to turn off training on bias\n check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, rtol=tol, atol=tol)\n\n\n# Apply N symbols against each of M contexts, checking that all NxM combinations match.\ndef check_consistency_NxM(sym_list, ctx_list):\n # e.g. 
if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:\n # sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]\n check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)\n\n\[email protected](reason=\"test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141\")\[email protected]\ndef test_convolution_options():\n # 1D convolution\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 2D convolution\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 3D convolution\n 
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n\[email protected]\ndef test_conv_deconv_guards():\n # Test cases for convolution and deconvolution via strided fft. Ensure that the framework\n # guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)\n # see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750\n for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:\n dataname = opname + '_data'\n ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}\n test_cases = [\n {'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},\n {'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},\n {'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},\n {'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},\n {'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},\n {'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]\n for test_case_args in test_cases:\n try:\n sym = op(**test_case_args)\n sym_no_cudnn = op(cudnn_off=True, **test_case_args)\n check_consistency([sym, sym_no_cudnn], [ctx, ctx], scale=0.1)\n except:\n print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))\n raise\n\n\ndef _conv_with_num_streams(seed):\n with random_seed(seed):\n # Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad\n num_trials = 20\n for _ in range(num_trials):\n size = np.random.randint(32, 128)\n # The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible\n # kernel overlap. 
The non-cudnn conv op doesn't do this so is used as the 'golden copy'.\n ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),\n 'type_dict': {'conv_data': np.float32}}\n # Adding 'flip' here isolates the model from the input node (which can't use inplace store)\n flipped = mx.sym.flip(axis=0, name='conv')\n sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')\n sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),\n cudnn_off=True, name='conv')\n try:\n # tol can be pretty high- we're looking for a large diff due to garbaged workspace\n check_consistency([sym, sym_no_cudnn], [ctx, ctx], rtol=1e-2, atol=1e-2)\n except:\n print('Failing conv size = {}'.format(size))\n raise\n\n\[email protected](reason=\"skipping for now due to severe flakiness\")\ndef test_convolution_multiple_streams():\n for num_streams in ['1', '2']:\n for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:\n print('Starting engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)\n run_in_spawned_process(_conv_with_num_streams,\n {'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})\n print('Finished engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)\n\n\n# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.\n# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).\[email protected]\ndef test_convolution_large_c():\n problematic_c = 64 * 1024\n # The convolution accumulates many values, so scale the input magnitude.\n scale = 0.1\n def test_1D_with_width(width, grad_req):\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]\n sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')\n check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)\n\n def test_2D_with_width(width, grad_req):\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]\n sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')\n check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)\n\n # Run with different data tensor shapes to run cudnnFind() multiple times.\n # First, populate algo and op caches with models that always use cudnnFind() (req == 'write').\n # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').\n widths = [4, 16, 64]\n for req in ['write', 'add']:\n for width in widths:\n test_1D_with_width(width, req)\n test_2D_with_width(width, req)\n\n\n# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.\n# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).\[email protected]\ndef test_deconvolution_large_c():\n problematic_c = 64 * 1024\n # The deconvolution accumulates many values, so scale the input magnitude.\n scale = 0.1\n def test_1D_with_width(width, grad_req):\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (1, 
8, width), 'type_dict': {'deconv_data': np.float64}}]\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')\n check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)\n\n def test_2D_with_width(width, grad_req):\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]\n sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')\n check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)\n\n # Run with different data tensor shapes to run cudnnFind() multiple times.\n # First, populate algo and op caches with models that always use cudnnFind() (req == 'write').\n # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').\n widths = [4, 16, 64]\n for req in ['write', 'add']:\n for width in widths:\n test_1D_with_width(width, req)\n test_2D_with_width(width, req)\n\n\[email protected]\ndef test_convolution_versions():\n # 2D convolution NCHW\n ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')\n syms = [conv_cudnn, conv_cpu, conv_gpu]\n check_consistency(syms, ctx_list)\n\n # 3D convolution NCDHW\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n syms = [conv_cudnn, conv_cpu, conv_gpu]\n check_consistency(syms, ctx_list)\n\n\n# More max-pooling strides and pads to test cudnn pooling implementation code paths\[email protected]\ndef test_pooling_nhwc_with_convention():\n def make_pooling_syms(**kwargs):\n # Conventional NCHW layout pooling\n sym = mx.sym.Pooling(**kwargs)\n # NHWC pooling\n data = mx.sym.Variable('pool_data')\n sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))\n sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)\n sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')\n return [sym, sym_nhwc]\n\n # While the float32 and float64 output is reliably consistent, float16 departs occasionally.\n # We compare nhwc and nchw results only within a given precision.\n for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:\n for kernel in [(2,2), (3,3), (4,4)]:\n for stride in [(1,1), (1,2), (2,1), (2,2)]:\n for data_type in [np.float64, np.float32, np.float16]:\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,\n 'type_dict': {'pool_data': data_type}}]\n symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,\n pooling_convention='valid', name='pool')\n 
check_consistency_NxM(symlist, ctx_list)\n\n symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,\n pooling_convention='full', name='pool')\n check_consistency_NxM(symlist, ctx_list)\n\n symlist = make_pooling_syms(kernel=(300,300), pool_type='max',\n global_pool=True, name='pool')\n check_consistency_NxM(symlist, ctx_list)\n\n\[email protected]\ndef test_pooling_with_type():\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},\n {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},\n {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]\n sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')\n check_consistency(sym, ctx_list, rand_type=np.float16)\n\n sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')\n check_consistency(sym, ctx_list, rand_type=np.float16)\n\n sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')\n check_consistency(sym, ctx_list, rand_type=np.float16)\n\n\[email protected]\ndef test_deconvolution_with_type():\n # Test basic deconvolution without exercising stride, pad or dilation.\n # 1D deconvolution\n sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]\n # wider tolerance needed for true-fp16 test above\n tol = {np.dtype(np.float16): 0.3,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, rtol=tol, atol=tol)\n check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req=\"add\")\n\n # 2D deconvolution\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]\n # wider tolerance needed for true-fp16 test above\n tol = {np.dtype(np.float16): 0.3,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, rtol=tol, atol=tol)\n check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req=\"add\")\n\n\[email protected]\ndef test_deconvolution_options():\n\n # 1D deconvolution\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 
2, 7), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 2D deconvolution\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n# # 3D deconvolution (not yet enabled)\n# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n# # Pad > 0\n# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n# # Stride > 1\n# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')\n# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')\n# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n\[email protected](1234)\ndef test_bilinear_sampler_with_type():\n data = mx.sym.Variable('data')\n grid = mx.sym.Variable('grid')\n sym = mx.sym.BilinearSampler(data=data, grid=grid)\n ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 
10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float64}},\n {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float32}},\n {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float16}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float64}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\n\ndef test_grid_generator_with_type():\n data = mx.sym.Variable('data')\n sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))\n scale = 1\n ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},\n {'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list, scale=scale)\n check_consistency(sym, ctx_list, scale=scale, grad_req=\"add\")\n sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))\n ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},\n {'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\n\ndef test_spatial_transformer_with_type():\n data = mx.sym.Variable('data')\n loc = mx.sym.Flatten(data)\n loc = mx.sym.FullyConnected(data=loc, num_hidden=10)\n loc = mx.sym.Activation(data=loc, act_type='relu')\n loc = mx.sym.FullyConnected(data=loc, num_hidden=6)\n sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),\n transform_type=\"affine\", sampler_type=\"bilinear\", cudnn_off=True)\n ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),\n transform_type=\"affine\", sampler_type=\"bilinear\", cudnn_off=False)\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\ndef test_pooling_with_type2():\n # While the float32 and float64 output is reliably consistent, float16 departs occasionally.\n # We compare cpu and gpu results only within a given precision.\n for data_type in [np.float64, np.float32, np.float16]:\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},\n {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')\n check_consistency(sym, ctx_list)\n\ndef test_pooling_nhwc_with_type():\n def make_pooling_syms(**kwargs):\n # Conventional NCHW layout pooling\n sym = mx.sym.Pooling(**kwargs)\n # NHWC pooling\n data = mx.sym.Variable('pool_data')\n sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))\n sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)\n sym_nhwc = 
mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')\n return [sym, sym_nhwc]\n\n # While the float32 and float64 output is reliably consistent, float16 departs occasionally.\n # We compare nhwc and nchw results only within a given precision.\n for data_type in [np.float64, np.float32, np.float16]:\n # NHWC pooling only enabled on GPU with CUDNN\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]\n symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')\n check_consistency_NxM(symlist, ctx_list)\n\n symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')\n check_consistency_NxM(symlist, ctx_list)\n\n symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')\n check_consistency_NxM(symlist, ctx_list)\n\n\[email protected]\ndef test_pooling_versions():\n\n # Produce the name of the 'transposed' layout, given the dimension\n def transposed_layout(ndim):\n if ndim < 3 or ndim > 5:\n raise RuntimeError(\"Invalid data dim, expecting 3, 4 or 5\")\n return ('NWC', 'NHWC', 'NDHWC')[ndim-3]\n\n # default padding is all zeros\n def is_default_pad(pad):\n return pad == (0,) * len(pad)\n\n # default stride is all ones\n def is_default_stride(stride):\n return stride == (1,) * len(stride)\n\n # returns True/False randomly with equal probability\n def random_choice():\n return np.random.random(1)[0] < 0.5\n\n def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,\n pooling_convention='valid', global_pool=False, p_value=2,\n count_include_pad=True, tol=None, dtype=np.float32):\n ctx_list = []\n sym_list = []\n for pool_ctx in pool_op_list:\n (pool_op, ctx_type) = pool_ctx.rsplit('_', 1)\n expected_ctxs = ['cpu', 'gpu', 'cudnn']\n if ctx_type not in expected_ctxs:\n raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))\n ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)\n ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})\n # start with pool args present in all cases\n pool_op_args = {'kernel': kernel, 'pool_type': pool_type,\n 'pooling_convention' : pooling_convention, 'name' : 'pool'}\n # add other args as needed\n if global_pool:\n pool_op_args['global_pool'] = True\n else:\n # Add pad and stride param if needed, plus randomly when it matches the default\n if not is_default_pad(pad) or random_choice():\n pool_op_args.update({'pad' : pad})\n if not is_default_stride(stride) or random_choice():\n pool_op_args.update({'stride' : stride})\n\n expected_pool_ops = ['pool', 'pool_transposed']\n pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})\n if ctx_type != 'cpu':\n pool_op_args['cudnn_off'] = ctx_type == 'gpu'\n if pool_op == 'pool':\n # isolate pooling input from symbol input to test shared tensor optimizations\n buffered_input = mx.sym.identity(name='pool')\n sym = mx.sym.Pooling(buffered_input, **pool_op_args)\n elif pool_op == 'pool_transposed':\n ndim = len(data)\n # NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);\n axes = (0,) + tuple(range(2,ndim)) + (1,)\n transposed = mx.sym.transpose(axes=axes, name='pool')\n pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),\n **pool_op_args)\n # NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);\n axes = (0, ndim-1) + tuple(range(1,ndim-1))\n sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')\n else:\n raise 
RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,\n pool_op))\n sym_list.append(sym)\n\n check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), rtol=tol, atol=tol)\n\n def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,\n tol=None):\n if dim == '1D':\n data = (3, 3, 10)\n kernels = [(4,), (4,), (5,)]\n pads = [(0,), (2,), (2,)]\n strides = [(1,), (2,), (1,)]\n elif dim == '2D_no_padding':\n data = (3, 2, 20, 20)\n kernels = [(3, 3), (4, 5)]\n pads = [(0, 0), (0, 0)]\n strides = [(1, 1), (2, 1)]\n elif dim == '2D':\n data = (2, 2, 20, 20)\n kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]\n pads = [(0, 0), (1, 2), (0, 0), (2, 3)]\n strides = [(1, 1), (1, 1), (2, 1), (1, 1)]\n elif dim == '3D':\n data = (2, 3, 20, 20, 20)\n kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]\n pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]\n strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]\n else:\n raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))\n\n for kernel, pad, stride in zip(kernels, pads, strides):\n for pooling_convention in ['valid', 'full']:\n try:\n test_pooling_versions_helper(pool_op_list=pool_op_list,\n data=data, kernel=kernel, pad=pad, stride=stride,\n pool_type=pool_type, pooling_convention=pooling_convention,\n global_pool=False, p_value=p_value,\n count_include_pad=count_include_pad, tol=tol, dtype=dtype)\n except:\n print('pool_op_list = {}'.format(pool_op_list))\n print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))\n print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,\n pooling_convention))\n print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,\n count_include_pad, dtype))\n print('environ = \\n{}'.format(os.environ))\n raise\n\n # Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value\n kernel = kernels[0]\n if random_choice():\n kernel = (300,) * len(kernel)\n\n test_pooling_versions_helper(pool_op_list=pool_op_list,\n data=data, kernel=kernel, pad=None, stride=None,\n pool_type=pool_type, global_pool=True, p_value=p_value,\n count_include_pad=count_include_pad, tol=tol, dtype=dtype)\n\n # The various implementations of the standard pooling operator\n std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',\n 'pool_gpu', 'pool_transposed_gpu',\n 'pool_cudnn', 'pool_transposed_cudnn']\n\n for dtype in [np.float32, np.float64, np.float16]:\n # Testing of the standard (not 'v1') pooling operator is universal across all\n # data dimensions, implementations and layouts.\n for dim in ['1D', '2D', '3D']:\n test_pooling_dim(dim, 'max', dtype, std_pool_op_list)\n test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)\n test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)\n test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)\n test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)\n test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)\n test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)\n\n\ndef test_pooling_full_2d():\n def test_pooling_full_2d_type(pool_type):\n data = (2, 2, 10, 10)\n kernel = (4, 5)\n pad = (1, 2)\n stride = (3, 4)\n\n convention = 'full'\n ctx_list = []\n sym_list = []\n\n # o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4\n # o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,\n pooling_convention=convention, global_pool=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=convention, global_pool=False, name='pool'))\n\n check_consistency(sym_list, ctx_list)\n\n test_pooling_full_2d_type('max')\n test_pooling_full_2d_type('avg')\n test_pooling_full_2d_type('sum')\n\n\[email protected]\ndef test_flatten_slice_after_conv():\n ctx_list = []\n\n data = mx.sym.Variable('conv_data')\n conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))\n flatten = mx.symbol.flatten(data=conv)\n slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)\n\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]\n check_consistency(slice_sym, ctx_list, scale=0.5)\n\n\ndef test_bilinear_resize_op():\n ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},\n {'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]\n\n data = mx.sym.Variable('data')\n sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)\n check_consistency(sym, ctx_list)\n\[email protected]\ndef test_global_pooling():\n def test_1d_pooling(pool_type, p_value=2):\n data = (2, 3, 20)\n kernel = (4,)\n pad = (2,)\n stride = (2,)\n\n ctx_list = []\n sym_list = []\n\n pooling_convention = 'valid'\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': 
data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n check_consistency(sym_list, ctx_list)\n\n def test_2d_pooling(pool_type, p_value=2):\n data = (2, 3, 20, 20)\n kernel = (4, 4)\n pad = (2, 2)\n stride = (2, 2)\n\n ctx_list = []\n sym_list = []\n\n pooling_convention = 'valid'\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': 
np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))\n\n\n check_consistency(sym_list, ctx_list)\n\n test_1d_pooling('max')\n test_1d_pooling('avg')\n test_1d_pooling('sum')\n test_1d_pooling('lp', p_value=1)\n test_1d_pooling('lp', p_value=2)\n test_1d_pooling('lp', p_value=3)\n\n test_2d_pooling('max')\n test_2d_pooling('avg')\n test_2d_pooling('sum')\n test_2d_pooling('lp', p_value=1)\n test_2d_pooling('lp', p_value=2)\n test_2d_pooling('lp', p_value=3)\n\n\ndef test_upsampling_with_type():\n sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)\n ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},\n {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},\n {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},\n {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},\n {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_upsampling_bilinear_with_type():\n sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)\n ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},\n {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},\n {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},\n {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},\n {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_concat_with_type():\n sym = mx.sym.Concat(name='concat', num_args=2)\n ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_elementwisesum_with_type():\n dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],\n [mx.cpu(0), [np.float64, np.float32]] ]\n for num_args in range(1, 6):\n ews_arg_shape = {}\n for i in range(num_args):\n ews_arg_shape['ews_arg'+str(i)] = (2, 10)\n sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)\n ctx_list = []\n for dev, types in dev_types:\n for dtype in types:\n ews_arg_dtype = {'type_dict':{}}\n for i in range(num_args):\n ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype\n ctx_elem = {'ctx': dev}\n ctx_elem.update(ews_arg_shape)\n ctx_elem.update(ews_arg_dtype)\n 
ctx_list.append(ctx_elem)\n check_consistency(sym, ctx_list)\n\n\ndef test_reshape_with_type():\n sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))\n ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},\n {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},\n {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},\n {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},\n {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_blockgrad_with_type():\n sym = mx.sym.BlockGrad(name='bg')\n ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},\n {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},\n {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},\n {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},\n {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_swapaxis_with_type():\n sym = mx.sym.SwapAxis(name='swap', dim1=1)\n ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},\n {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},\n {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},\n {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},\n {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_fullyconnected_with_type():\n sym = mx.sym.FullyConnected(num_hidden=3, name='inner')\n ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},\n {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},\n {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},\n {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},\n {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]\n check_consistency(sym, ctx_list)\n # Sizes are divisible by 8 to test TensorCore on Volta GPU.\n sym = mx.sym.FullyConnected(num_hidden=8, name='inner')\n ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},\n {'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_activation_with_type():\n act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']\n shape = (2, 2, 10, 10)\n for act_type in act_types:\n sym = mx.sym.Activation(name='act', act_type=act_type)\n ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},\n {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},\n {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},\n {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},\n {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},\n {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]\n check_consistency(sym, ctx_list)\n\n\ndef test_lrn():\n sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, 
name='lrn')\n ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},\n {'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\[email protected](os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',\n reason=\"Testing with naive engine consistently triggers illegal memory access. Tracked in #17713\")\ndef test_embedding_with_type():\n def test_embedding_helper(data_types, weight_types, low_pad, high_pad):\n NVD = [[20, 10, 20], [200, 10, 300], [10000, 4, 20]]\n for safe_accumulation in ['0', '1', None]:\n for N, V, D in NVD:\n with environment('MXNET_SAFE_ACCUMULATION', safe_accumulation):\n if N > 1000 and safe_accumulation != '1':\n break\n sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)\n ctx_list = []\n for data_type in data_types:\n for weight_type in weight_types:\n ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),\n 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})\n ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),\n 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})\n arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}\n check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},\n arg_params=arg_params, scale=0.1)\n\n data_types = [np.float16, np.float32, np.float64, np.int32]\n weight_types = [np.float16, np.float32, np.float64]\n test_embedding_helper(data_types, weight_types, 5, 5)\n data_types = [np.uint8]\n weight_types = [np.float16, np.float32, np.float64]\n test_embedding_helper(data_types, weight_types, 0, 5)\n\n\ndef test_take_with_type():\n sym = mx.sym.take(name='take')\n for safe_accumulation in ['0', '1', None]:\n for data_ndim in range(2, 5):\n for idx_ndim in range(1, 4):\n data_shape = ()\n for _ in range(data_ndim):\n data_shape += (np.random.randint(low=3, high=6), )\n idx_shape = ()\n for _ in range(idx_ndim):\n idx_shape += (np.random.randint(low=3, high=5), )\n ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float64,\n 'take_a': np.float64}},\n {'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float32,\n 'take_a': np.float32}},\n {'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float16,\n 'take_a': np.float16}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float64,\n 'take_a': np.float64}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float32,\n 'take_a': np.float32}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float16,\n 'take_a': np.float16}}]\n arg_params = {'take_indices': np.random.randint(low=0,\n high=data_shape[0],\n size=idx_shape),\n 'take_a': np.random.normal(size=data_shape)}\n with environment('MXNET_SAFE_ACCUMULATION', safe_accumulation):\n check_consistency(sym, ctx_list,\n grad_req={'take_indices': 'null',\n 'take_a': 'write'},\n arg_params=arg_params)\n\n # check a large num of indices: may underflow calculating gradient in FP16,\n # if MXNET_SAFE_ACCUMULATION is not activated\n with environment('MXNET_SAFE_ACCUMULATION', '1'):\n data_size = 4\n indices_size = 10000\n out_dim = 20\n data_types = 
[np.float16, np.float32, np.float64]\n indices_types = [np.float16, np.float32, np.float64, np.int32]\n # axis 0\n sym = mx.sym.take(name='take', axis=0)\n ctx_list = []\n for data_type in data_types:\n for index_type in indices_types:\n ctx_list.append({'ctx': mx.cpu(0), 'take_indices': (indices_size,),\n 'take_a': (data_size, out_dim),\n 'type_dict': {'take_indices': index_type, 'take_a': data_type}})\n ctx_list.append({'ctx': mx.gpu(0), 'take_indices': (indices_size,),\n 'take_a': (data_size, out_dim),\n 'type_dict': {'take_indices': index_type, 'take_a': data_type}})\n arg_params = {'take_indices': np.random.randint(0, data_size,\n size=(indices_size,)),\n 'take_a': np.random.normal(size=(data_size, out_dim))}\n check_consistency(sym, ctx_list,\n grad_req={'take_indices': 'null','take_a': 'write'},\n arg_params=arg_params)\n # axis 1\n sym = mx.sym.take(name='take', axis=1)\n ctx_list = []\n for data_type in data_types:\n for index_type in indices_types:\n ctx_list.append({'ctx': mx.cpu(0), 'take_indices': (indices_size,),\n 'take_a': (data_size, out_dim),\n 'type_dict': {'take_indices': index_type, 'take_a': data_type}})\n ctx_list.append({'ctx': mx.gpu(0), 'take_indices': (indices_size,),\n 'take_a': (data_size, out_dim),\n 'type_dict': {'take_indices': index_type, 'take_a': data_type}})\n arg_params = {'take_indices': np.random.randint(0, data_size,\n size=(indices_size,)),\n 'take_a': np.random.normal(size=(data_size, out_dim))}\n check_consistency(sym, ctx_list,\n grad_req={'take_indices': 'null','take_a': 'write'},\n arg_params=arg_params)\n\[email protected]\ndef test_psroipooling_with_type():\n arg_params = {\n 'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}\n\n # plain psroipooling\n sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')\n ctx_list = [{'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},\n {'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},\n {'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},\n ]\n\n check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',\n 'psroipool_rois': 'null'}, arg_params=arg_params)\n\n\[email protected]\ndef test_deformable_psroipooling_with_type():\n tol = {np.dtype(np.float32): 1e-1,\n np.dtype(np.float64): 1e-3,\n np.dtype(np.float16): 1e-2}\n\n arg_params = {\n 'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}\n\n # deformable psroipooling\n sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,\n output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')\n\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,\n 'deformable_psroipool_trans': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,\n 
'deformable_psroipool_trans': np.float32}},\n {'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,\n 'deformable_psroipool_trans': np.float16}},\n {'ctx': mx.cpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,\n 'deformable_psroipool_trans': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,\n 'deformable_psroipool_trans': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,\n 'deformable_psroipool_trans': np.float16}},\n ]\n\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,\n grad_req={'deformable_psroipool_data': 'write',\n 'deformable_psroipool_rois': 'null',\n 'deformable_psroipool_trans': 'write'}, arg_params=arg_params)\n\n\[email protected]\ndef test_deformable_convolution_with_type():\n tol = {np.dtype(np.float32): 1e-1,\n np.dtype(np.float64): 1e-3}\n\n sym = mx.sym.npx.deformable_convolution(num_filter=3, kernel=(3,3), name='deformable_conv')\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n ]\n\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)\n # test ability to turn off training on bias\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,\n grad_req={'deformable_conv_data': 'write',\n 'deformable_conv_offset': 'write',\n 'deformable_conv_weight': 'write',\n 'deformable_conv_bias': 'null'})\n\n\ndef test_deformable_convolution_options():\n tol = {np.dtype(np.float32): 1e-1,\n np.dtype(np.float64): 1e-3}\n # 2D convolution\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n\n # Pad > 0\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': 
np.float32, 'deformable_conv_offset': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n ]\n sym = mx.sym.npx.deformable_convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)\n\n # Stride > 1\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n ]\n sym = mx.sym.npx.deformable_convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)\n\n # Dilate > 1\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n ]\n sym = mx.sym.npx.deformable_convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')\n check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)\n\n # Deformable group > 1\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.cpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n ]\n sym = mx.sym.npx.deformable_convolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')\n 
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)\n\n\ndef check_rnn_layer(layer):\n layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n with mx.gpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n # atol of 1e-6 required, as exposed by seed 2124685726\n assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)\n\ndef check_rnn_layer_w_rand_inputs(layer):\n layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n x = mx.nd.uniform(shape=(10, 16, 30))\n with mx.gpu(0):\n x = x.copyto(mx.gpu(0))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = x.copyto(mx.cpu(0))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)\n\[email protected]\ndef test_sequence_reverse():\n check_sequence_reverse(mx.gpu(0))\n\n\[email protected]\ndef test_autograd_save_memory():\n x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))\n x.attach_grad()\n\n with mx.autograd.record():\n for i in range(200):\n x = x + 1\n x.wait_to_read()\n x.backward()\n\n\[email protected]\ndef test_cuda_rtc():\n ctx = mx.gpu(0)\n source = r'''\n extern \"C\" __global__ void axpy(const float *x, float *y, float alpha) {\n int i = threadIdx.x + blockIdx.x * blockDim.x;\n y[i] += alpha * x[i];\n }\n\n extern \"C\" __global__ void saxpy(const float *x, float *y, float alpha) {\n extern __shared__ float smem[];\n int i = threadIdx.x + blockIdx.x * blockDim.x;\n smem[threadIdx.x] = x[i];\n y[i] += alpha * smem[threadIdx.x];\n }\n '''\n\n compile_opts = get_rtc_compile_opts(ctx)\n module = mx.rtc.CudaModule(source, options=compile_opts)\n axpy = module.get_kernel(\"axpy\", \"const float *x, float *y, float alpha\")\n x = mx.nd.ones((10,), ctx=ctx)\n y = mx.nd.zeros((10,), ctx=ctx)\n axpy.launch([x, y, 3.0], ctx, (1, 1, 1), (10, 1, 1))\n assert (y.asnumpy() == 3).all()\n\n saxpy = module.get_kernel(\"saxpy\", \"const float *x, float *y, float alpha\")\n saxpy.launch([x, y, 4.0], ctx, (1, 1, 1), (10, 1, 1), 10)\n assert (y.asnumpy() == 7).all()\n\n saxpy.launch([x, y, 5.0], ctx, (2, 1, 1), (5, 1, 1), 5)\n assert (y.asnumpy() == 12).all()\n\n\[email protected]\ndef test_cross_device_autograd():\n x = mx.nd.random.uniform(shape=(10,))\n x.attach_grad()\n\n with mx.autograd.record():\n y = mx.nd.tanh(x)\n y = y.copyto(mx.gpu(0))\n y = mx.nd.tanh(y)\n y = y.copyto(mx.cpu(0))\n y = mx.nd.tanh(y)\n y = y.copyto(mx.gpu(0))\n y = y.copyto(mx.gpu(0))\n\n y.backward()\n\n dx = x.grad.copy()\n x.grad[:] = 0\n\n with mx.autograd.record():\n y = x\n for i in range(3):\n y = mx.nd.tanh(y)\n y.backward()\n\n assert_almost_equal(dx, x.grad)\n\[email protected]\ndef test_multi_proposal_op():\n # paramters\n feature_stride = 16\n scales = (8, 16, 32)\n ratios = (0.5, 1, 2)\n rpn_pre_nms_top_n = 12000\n rpn_post_nms_top_n = 2000\n rpn_min_size = feature_stride\n\n feat_len = (1000 + 15) // 16\n H, W = feat_len, feat_len\n num_anchors = len(scales) * len(ratios)\n count_anchors = H * W * num_anchors\n\n def get_new_data(batch_size, ctx):\n '''\n cls_prob: (batch_size, 2 * num_anchors, H, W)\n bbox_pred: (batch_size, 4 * num_anchors, H, W)\n im_info: (batch_size, 3)\n '''\n\n dtype = np.float32\n cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = 
dtype, ctx = ctx)\n bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)\n im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)\n\n cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]\n np.random.shuffle(cls)\n cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)\n bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)\n\n for i in range(batch_size):\n im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))\n im_scale = np.random.randint(80, 100) / 100.0\n im_info[i, :] = [im_size[0], im_size[1], im_scale]\n return cls_prob, bbox_pred, im_info\n\n def check_proposal_consistency(op, batch_size, with_nms=False):\n '''\n op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal\n '''\n cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))\n rois_cpu, score_cpu = op(\n cls_prob = cls_prob,\n bbox_pred = bbox_pred,\n im_info = im_info,\n feature_stride = feature_stride,\n scales = scales,\n ratios = ratios,\n rpn_pre_nms_top_n = rpn_pre_nms_top_n,\n rpn_post_nms_top_n = rpn_post_nms_top_n,\n threshold = 0.7 if with_nms else 1.0,\n rpn_min_size = rpn_min_size, output_score = True)\n\n gpu_ctx = mx.gpu(0)\n\n # copy data to gpu from cpu\n cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)\n bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)\n im_info_gpu = im_info.as_in_context(gpu_ctx)\n\n rois_gpu, score_gpu = op(\n cls_prob = cls_prob_gpu,\n bbox_pred = bbox_pred_gpu,\n im_info = im_info_gpu,\n feature_stride = feature_stride,\n scales = scales,\n ratios = ratios,\n rpn_pre_nms_top_n = rpn_pre_nms_top_n,\n rpn_post_nms_top_n = rpn_post_nms_top_n,\n threshold = 0.7 if with_nms else 1.0,\n rpn_min_size = rpn_min_size, output_score = True)\n\n rois_cpu_np = rois_cpu.asnumpy()\n rois_gpu_np = rois_gpu.asnumpy()\n\n score_cpu_np = score_cpu.asnumpy()\n score_gpu_np = score_gpu.asnumpy()\n\n if not with_nms:\n assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)\n assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)\n else:\n # no 100% gurantee with nms\n assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)\n assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)\n\n check_proposal_consistency(mx.nd.contrib.Proposal, 1)\n check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)\n check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)\n check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)\n\n\n# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.\ndef kernel_error_check_imperative():\n with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):\n with mx.np_shape(active=True):\n a = mx.nd.array([1,2,3],ctx=mx.gpu(0))\n b = mx.nd.array([],ctx=mx.gpu(0))\n c = (a / b).asnumpy()\n\ndef kernel_error_check_symbolic():\n with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):\n with mx.np_shape(active=True):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n c = a / b\n f = c.bind(mx.gpu(0), {'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),\n 'b':mx.nd.array([],ctx=mx.gpu(0))})\n f.forward()\n g = f.outputs[0].asnumpy()\n\[email protected]\ndef test_kernel_error_checking():\n # Running tests that may throw exceptions out of worker threads will stop CI testing\n # if not run in a separate process (with its own address space for CUDA compatibility).\n try:\n mpctx = mp.get_context('spawn')\n except:\n 
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %\n sys.version_info[0:2], file=sys.stderr, end='')\n else:\n with discard_stderr():\n for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:\n p = mpctx.Process(target=f)\n p.start()\n p.join()\n assert p.exitcode != 0,\\\n \"Expected a synchronous kernel error from %s(), none seen.\" % f.__name__\n\ndef test_incorrect_gpu():\n # Try setting dev_id to a really big number\n pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))\n\ndef test_batchnorm_backwards_notrain():\n for ctx in [mx.cpu(0), mx.gpu(0)]:\n for cudnn_o in [False, True]:\n B,C,H,W = 4,3,2,2\n x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)\n gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n x.attach_grad()\n\n with autograd.record(False):\n y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),\n fix_gamma=False, cudnn_off=cudnn_o)\n loss=y.square().sum()\n loss.backward(train_mode=False)\n\ndef test_create_sparse_ndarray_gpu_to_cpu():\n dim0 = 10\n dim1 = 5\n densities = [0, 0.5, 1]\n for density in densities:\n shape = rand_shape_2d(dim0, dim1)\n matrix = rand_ndarray(shape, 'row_sparse', density)\n data = matrix.data\n indices = matrix.indices\n rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())\n assert rsp_created.stype == 'row_sparse'\n assert same(rsp_created.data.asnumpy(), data.asnumpy())\n assert same(rsp_created.indices.asnumpy(), indices.asnumpy())\n rsp_copy = mx.nd.array(rsp_created)\n assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))\n\n\ndef test_softmax_activation():\n gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],\n [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))\n cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],\n [2., -.4, 7., 3., 0.2]], ctx=mx.cpu())\n\n cpu_a.attach_grad()\n gpu_a.attach_grad()\n with mx.autograd.record():\n gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)\n cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)\n assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)\n\n gpu_y.backward()\n cpu_y.backward()\n assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)\n\n\[email protected]\[email protected]\ndef test_bilinear_sampler_versions():\n data = mx.sym.Variable('data')\n grid = mx.sym.Variable('grid')\n sym1 = mx.sym.BilinearSampler(data=data, grid=grid)\n sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)\n sym3 = mx.sym.BilinearSampler(data=data, grid=grid)\n\n test_cases = [[(1,3,15,16),(1,2,10,10)],\n [(1,6,7,16),(1,2,10,4)],\n [(1,7,3,16),(1,2,8,11)],\n [(1,9,50,50),(1,2,50,50)]]\n\n for item in test_cases:\n data_shape, grid_shape = item\n # kWriteTo\n exe_cpu = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')\n exe_gpu = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')\n exe_cudnn = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')\n exe_list = [exe_cpu, exe_gpu, exe_cudnn]\n ref_idx = 0\n test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)\n test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)\n for exe in exe_list:\n exe.arg_dict['data'][:] = test_data\n exe.arg_dict['grid'][:] = test_grid\n exe.forward(is_train=True)\n 
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)\n\n out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)\n for exe in exe_list:\n exe.backward(mx.nd.array(out_grad))\n assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)\n assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)\n\n data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()\n grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()\n\n # kAddTo\n exe_cpu_addto = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')\n exe_gpu_addto = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')\n exe_cudnn_addto = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')\n exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]\n data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)\n grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)\n for exe in exe_list:\n exe.arg_dict['data'][:] = test_data\n exe.arg_dict['grid'][:] = test_grid\n exe.grad_dict['data'][:] = data_initial_grad\n exe.grad_dict['grid'][:] = grid_initial_grad\n exe.forward(is_train=True)\n exe.backward(mx.nd.array(out_grad))\n assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)\n assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)\n assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)\n assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)\n\n for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:\n # Mixture of kWriteTo and kNullOp\n exe_cpu_mix = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)\n exe_gpu_mix = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)\n exe_cudnn_mix = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)\n exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]\n for exe in exe_list:\n exe.arg_dict['data'][:] = test_data\n exe.arg_dict['grid'][:] = test_grid\n exe.forward(is_train=True)\n exe.backward(mx.nd.array(out_grad))\n if req_dict['data'] is 'write':\n assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)\n if req_dict['grid'] is 'write':\n assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)\n\n\n# isolated execution bulking test function to be invoked with different env var settings\ndef _test_bulking_in_process(seed, time_per_iteration):\n data_shape = (10,)\n num_ops = 1000\n num_iterations = 20\n\n ctx = default_context()\n # build symbol\n X = mx.sym.Variable('X')\n sym = mx.sym.flip(X, axis=0)\n for _ in range(num_ops-1):\n sym = mx.sym.flip(sym, axis=0)\n x = mx.ndarray.zeros(data_shape)\n dx = mx.ndarray.zeros(data_shape)\n dy = mx.ndarray.ones(data_shape)\n exe = sym._bind(ctx=ctx, args=[x], args_grad = {'X':dx})\n\n # time a number of forward() and backward() executions after some warm-up iterations\n warmups = 1\n for i in range(num_iterations+warmups):\n if i == 
warmups:\n start = time.time()\n exe.forward(is_train=True)\n exe.backward(dy)\n dx.wait_to_read()\n time_per_iteration.value = (time.time() - start) / num_iterations\n\n\[email protected](reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')\ndef test_bulking_operator_gpu():\n _test_bulking(_test_bulking_in_process)\n\n\[email protected](reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')\ndef test_bulking():\n # test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)\n test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]\n times = {}\n times_str = ''\n for seg_sizes in test_cases:\n # Create shared variable to return measured time from test process\n time_per_iteration = mp.Manager().Value('d', 0.0)\n if not run_in_spawned_process(_test_bulking_in_process,\n {'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : str(seg_sizes[0]),\n 'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : str(seg_sizes[1]),\n 'MXNET_EXEC_BULK_EXEC_TRAIN' : str(seg_sizes[2])},\n time_per_iteration):\n # skip test since the python version can't run it properly. Warning msg was logged.\n return\n times[seg_sizes] = time_per_iteration.value\n times_str += \\\n '\\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\\t{:.1f} msec'.format(\n seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])\n\n fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])\n slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])\n fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])\n fully_bulked_time = times[(15,15,True)]\n\n print(times_str)\n # Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,\n # slower than both half-bulked times[0,15,True] and times[15,0,True]\n assert slowest_half_bulked_time < fastest_non_bulked_time, \\\n 'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \\\n .format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)\n # The fully bulked times[15,15,True] should be faster than both half-bulked runs\n assert fully_bulked_time < fastest_half_bulked_time, \\\n 'The fully-bulked exec time is slower than a half-bulked time by {} secs! 
{}' \\\n .format(fully_bulked_time - fastest_half_bulked_time, times_str)\n\n\[email protected]\ndef test_allclose_function_gpu():\n allclose_function([mx.cpu(), mx.gpu(0)])\n\ndef test_context_num_gpus():\n # Test that num_gpus reports at least one GPU, as the test is run on a GPU host.\n assert mx.context.num_gpus() > 0\n\ndef math_log(shape, dtype, check_value):\n np_x = np.random.rand(*tuple(shape))\n x = mx.nd.array(np_x, dtype=dtype)\n y = mx.nd.log(data=x)\n if check_value:\n x_ = x.as_in_context(mx.cpu())\n y_ = mx.nd.log(data=x_)\n assert_almost_equal(y.asnumpy(), y_.asnumpy())\n\ndef math_erf(shape, dtype, check_value):\n np_x = np.random.rand(*tuple(shape))\n x = mx.nd.array(np_x, dtype=dtype)\n y = mx.nd.erf(data=x)\n if check_value:\n x_ = x.as_in_context(mx.cpu())\n y_ = mx.nd.erf(data=x_)\n assert_almost_equal(y.asnumpy(), y_.asnumpy())\n\ndef math_square(shape, dtype, check_value):\n np_x = np.random.rand(*tuple(shape))\n x = mx.nd.array(np_x, dtype=dtype)\n y = mx.nd.square(data=x)\n if check_value:\n x_ = x.as_in_context(mx.cpu())\n y_ = mx.nd.square(data=x_)\n assert_almost_equal(y.asnumpy(), y_.asnumpy())\n\ndef run_math(op, shape, dtype=\"float32\", check_value=True):\n run_num = 10\n for i in range(run_num):\n if op == 'log':\n math_log(shape=shape, dtype=dtype, check_value=check_value)\n elif op == 'erf':\n math_erf(shape=shape, dtype=dtype, check_value=check_value)\n elif op == 'square':\n math_square(shape=shape, dtype=dtype, check_value=check_value)\n\[email protected]\ndef test_math():\n ops = ['log', 'erf', 'square']\n check_value= True\n shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]\n dtypes = [\"float32\", \"float64\"]\n for shape in shape_lst:\n for dtype in dtypes:\n for op in ops:\n run_math(op, shape, dtype, check_value=check_value)\n\[email protected]\ndef test_arange_like_dtype():\n dtypes = [np.float16, np.float32, np.float64]\n\n for t in dtypes:\n x = mx.sym.Variable('x', dtype=t)\n y = mx.sym.reshape(x, shape=(0, 0, -1))\n z = mx.sym.contrib.arange_like(y, axis=-1)\n\n mod = z._simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')\n mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)\n out = mod.forward(is_train=False)\n for v in out:\n assert v.dtype == t\n\n\ndef test_fp16_spmm():\n inp = mxsps.csr_matrix(sps.coo_matrix(([2.0], ([150], [100000]))).tocsr())\n inp = inp.astype('float16', copy=False)\n weight = mx.nd.random.randn(100001, 151)\n weight = weight.astype('float16', copy=False)\n out = mxsps.dot(inp, weight)\n out_np = mx.nd.dot(inp, weight)\n assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)\n\[email protected]\[email protected]('dtype', [\"float16\", \"float32\", \"float64\"])\ndef test_split_v2_fwd(dtype):\n dim = random.randint(2, 9)\n shape = rand_shape_nd(dim)\n axis = random.randint(-dim, dim-1)\n axis_size = shape[axis]\n samples = random.randint(0, axis_size - 1)\n indices = sorted(random.sample([i for i in range(1, axis_size)], samples))\n indices = tuple(indices)\n mx_data = rand_ndarray(shape, dtype=dtype)\n np_data = mx_data.asnumpy()\n np_out = np.split(np_data, indices_or_sections=indices, axis=axis)\n data = mx.sym.Variable(\"data\")\n sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)\n check_symbolic_forward(sym, {\"data\": mx_data}, np_out, rtol=1e-3, atol=1e-5)\n"
]
| [
[
"numpy.random.rand",
"numpy.multiply",
"numpy.fft.fft",
"numpy.random.random",
"numpy.dtype",
"numpy.random.normal",
"numpy.arange",
"numpy.random.randint",
"numpy.append",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.random.shuffle",
"numpy.fft.ifft",
"numpy.ones",
"numpy.split",
"numpy.random.uniform",
"numpy.abs"
]
]
|
vasudha921/PythonClass9 | [
"654e826f89ac52f973ae5ec5e1de4d96c4e17d35"
]
| [
"graphs/class1.py"
]
| [
"import pandas as pd\nimport csv\nimport plotly.express as px\ndf = pd.read_csv(\"class1.csv\")\nfig = px.scatter(df, x = \"Student Number\", y = \"Marks\", title = \"Class 1 marks\")\n\nwith open(\"class1.csv\", newline= \"\") as f:\n reader = csv.reader(f)\n file_data = list(reader)\n\nfile_data.pop(0)\nnewData = []\ntotalmarks = 0\nfor i in file_data:\n totalmarks += float(i[1])\n\nn = len(file_data)\n \n\n \n \n\nmean = totalmarks/n\nprint(mean) \n\nfig.update_layout(shapes= [\n dict(\n type = 'line', y0 = mean, y1 = mean, x0 = 0, x1 = n\n )\n])\nfig.update_yaxes(rangemode = \"tozero\")\nfig.show()"
]
| [
[
"pandas.read_csv"
]
]
|
MSWagner/Flower-Classifier-Pytorch | [
"0bf79c7263d2f1edd6b1ef2e0ae773d2e6890306"
]
| [
"predict.py"
]
| [
"# PROGRAMMER: Michael Wagner\n# DATE CREATED: 08.04.2020 \n# PURPOSE: Predicts a flower class.\n#\n# Use argparse Expected Call with <> indicating expected user input:\n# python predict.py <path to image> <path to checkpoint> --top_k <k most likely classes> --category_names <JSON path to map categories to real names> -g\n# python predict.py flowers/test/1/image_06764.jpg checkpoints_test/checkpoint_best_accuracy.pth\n##\n\nimport torch\nfrom torchvision import transforms\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport json\n\nfrom helper import get_predict_input_args, load_checkpoint, build_model\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n opened_img = Image.open(image)\n \n img_transforms = transforms.Compose([\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ])\n \n return img_transforms(opened_img)\n\ndef predict(image, model, topk, useGPU=True):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n # Use GPU if it's available\n device = torch.device(\"cuda\" if torch.cuda.is_available() and useGPU else \"cpu\")\n print(f\"Device: {device}\")\n \n model.eval()\n model.to(device);\n \n image = image.unsqueeze_(0)\n \n with torch.no_grad():\n inputs = image.to(device)\n output = model.forward(inputs)\n probability = F.softmax(output.data,dim=1)\n \n return probability.topk(topk)\n\ndef main():\n input_args = get_predict_input_args()\n \n # Load checkpoint\n checkpoint, validation_accuracy = load_checkpoint(input_args.checkpoint_path)\n \n useGPU = input_args.gpu is not None\n \n # Build model\n model = build_model(checkpoint[\"arch\"],\n checkpoint[\"hidden_units_01\"], \n checkpoint[\"hidden_units_02\"], \n checkpoint)\n\n # Process image\n processed_image = process_image(input_args.image_path)\n \n # Predict topK\n topk = predict(processed_image, model, input_args.top_k, useGPU)\n \n # Show result\n with open(input_args.category_names_path, 'r') as f:\n cat_to_name = json.load(f)\n \n probs = topk[0][0].cpu().numpy()\n categories = [cat_to_name[str(category_index+1)] for category_index in topk[1][0].cpu().numpy()]\n \n for i in range(len(probs)):\n print(\"TopK {}, Probability: {}, Category: {}\\n\".format(i+1, probs[i], categories[i]))\n \nif __name__ == '__main__':\n main()"
]
| [
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.functional.softmax"
]
]
|
Shaunimsorry/MLFPL | [
"32927be4d141de67e457eac317cfa6ea02757046"
]
| [
"bin/roiCalculations.py"
]
| [
"#This Script Aims to calculate all the ROI's of all the current players in the EPL FPL Season against all their previous appeareances for ROI calculations.\n\nimport numpy as np\nimport pandas as pd\nimport json\nimport requests as r\n\n#Formatting\npd.set_option('display.max_columns',20)\npd.set_option('display.max_rows',100)\npd.set_option('display.width',320)\nnp.set_printoptions(linewidth=320)\n\n#FPL API\nfplapi_endpoint = 'https://fantasy.premierleague.com/api/bootstrap-static/'\n\n#import historical cleaned data\ncleaned2020Data = pd.read_csv('../data/2020-21/cleaned_players.csv', header=0)\ncleaned2019Data = pd.read_csv('../data/2019-20/cleaned_players.csv')\ncleaned2018Data = pd.read_csv('../data/2018-19/cleaned_players.csv')\ncleaned2017Data = pd.read_csv('../data/2017-18/cleaned_players.csv')\ncleaned2016Data = pd.read_csv('../data/2016-17/cleaned_players.csv', encoding='windows-1252')\nprint ('PlayerData Loaded')\n\n#import 2020 raw player data\nraw2020PlayerData = pd.read_csv('../data/2020-21/players_raw.csv')\n#import 2020 TeamsList\nfpl_teams_data = pd.read_csv('../data/2020-21/teams.csv')\n\n#Lets Setup a list of players\nEPLfpl2020_PlayerList = []\n\n#Load in the previous GWData\ngw_keyword = 'gw1'\ngw_data = pd.read_csv('../data/2020-21/gws/'+str(gw_keyword)+'.csv')\n\n#player_db = [['first_name','second_name','team','2020Season_points','2019Season_points', '2018Season_points', '2017Season_points']]\nplayer_db = [['first_name',\n 'second_name',\n 'team',\n 'position',\n 'this round chance',\n 'next round chance',\n\n '2020Season_points',\n '2020Season_cost',\n '2020Season_roi',\n\n '2019Season_points',\n '2019Season_cost',\n '2019Season_roi',\n\n '2018Season_points',\n '2018Season_cost',\n '2018Season_roi',\n\n '2017Season_points',\n '2017Season_cost',\n '2017Season_roi',\n\n 'average_ROI']]\n\nplayer_tempRowCount = 0\n\n##THis is just a sample:\n# while player_tempRowCount < (cleaned2020Data.shape[0]):\n# #attempt to find the team name\n# playerSecondName = cleaned2020Data.loc[player_tempRowCount].second_name\n# playerTeamId = raw2020PlayerData.loc[raw2020PlayerData.second_name == playerSecondName].team.index[0]\n# playerTeamName = fpl_teams_data.loc[fpl_teams_data.id == raw2020PlayerData.loc[playerTeamId].team].name.index[0]\n# player_team_shortName = (fpl_teams_data.loc[playerTeamName].short_name)\n#\n# #Make sure to keep the shape of this identical to the one above\n# newDatabase.append(\n# [cleaned2020Data.loc[player_tempRowCount].first_name,\n# cleaned2020Data.loc[player_tempRowCount].second_name,\n# player_team_shortName,\n# cleaned2020Data.loc[player_tempRowCount].total_points,\n# cleaned2020Data.loc[player_tempRowCount].now_cost])\n# player_tempRowCount = player_tempRowCount + 1\n# print (\"Finished Setting Up Player List\")\n#\n# #Setupthe Dataframe\n# dataFrame = pd.DataFrame(newDatabase)\n# print (dataFrame)\n\nwhile player_tempRowCount < cleaned2020Data.shape[0]:\n #Code To Decant The Team Code\n player_firstName = cleaned2020Data.loc[player_tempRowCount].first_name\n player_secondName = cleaned2020Data.loc[player_tempRowCount].second_name\n player_index = raw2020PlayerData.loc[raw2020PlayerData.second_name == player_secondName].index[0]\n playerTeamName = fpl_teams_data.loc[fpl_teams_data.id == raw2020PlayerData.loc[player_index].team].name.index[0]\n player_team_shortName = (fpl_teams_data.loc[playerTeamName].short_name)\n\n #Remap the data into the dynamic GWSheet\n playerNameString = str(str(player_firstName+\" \"+str(player_secondName)))\n 
playerGW_index = gw_data.loc[gw_data.name == playerNameString]\n\n #I noticed some players are in the cleaned2020 sheet but now in the GW sheet must ask why\n\n #Decant Chance of playing COP\n player_thisRoundChance = raw2020PlayerData.loc[player_index].chance_of_playing_this_round\n player_nextRoundChance = raw2020PlayerData.loc[player_index].chance_of_playing_next_round\n\n\n\n\n\n\n\n\n\n #Code To Decant 2019 total points of a player in THIS season\n\n player_2019_index = cleaned2019Data.loc[cleaned2019Data.second_name == player_secondName]\n #Check if this player even played in the 2019 Season and log a value for total_points\n if player_2019_index.empty == False:\n player_2019_index = cleaned2019Data.loc[cleaned2019Data.second_name == player_secondName].index[0]\n player_2019_total_points = cleaned2019Data.loc[player_2019_index].total_points\n player_2019_now_cost = cleaned2019Data.loc[player_2019_index].now_cost\n else:\n player_2019_now_cost = -1\n player_2019_total_points = -1\n\n #Repeat process for 2018\n player_2018_index = cleaned2018Data.loc[cleaned2018Data.second_name == player_secondName]\n if player_2018_index.empty == False:\n player_2018_index = cleaned2018Data.loc[cleaned2018Data.second_name == player_secondName].index[0]\n player_2018_total_points = cleaned2018Data.loc[player_2018_index].total_points\n player_2018_now_cost= cleaned2018Data.loc[player_2018_index].now_cost\n else:\n player_2018_now_cost = -1\n player_2018_total_points = -1\n\n #Repeat process for 2017\n player_2017_index = cleaned2017Data.loc[cleaned2017Data.second_name == player_secondName]\n if player_2017_index.empty == False:\n player_2017_index = cleaned2017Data.loc[cleaned2017Data.second_name == player_secondName].index[0]\n player_2017_total_points = cleaned2017Data.loc[player_2017_index].total_points\n player_2017_now_cost = cleaned2017Data.loc[player_2017_index].now_cost\n else:\n player_2017_now_cost = -1\n player_2017_total_points = -1\n\n #Build data for the average ROI calculation\n #Must change if player did NOT play in previous seasons\n\n player_avg3s_now_cost = 0\n player_avg3s_total_points = 0\n player_avg3s_roi = 0\n\n if player_2017_now_cost != -1:\n player_avg3s_now_cost = player_avg3s_now_cost + player_2017_now_cost\n player_avg3s_total_points = player_avg3s_total_points + player_2017_total_points\n\n if player_2018_now_cost != -1:\n player_avg3s_now_cost = player_avg3s_now_cost + player_2018_now_cost\n player_avg3s_total_points = player_avg3s_total_points + player_2018_total_points\n\n if player_2019_now_cost != -1:\n player_avg3s_now_cost = player_avg3s_now_cost + player_2019_now_cost\n player_avg3s_total_points = player_avg3s_total_points + player_2019_total_points\n\n player_avg3s_now_cost = player_avg3s_now_cost + cleaned2020Data.loc[player_tempRowCount].now_cost\n player_avg3s_total_points = player_avg3s_total_points + cleaned2020Data.loc[player_tempRowCount].total_points\n\n\n\n player_avg3s_roi = player_avg3s_total_points / player_avg3s_now_cost\n\n\n #Making the list before dataframe baking\n player_db.append([\n cleaned2020Data.loc[player_tempRowCount].first_name,\n cleaned2020Data.loc[player_tempRowCount].second_name,\n player_team_shortName,\n cleaned2020Data.loc[player_tempRowCount].element_type,\n player_thisRoundChance,\n player_nextRoundChance,\n\n cleaned2020Data.loc[player_tempRowCount].total_points,\n cleaned2020Data.loc[player_tempRowCount].now_cost,\n #ROI\n cleaned2020Data.loc[player_tempRowCount].total_points / 
cleaned2020Data.loc[player_tempRowCount].now_cost,\n\n player_2019_total_points,\n player_2019_now_cost,\n #ROI\n player_2019_total_points / player_2019_now_cost,\n\n player_2018_total_points,\n player_2018_now_cost,\n #ROI\n player_2018_total_points / player_2018_now_cost,\n\n player_2017_total_points,\n player_2017_now_cost,\n #ROI\n player_2017_total_points / player_2017_now_cost,\n\n #Average 3Season ROI\n player_avg3s_roi\n\n\n\n ])\n\n player_tempRowCount = player_tempRowCount + 1\nvisual_dataFrame = pd.DataFrame(player_db)\nprint (\"Printing Visual Reference\")\nprint (visual_dataFrame)\n\nplayer_db.remove(player_db[0])\ndataFrame = pd.DataFrame(player_db)\n\n"
]
| [
[
"numpy.set_printoptions",
"pandas.read_csv",
"pandas.set_option",
"pandas.DataFrame"
]
]
|
DionLudjen-13/GAN-TD3 | [
"3d19d28851985f73fb2a9aea9fbf88f4f8f9ee24"
]
| [
"src/implement.py"
]
| [
"import numpy as np\nimport torch\nimport gym\nimport argparse\nimport os\n\nimport utils\nimport TD3\nfrom datetime import datetime\nfrom GenRep import GenerativeReplay\n\n\nif __name__ == \"__main__\":\n\tprint(\"Starting...\")\n\t\n\t# Hyper parameters\n\n\t# General\n\tUSE_GENERATIVE = True\n\tNO_REPLAY = False\n\tRECORD_TRAINING_TIMES = False\n\tENV = \"MountainCarContinuous-v0\"\n\tSTART_TIMESTEPS = 15e3\n\tEND = START_TIMESTEPS + 50e5\n\tEVAL_FREQ = 5e3\n\tMAX_TIMESTEPS = 2e7\n\tSEED = 10\n\tFILE_NAME = \"a\"\n\n\t# TD3 parameters\n\tEXPL_NOISE = 0.1\n\tBATCH_SIZE = 128\n\tDISCOUNT = 0.99\n\tTAU = 0.005\n\tPOLICY_NOISE = 0.2\n\tNOISE_CLIP = 0.5\n\tPOLICY_FREQ = 2\n\n\tprint_interval = 1000\n\n\tprint(f\"Start new process with {ENV} and file name {FILE_NAME}\")\n\n\tif not os.path.exists(\"./results\"):\n\t\tos.makedirs(\"./results\")\n\n\tenv = gym.make(ENV)\n\n\t# Set seeds\n\tenv.seed(SEED)\n\ttorch.manual_seed(SEED)\n\tnp.random.seed(SEED)\n\t\n\t# Some env dimentions\n\tstate_dim = env.observation_space.shape[0]\n\taction_dim = env.action_space.shape[0] \n\tmax_action = float(env.action_space.high[0])\n\n\tprint(state_dim, action_dim, max_action)\n\tprint(env.observation_space, env.action_space)\n\tprint(\"GenerativeReplay: \", USE_GENERATIVE)\n\n\t# Build TD3\n\tkwargs = {\n\t\t\"state_dim\": state_dim,\n\t\t\"action_dim\": action_dim,\n\t\t\"max_action\": max_action,\n\t\t\"discount\": DISCOUNT,\n\t\t\"tau\": TAU,\n\t\t\"policy_noise\": POLICY_NOISE,# * max_action,\n\t\t\"noise_clip\": NOISE_CLIP,# * max_action,\n\t\t\"policy_freq\": POLICY_FREQ\n\t}\n\n\tpolicy = TD3.TD3(**kwargs)\n\n\t# Use optim buffer or not. Type 0 uses episode reward and 1 uses state as comparison\n\tOPTIMAL_BUFFER = True\n\tOPTIMAL_BUFFER_TYPE = 0\n\n\tprint(\"Use Optim Buffer:\", OPTIMAL_BUFFER)\n\tif OPTIMAL_BUFFER:\n\t\tif OPTIMAL_BUFFER_TYPE == 0:\n\t\t\tprint(\"Buffer Type: Episode Reward\")\n\t\telse:\t\t\n\t\t\tprint(\"Buffer Type: State\")\n\n\t# Make the replay component\n\treplay_component = None\n\tif USE_GENERATIVE:\n\t\treplay_component = GenerativeReplay()\n\t\tif OPTIMAL_BUFFER:\n\t\t\treplay_component.use_optim_buffer = True\n\telif NO_REPLAY:\n\t\treplay_component = utils.ReplayBuffer(state_dim, action_dim, 256)\n\t\tOPTIMAL_BUFFER = False\n\telse:\n\t\treplay_component = utils.ReplayBuffer(state_dim, action_dim)\n\t\tOPTIMAL_BUFFER = False\n\t\n\n\tstate, done = env.reset(), False\n\tepisode_reward = 0\n\tepisode_timesteps = 0\n\tepisode_num = 0\n\tmax_state = None\n\tevaluations = []\n\n\tTD3_training = False\n\n\n\t# During the initial exploration of the environment, achieving positive reward is extremely rare\n\t# In order see the capabilities (without having to re-run) of ER in a positive reward scenario this is implemented\n\tguarantee_finish = False\n\tgf_count = 1\n\taction = None\n\tgf = 0\n\tif guarantee_finish:\n\t\taction = [1.0]\n\n\n\t\n\t\t\n\tfor t in range(int(MAX_TIMESTEPS)):\n\t\t# env.render()\n\n\t\tepisode_timesteps += 1\n\n\t\tif t >= END:\n\t\t\traise ValueError\n\n\t\t# Select action randomly or according to policy based on the start timesteps\n\t\tif t < START_TIMESTEPS:\n\t\t\t\n\t\t\tif guarantee_finish:\n\t\t\t\tif state[1] < 0.0001 and action[0] == 1.0:\n\t\t\t\t\taction = np.array([-1.0])\n\t\t\t\telif state[1] > -0.0001 and action[0] == -1.0:\n\t\t\t\t\taction = np.array([1.0])\n\t\t\telse:\n\t\t\t\taction = env.action_space.sample()\n\t\t\tepisode_num = 0\n\t\telse:\n\t\t\tif OPTIMAL_BUFFER and replay_component.highest_reward_state == 
None:\n\t\t\t\treplay_component.edit_optim_buffer(-10000)\n\t\t\treplay_component.training = True\n\t\t\taction = (\n\t\t\t\tpolicy.select_action(np.array(state))\n\t\t\t\t+ np.random.normal(0, max_action * EXPL_NOISE, size=action_dim)\n\t\t\t).clip(-max_action, max_action)\n\n\t\t# Perform action\n\t\tnext_state, reward, done, _ = env.step(action)\n\n\t\tdone_bool = float(done) if episode_timesteps < env._max_episode_steps else 0\n\n\t\t# Store data in replay component\n\t\tGAN_training = replay_component.add(state, action, next_state, reward, done_bool)\n\t\t\n\t\tif OPTIMAL_BUFFER and OPTIMAL_BUFFER_TYPE == 1:\n\t\t\tif max_state == None:\n\t\t\t\tmax_state = -100\n\t\t\telif state[0] > max_state:\n\t\t\t\tmax_state = state[0]\n\n\t\tstate = next_state\n\t\tepisode_reward += reward\n\t\t\n\n\t\t\n\t\t# Train agent after collecting sufficient data\n\t\tif t >= START_TIMESTEPS:\n\t\t\t# env.render()\n\t\t\tif TD3_training:\n\t\t\t\tprint(\"TD3 Training...\")\n\t\t\t\tTD3_training = True\n\t\t\tpolicy.train(replay_component, BATCH_SIZE)\n\t\t\t\n\t\tif done: \n\t\t\tif guarantee_finish:\n\t\t\t\tif episode_reward > 0:\n\t\t\t\t\tif gf == gf_count-1:\n\t\t\t\t\t\tguarantee_finish = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tgf += 1\n\t\t\tif OPTIMAL_BUFFER:\n\t\t\t\tif replay_component.training and t >= START_TIMESTEPS:\n\t\t\t\t\tif OPTIMAL_BUFFER_TYPE == 0:\n\t\t\t\t\t\tif episode_reward < -10 or episode_reward > 0:\n\t\t\t\t\t\t\tif replay_component.highest_reward_state < episode_reward:\n\t\t\t\t\t\t\t\tprint(episode_reward)\n\t\t\t\t\t\t\t\treplay_component.edit_optim_buffer(episode_reward)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif replay_component.highest_reward_state < max_state:\n\t\t\t\t\t\t\tprint(max_state)\n\t\t\t\t\t\t\treplay_component.edit_optim_buffer(max_state)\n\t\t\t\n\n\n\t\t\tprint(f\"Episode {episode_num}, reward is {episode_reward}, episode_timesteps {episode_timesteps}\")\n\t\t\tif t >= START_TIMESTEPS:\n\t\t\t\tevaluations.append(episode_reward)\n\t\t\t\tnp.save(f\"./results/{FILE_NAME}\", evaluations)\n\n\t\t\t# Reset environment\n\t\t\tstate, done = env.reset(), False\n\t\t\tepisode_reward = 0\n\t\t\tepisode_timesteps = 0\n\t\t\tepisode_num += 1\n\t\t"
]
| [
[
"numpy.random.normal",
"numpy.array",
"numpy.random.seed",
"numpy.save",
"torch.manual_seed"
]
]
|
sidruns30/DeepClassifierNoveltyDetection | [
"e17ad8a31d13790bc517de7f2b2a4b50ad35e065"
]
| [
"sample_data.py"
]
| [
"import numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\n\n\ndef phase_to_sin_cos(Y):\n \"\"\"Reparametrize sinusoid parameters:\n w, A, phi, b --> p, A_cos, A_sin, b\n\n Estimating these parameters seems to be easier in practice.\n \"\"\"\n w, A, phi, b = Y.T\n\n A_cos = A * np.sin(phi)\n A_sin = A * np.cos(phi)\n p = w ** -1\n\n return np.c_[p, A_cos, A_sin, b]\n\n\ndef _random_times(N, even=True, t_max=4 * np.pi, n_min=None, n_max=None, t_shape=2, t_scale=0.05):\n if n_min is None and n_max is None:\n raise ValueError(\"Either n_min or n_max is required.\")\n elif n_min is None:\n n_min = n_max\n elif n_max is None:\n n_max = n_min\n\n if even:\n return np.tile(np.linspace(0., t_max, n_max), (N, 1))\n else:\n lags = [t_scale * np.random.pareto(t_shape, size=np.random.randint(n_min, n_max + 1))\n for i in range(N)]\n return [np.r_[0, np.cumsum(lags_i)] for lags_i in lags]\n\n\ndef _periodic_params(N, A_min, A_max, w_min, w_max):\n w = 1. / np.random.uniform(1. / w_max, 1. / w_min, size=N)\n A = np.random.uniform(A_min, A_max, size=N)\n phi = 2 * np.pi * np.random.random(size=N)\n b = np.random.normal(scale=1, size=N)\n\n return w, A, phi, b\n\n\ndef _sinusoid(w, A, phi, b):\n return lambda t: A * np.sin(2 * np.pi * w * t + phi) + b\n\n\ndef periodic(N, n_min, n_max, t_max=4 * np.pi, even=True, A_min=0.5, A_max=2.0,\n noise_sigma=0., w_min=0.1, w_max=1., t_shape=2, t_scale=0.05,\n kind='sinusoid'):\n \"\"\"Returns periodic data (values, (freq, amplitude, phase, offset))\"\"\"\n t = _random_times(N, even, t_max, n_min, n_max, t_shape, t_scale)\n w, A, phi, b = _periodic_params(N, A_min, A_max, w_min, w_max)\n\n X_list = [np.c_[t[i], _sinusoid(w[i], A[i], phi[i], b[i])(t[i])] for i in range(N)]\n X_raw = pad_sequences(X_list, maxlen=n_max, value=np.nan, dtype='float', padding='post')\n X = X_raw.copy()\n X[:, :, 1] = X_raw[:, :, 1] + np.random.normal(scale=noise_sigma + 1e-9, size=(N, n_max))\n Y = np.c_[w, A, phi, b]\n \n return X, Y, X_raw\n"
]
| [
[
"numpy.random.normal",
"numpy.sin",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.cos",
"numpy.cumsum",
"numpy.random.random",
"numpy.linspace"
]
]
|
chulminkw/yolov3-tf2 | [
"22d471b7126ffeef580316b226d13bf35155d32f"
]
| [
"gen_test02.py"
]
| [
"from absl import app, flags, logging\r\nfrom absl.flags import FLAGS\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport cv2\r\nfrom tensorflow.keras.callbacks import (\r\n ReduceLROnPlateau,\r\n EarlyStopping,\r\n ModelCheckpoint,\r\n TensorBoard\r\n)\r\nfrom yolov3_tf2.models import (\r\n YoloV3, YoloV3Tiny, YoloLoss,\r\n yolo_anchors, yolo_anchor_masks,\r\n yolo_tiny_anchors, yolo_tiny_anchor_masks\r\n)\r\nfrom yolov3_tf2.utils import freeze_all\r\nimport yolov3_tf2.dataset as dataset\r\n\r\nclass FLAGS:\r\n dataset = './data/voc2012_train.tfrecord'\r\n val_dataset= None\r\n tiny = False\r\n weights = None\r\n classes = './data/voc2012.names'\r\n mode = 'eager_tf'\r\n transfer = 'none'\r\n size = 416\r\n epochs = 2\r\n batch_size = 8\r\n learning_rate= 1e-3\r\n num_classes= 20\r\n weights_num_classes=None\r\n\r\n\r\nanchors = yolo_anchors\r\nanchor_masks = yolo_anchor_masks\r\n\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\nfor physical_device in physical_devices:\r\n tf.config.experimental.set_memory_growth(physical_device, True)\r\n\r\nif FLAGS.tiny:\r\n model = YoloV3Tiny(FLAGS.size, training=True,\r\n classes=FLAGS.num_classes)\r\n anchors = yolo_tiny_anchors\r\n anchor_masks = yolo_tiny_anchor_masks\r\nelse:\r\n model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)\r\n anchors = yolo_anchors\r\n anchor_masks = yolo_anchor_masks\r\n\r\nif FLAGS.dataset:\r\n train_dataset = dataset.load_tfrecord_dataset(FLAGS.dataset, FLAGS.classes, FLAGS.size)\r\nelse:\r\n train_dataset = dataset.load_fake_dataset()\r\ntrain_dataset = train_dataset.shuffle(buffer_size=512)\r\ntrain_dataset = train_dataset.batch(FLAGS.batch_size)\r\ntrain_dataset = train_dataset.map(lambda x, y: (\r\n dataset.transform_images(x, FLAGS.size),\r\n dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))\r\ntrain_dataset = train_dataset.prefetch(\r\n buffer_size=tf.data.experimental.AUTOTUNE)\r\n\r\ntrain_ds = next(iter(train_dataset))\r\nprint(len(train_ds), type(train_ds), type(train_ds[0]), train_ds[0].shape, type(train_ds[1]), len(train_ds[1]))\r\nprint(train_ds[1][0].shape)"
]
| [
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth"
]
]
|
Kanahiro/dbf-df-translator | [
"6603ca1ac306203bf8c95e6545685c509324a438"
]
| [
"libpysal/examples/base.py"
]
| [
"\"\"\"\nBase class for managing example datasets.\n\"\"\"\n\n# Authors: Serge Rey\n# License: BSD 3 Clause\n\nimport io\nimport os\nimport webbrowser\nfrom os import environ, makedirs\nfrom os.path import exists, expanduser, join\nimport zipfile\nimport requests\nimport pandas\nfrom bs4 import BeautifulSoup\nfrom ..io import open as ps_open\n\n\nfrom typing import Union\n\nPYSALDATA = \"pysal_data\"\n\n\ndef get_data_home():\n \"\"\"Return the path of the ``libpysal`` data directory. This folder (``~/pysal_data``)\n is used by some large dataset loaders to avoid downloading the data multiple times.\n Alternatively, it can be set by the 'PYSALDATA' environment variable or programmatically\n by giving an explicit folder path. The ``'~'`` symbol is expanded to the user home\n folder If the folder does not already exist, it is automatically created.\n\n Returns\n -------\n data_home : str\n The system path where the data is/will be stored.\n\n \"\"\"\n\n data_home = environ.get(\"PYSALDATA\", join(\"~\", PYSALDATA))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef get_list_of_files(dir_name):\n \"\"\"Create a list of files and sub-directories in ``dir_name``.\n\n Parameters\n ----------\n dir_name : str\n The path to the directory or examples.\n\n Returns\n -------\n all_files : list\n All file and directory paths.\n\n Raises\n ------\n FileNotFoundError\n If the file or directory is not found.\n\n \"\"\"\n\n # names in the given directory\n all_files = list()\n try:\n file_list = os.listdir(dir_name)\n # Iterate over all the entries\n for entry in file_list:\n # Create full path\n full_path = os.path.join(dir_name, entry)\n # If entry is a directory then get the list of files in this directory\n if os.path.isdir(full_path):\n all_files = all_files + get_list_of_files(full_path)\n else:\n all_files.append(full_path)\n except FileNotFoundError:\n pass\n\n return all_files\n\n\ndef type_of_script() -> str:\n \"\"\"Helper function to determine run context.\"\"\"\n\n try:\n ipy_str = str(type(get_ipython()))\n if \"zmqshell\" in ipy_str:\n return \"jupyter\"\n if \"terminal\" in ipy_str:\n return \"ipython\"\n except:\n return \"terminal\"\n\n\nclass Example:\n \"\"\"An example dataset.\n\n Parameters\n ----------\n name : str\n The example dataset name.\n description : str\n The example dataset description.\n n : int\n The number of records in the dataset.\n k : int\n The number of fields in the dataset.\n download_url : str\n The URL to download the dataset.\n explain_url : str\n The URL to the dataset's READEME file.\n\n Attributes\n ----------\n root : str\n The ``name`` parameter with filled spaces (_).\n installed : bool\n ``True`` if the example is installed, otherwise ``False``.\n zipfile : zipfile.ZipFile\n The archived dataset.\n\n \"\"\"\n\n def __init__(self, name, description, n, k, download_url, explain_url):\n self.name = name\n self.description = description\n self.n = n\n self.k = k\n self.download_url = download_url\n self.explain_url = explain_url\n self.root = name.replace(\" \", \"_\")\n self.installed = self.downloaded()\n\n def get_local_path(self, path=get_data_home()) -> str:\n \"\"\"Get the local path for example.\"\"\"\n\n return join(path, self.root)\n\n def get_path(self, file_name, verbose=True) -> Union[str, None]:\n \"\"\"Get the path for local file.\"\"\"\n\n file_list = self.get_file_list()\n for file_path in file_list:\n base_name = os.path.basename(file_path)\n if file_name == base_name:\n return 
file_path\n if verbose:\n print(\"{} is not a file in this example\".format(file_name))\n return None\n\n def downloaded(self) -> bool:\n \"\"\"Check if the example has already been installed.\"\"\"\n\n path = self.get_local_path()\n if os.path.isdir(path):\n self.installed = True\n return True\n return False\n\n def explain(self) -> None:\n \"\"\"Provide a description of the example.\"\"\"\n\n file_name = self.explain_url.split(\"/\")[-1]\n if file_name == \"README.md\":\n explain_page = requests.get(self.explain_url)\n crawled = BeautifulSoup(explain_page.text, \"html.parser\")\n print(crawled.text)\n return None\n if type_of_script() == \"terminal\":\n webbrowser.open(self.explain_url)\n return None\n from IPython.display import IFrame\n\n return IFrame(self.explain_url, width=700, height=350)\n\n def download(self, path=get_data_home()):\n \"\"\"Download the files for the example.\"\"\"\n\n if self.downloaded():\n print(\"Already downloaded\")\n else:\n request = requests.get(self.download_url)\n archive = zipfile.ZipFile(io.BytesIO(request.content))\n target = join(path, self.root)\n print(\"Downloading {} to {}\".format(self.name, target))\n archive.extractall(path=target)\n self.zipfile = archive\n self.installed = True\n\n def get_file_list(self) -> Union[list, None]:\n \"\"\"Get the list of local files for the example.\"\"\"\n path = self.get_local_path()\n if os.path.isdir(path):\n return get_list_of_files(path)\n return None\n\n def json_dict(self) -> dict:\n \"\"\"Container for example meta data.\"\"\"\n meta = {}\n meta[\"name\"] = self.name\n meta[\"description\"] = self.description\n meta[\"download_url\"] = self.download_url\n meta[\"explain_url\"] = self.explain_url\n meta[\"root\"] = self.root\n return meta\n\n def load(self, file_name) -> io.FileIO:\n \"\"\"Dispatch to libpysal.io to open file.\"\"\"\n pth = self.get_path(file_name)\n if pth:\n return ps_open(pth)\n\n\nclass Examples:\n \"\"\"Manager for pysal example datasets.\"\"\"\n\n def __init__(self):\n self.datasets = {}\n\n def add_examples(self, examples):\n \"\"\"Add examples to the set of datasets available.\"\"\"\n self.datasets.update(examples)\n\n def explain(self, example_name) -> str:\n if example_name in self.datasets:\n return self.datasets[example_name].explain()\n else:\n print(\"not available\")\n\n def available(self):\n \"\"\"Report available datasets.\"\"\"\n datasets = self.datasets\n names = list(datasets.keys())\n names.sort()\n rows = []\n for name in names:\n description = datasets[name].description\n installed = datasets[name].installed\n rows.append([name, description, installed])\n datasets = pandas.DataFrame(\n data=rows, columns=[\"Name\", \"Description\", \"Installed\"]\n )\n datasets.style.set_properties(subset=[\"text\"], **{\"width\": \"300px\"})\n print(datasets.to_string(max_colwidth=60))\n\n def load(self, example_name: str) -> Example:\n \"\"\"Load example dataset, download if not locally available.\"\"\"\n if example_name in self.datasets:\n example = self.datasets[example_name]\n if example.installed:\n return example\n else:\n \"Downloading: {}\".format(example_name)\n example.download()\n return example\n else:\n print(\"Example not available: {}\".format(example_name))\n return None\n\n def download_remotes(self):\n \"\"\"Download all remotes.\"\"\"\n names = list(self.remotes.keys())\n names.sort()\n\n for name in names:\n print(name)\n example = self.remotes[name]\n try:\n example.download()\n except:\n print(\"Example not downloaded: {}\".format(name))\n\n def 
get_installed_names(self) -> list:\n \"\"\"Return names of all currently installed datasets.\"\"\"\n ds = self.datasets\n return [name for name in ds if ds[name].installed]\n\n\nexample_manager = Examples()\n"
]
| [
[
"pandas.DataFrame"
]
]
|
xyc1207/benchmarking-gnns | [
"9ba25a2825e8c155a93730d6e8f8752090292942"
]
| [
"nets/TSP_edge_classification/three_wl_gnn_net.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl\nimport time\n\n\"\"\"\n 3WLGNN / ThreeWLGNN\n Provably Powerful Graph Networks (Maron et al., 2019)\n https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf\n \n CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/\n\"\"\"\n\nfrom layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool\nfrom layers.mlp_readout_layer import MLPReadout\n\nclass ThreeWLGNNNet(nn.Module):\n def __init__(self, net_params):\n super().__init__() \n self.in_dim_node = net_params['in_dim']\n self.in_dim_edge = net_params['in_dim_edge']\n depth_of_mlp = net_params['depth_of_mlp']\n hidden_dim = net_params['hidden_dim']\n n_classes = net_params['n_classes']\n dropout = net_params['dropout']\n n_layers = net_params['L']\n self.layer_norm = net_params['layer_norm']\n self.residual = net_params['residual']\n self.edge_feat = net_params['edge_feat']\n self.device = net_params['device']\n self.gin_like_readout = False # if True, uses GIN like readout, but without diag poool, since node task\n \n block_features = [hidden_dim] * n_layers # L here is the block number\n \n if not self.edge_feat:\n original_features_num = self.in_dim_node + 1 # Number of features of the input\n else:\n original_features_num = self.in_dim_node + self.in_dim_edge + 1 # Number of features of the input\n\n # sequential mlp blocks\n last_layer_features = original_features_num\n self.reg_blocks = nn.ModuleList()\n for layer, next_layer_features in enumerate(block_features):\n mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)\n self.reg_blocks.append(mlp_block)\n last_layer_features = next_layer_features\n \n \n if self.gin_like_readout:\n self.fc_layers = nn.ModuleList()\n for output_features in block_features:\n # each block's output will be pooled (thus have 2*output_features), and pass through a fully connected\n fc = FullyConnected(2*output_features, n_classes, activation_fn=None)\n self.fc_layers.append(fc)\n else:\n self.mlp_prediction = MLPReadout(2*(sum(block_features)+original_features_num), n_classes)\n\n\n def forward(self, x_no_edge_feat, x_with_edge_feat, edge_list):\n\n x = x_no_edge_feat\n \n if self.edge_feat:\n x = x_with_edge_feat\n \n # this x is the tensor with all info available => adj, node feat, and edge feat (if edge_feat flag True)\n \n if self.gin_like_readout:\n scores = torch.tensor(0, device=self.device, dtype=x.dtype)\n else:\n x_list = [x]\n \n for i, block in enumerate(self.reg_blocks):\n\n x = block(x)\n if self.gin_like_readout:\n x_out = torch.sum(x, dim=2) # from [1 x d_out x n x n] to [1 x d_out x n]\n node_feats = x_out.squeeze().permute(1,0) # reshaping in form of [n x d_out]\n \n # edge sources and destinations which are node indexes\n srcs, dsts = edge_list\n\n # To make a prediction for each edge e_{ij}, we first concatenate\n # node features h_i and h_j from the final GNN layer. 
\n # The concatenated features are then passed to an MLP for prediction.\n edge_outs = [torch.cat([node_feats[srcs[idx].item()], node_feats[dsts[idx].item()]]) for idx in range(len(srcs))] \n \n scores = self.fc_layers[i](torch.stack(edge_outs)) + scores\n else:\n x_list.append(x)\n \n if self.gin_like_readout:\n return scores\n else:\n # readout \n x_list = [torch.sum(x, dim=2) for x in x_list]\n x_list = torch.cat(x_list, dim=1)\n\n\n # node_feats will be of size (num nodes, features)\n node_feats = x_list.squeeze(0).permute(1,0)\n\n # edge sources and destinations which are node indexes\n srcs, dsts = edge_list\n\n # To make a prediction for each edge e_{ij}, we first concatenate\n # node features h_i and h_j from the final GNN layer. \n # The concatenated features are then passed to an MLP for prediction.\n edge_outs = [torch.cat([node_feats[srcs[idx].item()], node_feats[dsts[idx].item()]]) for idx in range(len(srcs))] \n edge_outs = self.mlp_prediction(torch.stack(edge_outs))\n\n return edge_outs\n\n def loss(self, pred, label):\n criterion = nn.CrossEntropyLoss(weight=None)\n loss = criterion(pred, label)\n\n return loss\n "
]
| [
[
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.tensor",
"torch.nn.CrossEntropyLoss",
"torch.sum"
]
]
|
AdaptiveBProcess/Simod | [
"8ec1a03093a958a7de13c3786659282e61609537"
]
| [
"src/simod/analyzers/sim_evaluator.py"
]
| [
"\"\"\"\nCreated on Fri Jan 10 11:40:46 2020\n\n@author: Manuel Camargo\n\"\"\"\nimport copy\nimport itertools\nimport multiprocessing\nimport random\nimport string\nimport time\nimport traceback\nimport warnings\nfrom multiprocessing import Pool\nfrom operator import itemgetter\n\nimport jellyfish as jf\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.stats import wasserstein_distance\nfrom tqdm import tqdm\n\nfrom . import alpha_oracle as ao\nfrom .alpha_oracle import Rel\nfrom simod.configuration import Configuration, Metric\n\n\nclass SimilarityEvaluator():\n \"\"\"\n This class evaluates the similarity of two event-logs\n \"\"\"\n\n def __init__(self, log_data: pd.DataFrame, simulation_data: pd.DataFrame, settings: Configuration, max_cases=500, dtype='log'):\n self.dtype = dtype\n self.log_data = copy.deepcopy(log_data)\n self.simulation_data = copy.deepcopy(simulation_data)\n self.max_cases = max_cases\n self.one_timestamp = settings.read_options.one_timestamp\n self._preprocess_data(dtype)\n\n def _preprocess_data(self, dtype):\n preprocessor = self._get_preprocessor(dtype)\n return preprocessor()\n\n def _get_preprocessor(self, dtype):\n if dtype == 'log':\n return self._preprocess_log\n elif dtype == 'serie':\n return self._preprocess_serie\n else:\n raise ValueError(dtype)\n\n def _preprocess_log(self):\n self.ramp_io_perc = 0.2\n self.log_data['source'] = 'log'\n self.simulation_data['source'] = 'simulation'\n data = pd.concat([self.log_data, self.simulation_data], axis=0, ignore_index=True)\n if (('processing_time' not in data.columns) or ('waiting_time' not in data.columns)):\n data = self.calculate_times(data)\n data = self.scaling_data(data)\n # save data\n self.log_data = data[data.source == 'log']\n self.simulation_data = data[data.source == 'simulation']\n self.alias = self.create_task_alias(data, 'task')\n\n self.alpha_concurrency = ao.AlphaOracle(self.log_data, self.alias, self.one_timestamp, True)\n # reformat and sampling data\n self.log_data = self.reformat_events(self.log_data.to_dict('records'), 'task')\n self.simulation_data = self.reformat_events(self.simulation_data.to_dict('records'), 'task')\n num_traces = int(len(self.simulation_data) * self.ramp_io_perc)\n self.simulation_data = self.simulation_data[num_traces:-num_traces]\n self.log_data = list(map(lambda i: self.log_data[i],\n np.random.randint(0, len(self.log_data), len(self.simulation_data))))\n\n\n def _preprocess_serie(self):\n # load data\n self.log_data['source'] = 'log'\n self.simulation_data['source'] = 'simulation'\n\n def measure_distance(self, metric: Metric, verbose=False):\n \"\"\"\n Measures the distance of two event-logs\n with with tsd or dl and mae distance\n\n Returns\n -------\n distance : float\n\n \"\"\"\n self.verbose = verbose\n # similarity measurement and matching\n evaluator = self._get_evaluator(metric)\n if metric in [Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:\n distance = evaluator(self.log_data, self.simulation_data, criteria=metric)\n else:\n distance = evaluator(self.log_data, self.simulation_data, metric)\n self.similarity = {'metric': metric, 'sim_val': np.mean([x['sim_score'] for x in distance])}\n\n def _get_evaluator(self, metric: Metric):\n if self.dtype == 'log':\n if metric in [Metric.TSD, Metric.DL, Metric.MAE, Metric.DL_MAE]:\n return self._evaluate_seq_distance\n elif metric is Metric.LOG_MAE:\n return self.log_mae_metric\n elif metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, 
Metric.CAL_EMD]:\n return self.log_emd_metric\n else:\n raise ValueError(metric)\n elif self.dtype == 'serie':\n if metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:\n return self.serie_emd_metric\n else:\n raise ValueError(metric)\n else:\n raise ValueError(self.dtype)\n\n # =============================================================================\n # Timed string distance\n # =============================================================================\n\n def _evaluate_seq_distance(self, log_data, simulation_data, metric: Metric):\n \"\"\"\n Timed string distance calculation\n\n Parameters\n ----------\n log_data : Ground truth list\n simulation_data : List\n\n Returns\n -------\n similarity : tsd similarity\n\n \"\"\"\n similarity = list()\n\n def pbar_async(p, msg):\n pbar = tqdm(total=reps, desc=msg)\n processed = 0\n while not p.ready():\n cprocesed = (reps - p._number_left)\n if processed < cprocesed:\n increment = cprocesed - processed\n pbar.update(n=increment)\n processed = cprocesed\n time.sleep(1)\n pbar.update(n=(reps - processed))\n p.wait()\n pbar.close()\n\n # define the type of processing sequencial or parallel\n cases = len(set([x['caseid'] for x in log_data]))\n if cases <= self.max_cases:\n args = (metric, simulation_data, log_data,\n self.alpha_concurrency.oracle,\n ({'min': 0, 'max': len(simulation_data)},\n {'min': 0, 'max': len(log_data)}))\n df_matrix = self._compare_traces(args)\n else:\n cpu_count = multiprocessing.cpu_count()\n mx_len = len(log_data)\n ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count / 2)))\n ranges = list(itertools.product(*[ranges, ranges]))\n reps = len(ranges)\n pool = Pool(processes=cpu_count)\n # Generate\n args = [(metric, simulation_data[r[0]['min']:r[0]['max']],\n log_data[r[1]['min']:r[1]['max']],\n self.alpha_concurrency.oracle,\n r) for r in ranges]\n p = pool.map_async(self._compare_traces, args)\n if self.verbose:\n pbar_async(p, f'evaluating {metric}:')\n pool.close()\n # Save results\n df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)\n df_matrix.sort_values(by=['i', 'j'], inplace=True)\n df_matrix = df_matrix.reset_index().set_index(['i', 'j'])\n if metric == Metric.DL_MAE:\n dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()\n mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()\n # MAE normalized\n max_mae = mae_matrix.max()\n mae_matrix = np.divide(mae_matrix, max_mae)\n # multiple both matrixes by Beta equal to 0.5\n dl_matrix = np.multiply(dl_matrix, 0.5)\n mae_matrix = np.multiply(mae_matrix, 0.5)\n # add each point in between\n cost_matrix = np.add(dl_matrix, mae_matrix)\n else:\n cost_matrix = df_matrix[['distance']].unstack().to_numpy()\n row_ind, col_ind = linear_sum_assignment(np.array(cost_matrix))\n # Create response\n for idx, idy in zip(row_ind, col_ind):\n similarity.append(dict(caseid=simulation_data[idx]['caseid'],\n sim_order=simulation_data[idx]['profile'],\n log_order=log_data[idy]['profile'],\n sim_score=(cost_matrix[idx][idy]\n if metric == Metric.MAE else\n (1 - (cost_matrix[idx][idy])))\n )\n )\n return similarity\n\n @staticmethod\n def _compare_traces(args):\n\n def ae_distance(et_1, et_2, st_1, st_2):\n cicle_time_s1 = (et_1 - st_1).total_seconds()\n cicle_time_s2 = (et_2 - st_2).total_seconds()\n ae = np.abs(cicle_time_s1 - cicle_time_s2)\n return ae\n\n def tsd_alpha(s_1, s_2, p_1, p_2, w_1, w_2, alpha_concurrency):\n \"\"\"\n Compute the Damerau-Levenshtein distance between two given\n strings (s_1 and s_2)\n Parameters\n 
----------\n comp_sec : dict\n alpha_concurrency : dict\n Returns\n -------\n Float\n \"\"\"\n\n def calculate_cost(s1_idx, s2_idx):\n t_1 = p_1[s1_idx] + w_1[s1_idx]\n if t_1 > 0:\n b_1 = (p_1[s1_idx] / t_1)\n cost = ((b_1 * np.abs(p_2[s2_idx] - p_1[s1_idx])) +\n ((1 - b_1) * np.abs(w_2[s2_idx] - w_1[s1_idx])))\n else:\n cost = 0\n return cost\n\n dist = {}\n lenstr1 = len(s_1)\n lenstr2 = len(s_2)\n for i in range(-1, lenstr1 + 1):\n dist[(i, -1)] = i + 1\n for j in range(-1, lenstr2 + 1):\n dist[(-1, j)] = j + 1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s_1[i] == s_2[j]:\n cost = calculate_cost(i, j)\n else:\n cost = 1\n dist[(i, j)] = min(\n dist[(i - 1, j)] + 1, # deletion\n dist[(i, j - 1)] + 1, # insertion\n dist[(i - 1, j - 1)] + cost # substitution\n )\n if i and j and s_1[i] == s_2[j - 1] and s_1[i - 1] == s_2[j]:\n if alpha_concurrency[(s_1[i], s_2[j])] == Rel.PARALLEL:\n cost = calculate_cost(i, j - 1)\n dist[(i, j)] = min(dist[(i, j)], dist[i - 2, j - 2] + cost) # transposition\n return dist[lenstr1 - 1, lenstr2 - 1]\n\n def gen(metric: Metric, serie1, serie2, oracle, r):\n \"\"\"Reads the simulation results stats\"\"\"\n try:\n df_matrix = list()\n for i, s1_ele in enumerate(serie1):\n for j, s2_ele in enumerate(serie2):\n element = {'i': r[0]['min'] + i, 'j': r[1]['min'] + j}\n if metric in [Metric.TSD, Metric.DL, Metric.DL_MAE]:\n element['s_1'] = s1_ele['profile']\n element['s_2'] = s2_ele['profile']\n element['length'] = max(len(s1_ele['profile']), len(s2_ele['profile']))\n if metric is Metric.TSD:\n element['p_1'] = s1_ele['proc_act_norm']\n element['p_2'] = s2_ele['proc_act_norm']\n element['w_1'] = s1_ele['wait_act_norm']\n element['w_2'] = s2_ele['wait_act_norm']\n if metric in [Metric.MAE, Metric.DL_MAE]:\n element['et_1'] = s1_ele['end_time']\n element['et_2'] = s2_ele['end_time']\n element['st_1'] = s1_ele['start_time']\n element['st_2'] = s2_ele['start_time']\n df_matrix.append(element)\n df_matrix = pd.DataFrame(df_matrix)\n if metric is Metric.TSD:\n df_matrix['distance'] = df_matrix.apply(\n lambda x: tsd_alpha(x.s_1, x.s_2, x.p_1, x.p_2, x.w_1, x.w_2, oracle) / x.length, axis=1)\n elif metric is Metric.DL:\n df_matrix['distance'] = df_matrix.apply(\n lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)\n elif metric is Metric.MAE:\n df_matrix['distance'] = df_matrix.apply(\n lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)\n elif metric is Metric.DL_MAE:\n df_matrix['dl_distance'] = df_matrix.apply(\n lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)\n df_matrix['mae_distance'] = df_matrix.apply(\n lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)\n else:\n raise ValueError(metric)\n return df_matrix\n except Exception:\n traceback.print_exc()\n\n return gen(*args)\n\n # =============================================================================\n # whole log MAE\n # =============================================================================\n def log_mae_metric(self, log_data: list, simulation_data: list, metric: Metric) -> list:\n \"\"\"\n Measures the MAE distance between two whole logs\n\n Parameters\n ----------\n log_data : list\n simulation_data : list\n Returns\n -------\n list\n \"\"\"\n similarity = list()\n log_data = pd.DataFrame(log_data)\n simulation_data = pd.DataFrame(simulation_data)\n log_timelapse = (log_data.end_time.max() - log_data.start_time.min()).total_seconds()\n sim_timelapse = 
(simulation_data.end_time.max() - simulation_data.start_time.min()).total_seconds()\n similarity.append({'sim_score': np.abs(sim_timelapse - log_timelapse)})\n return similarity\n\n # =============================================================================\n # Log emd distance\n # =============================================================================\n\n def log_emd_metric(self, log_data: list, simulation_data: list, criteria: Metric = Metric.HOUR_EMD) -> list:\n \"\"\"\n Measures the EMD distance between two logs on different aggregation\n levels specified by user by defaul per hour\n\n Parameters\n ----------\n log_data : list\n simulation_data : list\n criteria : TYPE, optional\n DESCRIPTION. The default is 'hour'.\n Returns\n -------\n list\n \"\"\"\n similarity = list()\n window = 1\n # hist_range = [0, int((window * 3600))]\n log_data = pd.DataFrame(log_data)\n simulation_data = pd.DataFrame(simulation_data)\n\n def split_date_time(dataframe, feature, source):\n day_hour = lambda x: x[feature].hour\n dataframe['hour'] = dataframe.apply(day_hour, axis=1)\n date = lambda x: x[feature].date()\n dataframe['date'] = dataframe.apply(date, axis=1)\n # create time windows\n i = 0\n daily_windows = dict()\n for hour in range(24):\n if hour % window == 0:\n i += 1\n daily_windows[hour] = i\n dataframe = dataframe.merge(\n pd.DataFrame.from_dict(daily_windows, orient='index').rename_axis('hour'),\n on='hour',\n how='left').rename(columns={0: 'window'})\n dataframe = dataframe[[feature, 'date', 'window']]\n dataframe.rename(columns={feature: 'timestamp'}, inplace=True)\n dataframe['source'] = source\n return dataframe\n\n data = split_date_time(log_data, 'start_time', 'log')\n data = data.append(split_date_time(log_data, 'end_time', 'log'), ignore_index=True)\n data = data.append(split_date_time(simulation_data, 'start_time', 'sim'), ignore_index=True)\n data = data.append(split_date_time(simulation_data, 'end_time', 'sim'), ignore_index=True)\n data['weekday'] = data.apply(lambda x: x.date.weekday(), axis=1)\n g_criteria = {Metric.HOUR_EMD: 'window', Metric.DAY_EMD: 'weekday', Metric.DAY_HOUR_EMD: ['weekday', 'window'],\n Metric.CAL_EMD: 'date'}\n similarity = list()\n for key, group in data.groupby(g_criteria[criteria]):\n w_df = group.copy()\n w_df = w_df.reset_index()\n basetime = w_df.timestamp.min().floor(freq='H')\n diftime = lambda x: (x['timestamp'] - basetime).total_seconds()\n w_df['rel_time'] = w_df.apply(diftime, axis=1)\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n log_hist = np.histogram(w_df[w_df.source == 'log'].rel_time, density=True)\n sim_hist = np.histogram(w_df[w_df.source == 'sim'].rel_time, density=True)\n if np.isnan(np.sum(log_hist[0])) or np.isnan(np.sum(sim_hist[0])):\n similarity.append({'window': key, 'sim_score': 0})\n else:\n similarity.append({'window': key, 'sim_score': wasserstein_distance(log_hist[0], sim_hist[0])})\n return similarity\n\n # =============================================================================\n # serie emd distance\n # =============================================================================\n\n def serie_emd_metric(self, log_data, simulation_data, criteria: Metric = Metric.HOUR_EMD):\n similarity = list()\n window = 1\n log_data = pd.DataFrame(log_data)\n simulation_data = pd.DataFrame(simulation_data)\n\n def split_date_time(dataframe, feature, source):\n day_hour = lambda x: x[feature].hour\n dataframe['hour'] = dataframe.apply(day_hour, axis=1)\n date = lambda x: x[feature].date()\n 
dataframe['date'] = dataframe.apply(date, axis=1)\n # create time windows\n i = 0\n daily_windows = dict()\n for x in range(24):\n if x % window == 0:\n i += 1\n daily_windows[x] = i\n dataframe = dataframe.merge(\n pd.DataFrame.from_dict(daily_windows, orient='index').rename_axis('hour'),\n on='hour', how='left').rename(columns={0: 'window'})\n dataframe = dataframe[[feature, 'date', 'window']]\n dataframe.rename(columns={feature: 'timestamp'}, inplace=True)\n dataframe['source'] = source\n return dataframe\n\n data = split_date_time(log_data, 'timestamp', 'log')\n data = data.append(split_date_time(simulation_data, 'timestamp', 'sim'), ignore_index=True)\n data['weekday'] = data.apply(lambda x: x.date.weekday(), axis=1)\n g_criteria = {Metric.HOUR_EMD: 'window', Metric.DAY_EMD: 'weekday', Metric.DAY_HOUR_EMD: ['weekday', 'window'],\n Metric.CAL_EMD: 'date'}\n similarity = list()\n for key, group in data.groupby(g_criteria[criteria]):\n w_df = group.copy()\n w_df = w_df.reset_index()\n basetime = w_df.timestamp.min().floor(freq='H')\n diftime = lambda x: (x['timestamp'] - basetime).total_seconds()\n w_df['rel_time'] = w_df.apply(diftime, axis=1)\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n log_hist = np.histogram(w_df[w_df.source == 'log'].rel_time, density=True)\n sim_hist = np.histogram(w_df[w_df.source == 'sim'].rel_time, density=True)\n if np.isnan(np.sum(log_hist[0])) or np.isnan(np.sum(sim_hist[0])):\n similarity.append({'window': key, 'sim_score': 1})\n else:\n similarity.append({'window': key, 'sim_score': wasserstein_distance(log_hist[0], sim_hist[0])})\n return similarity\n\n # =============================================================================\n # Support methods\n # =============================================================================\n\n def create_task_alias(self, data, features):\n \"\"\"\n Create string alias for tasks names or tuples of tasks-roles names\n\n Parameters\n ----------\n features : list\n\n Returns\n -------\n alias : alias dictionary\n\n \"\"\"\n data = data.to_dict('records')\n subsec_set = set()\n if isinstance(features, list):\n task_list = [(x[features[0]], x[features[1]]) for x in data]\n else:\n task_list = [x[features] for x in data]\n [subsec_set.add(x) for x in task_list]\n variables = sorted(list(subsec_set))\n characters = string.ascii_letters + string.digits\n # characters = [chr(i) for i in range(0, len(variables))]\n aliases = list(map(lambda i: characters[i], np.random.randint(0, len(characters), len(variables))))\n alias = dict()\n for i, _ in enumerate(variables):\n alias[variables[i]] = aliases[i]\n return alias\n\n @staticmethod\n def calculate_times(log):\n \"\"\"Appends the indexes and relative time to the dataframe.\n parms:\n log: dataframe.\n Returns:\n Dataframe: The dataframe with the calculated features added.\n \"\"\"\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] - events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] - events[i - 
1]['end_timestamp']).total_seconds()\n events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)\n\n def scaling_data(self, data):\n \"\"\"\n Scales times values activity based\n\n Parameters\n ----------\n data : dataframe\n\n Returns\n -------\n data : dataframe with normalized times\n\n \"\"\"\n df_modif = data.copy()\n np.seterr(divide='ignore')\n if self.one_timestamp:\n summ = data.groupby(['task'])['duration'].max().to_dict()\n dur_act_norm = (lambda x: x['duration'] / summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['dur_act_norm'] = df_modif.apply(dur_act_norm, axis=1)\n else:\n summ = data.groupby(['task'])['processing_time'].max().to_dict()\n proc_act_norm = (lambda x: x['processing_time'] / summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['proc_act_norm'] = df_modif.apply(proc_act_norm, axis=1)\n # ---\n summ = data.groupby(['task'])['waiting_time'].max().to_dict()\n wait_act_norm = (lambda x: x['waiting_time'] / summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['wait_act_norm'] = df_modif.apply(wait_act_norm, axis=1)\n return df_modif\n\n def reformat_events(self, data, features):\n \"\"\"Creates series of activities, roles and relative times per trace.\n parms:\n log_df: dataframe.\n ac_table (dict): index of activities.\n rl_table (dict): index of roles.\n Returns:\n list: lists of activities, roles and relative times.\n \"\"\"\n # Update alias\n if isinstance(features, list):\n [x.update(dict(alias=self.alias[(x[features[0]], x[features[1]])])) for x in data]\n else:\n [x.update(dict(alias=self.alias[x[features]])) for x in data]\n temp_data = list()\n # define ordering keys and columns\n if self.one_timestamp:\n columns = ['alias', 'duration', 'dur_act_norm']\n sort_key = 'end_timestamp'\n else:\n sort_key = 'start_timestamp'\n columns = ['alias', 'processing_time', 'proc_act_norm', 'waiting_time', 'wait_act_norm']\n data = sorted(data, key=lambda x: (x['caseid'], x[sort_key]))\n for key, group in itertools.groupby(data, key=lambda x: x['caseid']):\n trace = list(group)\n temp_dict = dict()\n for col in columns:\n serie = [y[col] for y in trace]\n if col == 'alias':\n temp_dict = {**{'profile': serie}, **temp_dict}\n else:\n serie = [y[col] for y in trace]\n temp_dict = {**{col: serie}, **temp_dict}\n temp_dict = {**{'caseid': key, 'start_time': trace[0][sort_key], 'end_time': trace[-1][sort_key]},\n **temp_dict}\n temp_data.append(temp_dict)\n return sorted(temp_data, key=itemgetter('start_time'))\n\n @staticmethod\n def define_ranges(size, num_folds):\n num_events = int(np.round(size / num_folds))\n folds = list()\n for i in range(0, num_folds):\n sidx = i * num_events\n eidx = (i + 1) * num_events\n if i == 0:\n folds.append({'min': 0, 'max': eidx})\n elif i == (num_folds - 1):\n folds.append({'min': sidx, 'max': size})\n else:\n folds.append({'min': sidx, 'max': eidx})\n return folds\n"
]
| [
[
"numpy.divide",
"numpy.histogram",
"numpy.array",
"numpy.add",
"numpy.ceil",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"numpy.round",
"numpy.sum",
"numpy.seterr",
"numpy.mean",
"numpy.multiply",
"numpy.abs",
"pandas.concat",
"scipy.stats.wasserstein_distance"
]
]
|
marcociccone/Lasagne | [
"19b9d958507e7300e355d7cd1a6bbff2c7726c07"
]
| [
"lasagne/tests/layers/test_helper.py"
]
| [
"import warnings\nfrom mock import Mock, PropertyMock\nimport pytest\nimport numpy\nimport theano\n\n\nclass TestGetAllLayers:\n def test_stack(self):\n from lasagne.layers import InputLayer, DenseLayer, get_all_layers\n from itertools import permutations\n # l1 --> l2 --> l3\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n # try all possible combinations and orders for a query\n for count in (0, 1, 2, 3):\n for query in permutations([l1, l2, l3], count):\n if l3 in query:\n expected = [l1, l2, l3]\n elif l2 in query:\n expected = [l1, l2]\n elif l1 in query:\n expected = [l1]\n else:\n expected = []\n assert get_all_layers(query) == expected\n # treat_as_input=[l2] should block l1 from appearing\n assert get_all_layers(l3, treat_as_input=[l2]) == [l2, l3]\n\n def test_merge(self):\n from lasagne.layers import (InputLayer, DenseLayer, ElemwiseSumLayer,\n get_all_layers)\n # l1 --> l2 --> l3 --> l6\n # l4 --> l5 ----^\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n l4 = InputLayer((10, 30))\n l5 = DenseLayer(l4, 40)\n l6 = ElemwiseSumLayer([l3, l5])\n # try various combinations and orders for a query\n assert get_all_layers(l6) == [l1, l2, l3, l4, l5, l6]\n assert get_all_layers([l4, l6]) == [l4, l1, l2, l3, l5, l6]\n assert get_all_layers([l5, l6]) == [l4, l5, l1, l2, l3, l6]\n assert get_all_layers([l4, l2, l5, l6]) == [l4, l1, l2, l5, l3, l6]\n # check that treat_as_input correctly blocks the search\n assert get_all_layers(l6, treat_as_input=[l2]) == [l2, l3, l4, l5, l6]\n assert get_all_layers(l6, treat_as_input=[l3, l5]) == [l3, l5, l6]\n assert get_all_layers([l6, l2], treat_as_input=[l6]) == [l6, l1, l2]\n\n def test_split(self):\n from lasagne.layers import InputLayer, DenseLayer, get_all_layers\n # l1 --> l2 --> l3\n # \\---> l4\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n l4 = DenseLayer(l1, 50)\n # try various combinations and orders for a query\n assert get_all_layers(l3) == [l1, l2, l3]\n assert get_all_layers(l4) == [l1, l4]\n assert get_all_layers([l3, l4]) == [l1, l2, l3, l4]\n assert get_all_layers([l4, l3]) == [l1, l4, l2, l3]\n # check that treat_as_input correctly blocks the search\n assert get_all_layers(l3, treat_as_input=[l2]) == [l2, l3]\n assert get_all_layers([l3, l4], treat_as_input=[l2]) == [l2, l3,\n l1, l4]\n\n def test_bridge(self):\n from lasagne.layers import (InputLayer, DenseLayer, ElemwiseSumLayer,\n get_all_layers)\n # l1 --> l2 --> l3 --> l4 --> l5\n # \\------------^\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 30)\n l4 = ElemwiseSumLayer([l2, l3])\n l5 = DenseLayer(l4, 40)\n # check for correct topological order\n assert get_all_layers(l5) == [l1, l2, l3, l4, l5]\n # check that treat_as_input=[l4] blocks the search and =[l3] does not\n assert get_all_layers(l5, treat_as_input=[l4]) == [l4, l5]\n assert get_all_layers(l5, treat_as_input=[l3]) == [l1, l2, l3, l4, l5]\n\n\nclass TestGetOutput_InputLayer:\n @pytest.fixture\n def get_output(self):\n from lasagne.layers.helper import get_output\n return get_output\n\n @pytest.fixture\n def layer(self):\n from lasagne.layers.input import InputLayer\n return InputLayer((3, 2))\n\n def test_get_output_without_arguments(self, layer, get_output):\n assert get_output(layer) is layer.input_var\n\n def test_get_output_input_is_variable(self, layer, get_output):\n variable = theano.Variable(\"myvariable\")\n assert get_output(layer, variable) is variable\n\n def 
test_get_output_input_is_array(self, layer, get_output):\n inputs = [[1, 2, 3]]\n output = get_output(layer, inputs)\n assert numpy.all(output.eval() == inputs)\n\n def test_get_output_input_is_a_mapping(self, layer, get_output):\n inputs = {layer: theano.tensor.matrix()}\n assert get_output(layer, inputs) is inputs[layer]\n\n\nclass TestGetOutput_Layer:\n @pytest.fixture\n def get_output(self):\n from lasagne.layers.helper import get_output\n return get_output\n\n @pytest.fixture\n def layers(self):\n from lasagne.layers.base import Layer\n from lasagne.layers.input import InputLayer\n # create a mock that has the same attributes as an InputLayer instance\n l1 = Mock(InputLayer((None,)), output_shape=(None,),\n get_output_kwargs=[])\n # create a mock that has the same attributes as a Layer instance\n l2 = Mock(Layer(l1), output_shape=(None,), get_output_kwargs=[])\n # link it to the InputLayer mock\n l2.input_layer = l1\n # create another mock that has the same attributes as a Layer instance\n l3 = Mock(Layer(l2), output_shape=(None,), get_output_kwargs=['kwarg'])\n # link it to the first mock, to get an \"l1 --> l2 --> l3\" chain\n l3.input_layer = l2\n return l1, l2, l3\n\n def test_get_output_without_arguments(self, layers, get_output):\n l1, l2, l3 = layers\n output = get_output(l3)\n # expected: l3.get_output_for(l2.get_output_for(l1.input_var))\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with(\n l2.get_output_for.return_value)\n l2.get_output_for.assert_called_with(\n l1.input_var)\n\n def test_get_output_with_single_argument(self, layers, get_output):\n l1, l2, l3 = layers\n inputs, kwarg = theano.tensor.matrix(), object()\n output = get_output(l3, inputs, kwarg=kwarg)\n # expected: l3.get_output_for(l2.get_output_for(inputs, kwarg=kwarg),\n # kwarg=kwarg)\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with(\n l2.get_output_for.return_value, kwarg=kwarg)\n l2.get_output_for.assert_called_with(\n inputs, kwarg=kwarg)\n\n def test_get_output_input_is_a_mapping(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1).input_var = p\n inputs = {l3: theano.tensor.matrix()}\n # expected: inputs[l3]\n assert get_output(l3, inputs) is inputs[l3]\n # l3.get_output_for, l2.get_output_for should not have been called\n assert l3.get_output_for.call_count == 0\n assert l2.get_output_for.call_count == 0\n # l1.input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):\n l1, l2, l3 = layers\n output = get_output(l3, {})\n # expected: l3.get_output_for(l2.get_output_for(l1.input_var))\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with(\n l2.get_output_for.return_value)\n l2.get_output_for.assert_called_with(\n l1.input_var)\n\n def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1).input_var = p\n inputs = {l3: [[1, 2, 3]]}\n output = get_output(l3, inputs)\n # expected: inputs[l3]\n assert numpy.all(output.eval() == inputs[l3])\n # l3.get_output_for, l2.get_output_for should not have been called\n assert l3.get_output_for.call_count == 0\n assert l2.get_output_for.call_count == 0\n # l1.input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1).input_var = p\n 
input_expr, kwarg = theano.tensor.matrix(), object()\n inputs = {l2: input_expr}\n output = get_output(l3, inputs, kwarg=kwarg)\n # expected: l3.get_output_for(input_expr, kwarg=kwarg)\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with(input_expr, kwarg=kwarg)\n # l2.get_output_for should not have been called\n assert l2.get_output_for.call_count == 0\n # l1.input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_for_input_layer(self, layers,\n get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1).input_var = p\n input_expr, kwarg = theano.tensor.matrix(), object()\n inputs = {l1: input_expr}\n output = get_output(l3, inputs, kwarg=kwarg)\n # expected: l3.get_output_for(l2.get_output_for(input_expr,\n # kwarg=kwarg),\n # kwarg=kwarg)\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with(\n l2.get_output_for.return_value, kwarg=kwarg)\n l2.get_output_for.assert_called_with(\n input_expr, kwarg=kwarg)\n # l1.input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_with_unused_kwarg(self, layers, get_output):\n l1, l2, l3 = layers\n unused_kwarg = object()\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n get_output(l3, kwagg=unused_kwarg)\n assert len(w) == 1\n assert issubclass(w[0].category, UserWarning)\n assert 'perhaps you meant kwarg' in str(w[0].message)\n\n def test_get_output_with_no_unused_kwarg(self, layers, get_output):\n l1, l2, l3 = layers\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n get_output(l3)\n assert len(w) == 0\n\n @pytest.fixture\n def layer_from_shape(self):\n from lasagne.layers.base import Layer\n return Layer((None, 20))\n\n def test_layer_from_shape_invalid_get_output(self, layer_from_shape,\n get_output):\n layer = layer_from_shape\n with pytest.raises(ValueError):\n get_output(layer)\n with pytest.raises(ValueError):\n get_output(layer, [1, 2])\n with pytest.raises(ValueError):\n get_output(layer, {Mock(): [1, 2]})\n\n def test_layer_from_shape_valid_get_output(self, layer_from_shape,\n get_output):\n layer = layer_from_shape\n inputs = {layer: theano.tensor.matrix()}\n assert get_output(layer, inputs) is inputs[layer]\n inputs = {None: theano.tensor.matrix()}\n layer.get_output_for = Mock()\n assert get_output(layer, inputs) is layer.get_output_for.return_value\n layer.get_output_for.assert_called_with(inputs[None])\n\n\nclass TestGetOutput_MergeLayer:\n @pytest.fixture\n def get_output(self):\n from lasagne.layers.helper import get_output\n return get_output\n\n @pytest.fixture\n def layers(self):\n from lasagne.layers.base import Layer, MergeLayer\n from lasagne.layers.input import InputLayer\n # create two mocks of the same attributes as an InputLayer instance\n l1 = [Mock(InputLayer((None,)), output_shape=(None,),\n get_output_kwargs=[]),\n Mock(InputLayer((None,)), output_shape=(None,),\n get_output_kwargs=[])]\n # create two mocks of the same attributes as a Layer instance\n l2 = [Mock(Layer(l1[0]), output_shape=(None,),\n get_output_kwargs=[]),\n Mock(Layer(l1[1]), output_shape=(None,),\n get_output_kwargs=[])]\n # link them to the InputLayer mocks\n l2[0].input_layer = l1[0]\n l2[1].input_layer = l1[1]\n # create a mock that has the same attributes as a MergeLayer\n l3 = Mock(MergeLayer(l2), get_output_kwargs=['kwarg'])\n # link it to the two layer mocks, to get the following network:\n # l1[0] --> l2[0] 
--> l3\n # l1[1] --> l2[1] ----^\n l3.input_layers = l2\n return l1, l2, l3\n\n def test_get_output_without_arguments(self, layers, get_output):\n l1, l2, l3 = layers\n output = get_output(l3)\n # expected: l3.get_output_for([l2[0].get_output_for(l1[0].input_var),\n # l2[1].get_output_for(l1[1].input_var)])\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with([\n l2[0].get_output_for.return_value,\n l2[1].get_output_for.return_value,\n ])\n l2[0].get_output_for.assert_called_with(\n l1[0].input_var)\n l2[1].get_output_for.assert_called_with(\n l1[1].input_var)\n\n def test_get_output_with_single_argument_fails(self, layers, get_output):\n l1, l2, l3 = layers\n inputs, kwarg = theano.tensor.matrix(), object()\n # expected to fail: only gave one expression for two input layers\n with pytest.raises(ValueError):\n output = get_output(l3, inputs, kwarg=kwarg)\n\n def test_get_output_input_is_a_mapping(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1[0]).input_var = p\n type(l1[1]).input_var = p\n inputs = {l3: theano.tensor.matrix()}\n # expected: inputs[l3]\n assert get_output(l3, inputs) is inputs[l3]\n # l3.get_output_for, l2[*].get_output_for should not have been called\n assert l3.get_output_for.call_count == 0\n assert l2[0].get_output_for.call_count == 0\n assert l2[1].get_output_for.call_count == 0\n # l1[*].input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):\n l1, l2, l3 = layers\n output = get_output(l3, {})\n # expected: l3.get_output_for([l2[0].get_output_for(l1[0].input_var),\n # l2[1].get_output_for(l1[1].input_var)])\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with([\n l2[0].get_output_for.return_value,\n l2[1].get_output_for.return_value,\n ])\n l2[0].get_output_for.assert_called_with(\n l1[0].input_var)\n l2[1].get_output_for.assert_called_with(\n l1[1].input_var)\n\n def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1[0]).input_var = p\n type(l1[1]).input_var = p\n inputs = {l3: [[1, 2, 3]]}\n output = get_output(l3, inputs)\n # expected: inputs[l3]\n assert numpy.all(output.eval() == inputs[l3])\n # l3.get_output_for, l2[*].get_output_for should not have been called\n assert l3.get_output_for.call_count == 0\n assert l2[0].get_output_for.call_count == 0\n assert l2[1].get_output_for.call_count == 0\n # l1[*].input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n type(l1[0]).input_var = p\n input_expr, kwarg = theano.tensor.matrix(), object()\n inputs = {l2[0]: input_expr}\n output = get_output(l3, inputs, kwarg=kwarg)\n # expected: l3.get_output_for([input_expr,\n # l2[1].get_output_for(l1[1].input_var,\n # kwarg=kwarg)],\n # kwarg=kwarg)\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with([\n input_expr,\n l2[1].get_output_for.return_value,\n ], kwarg=kwarg)\n l2[1].get_output_for.assert_called_with(\n l1[1].input_var, kwarg=kwarg)\n # l2[0].get_output_for should not have been called\n assert l2[0].get_output_for.call_count == 0\n # l1[0].input_var should not have been accessed\n assert p.call_count == 0\n\n def test_get_output_input_is_a_mapping_for_input_layer(self, layers,\n get_output):\n l1, l2, l3 = layers\n p = PropertyMock()\n 
type(l1[0]).input_var = p\n input_expr, kwarg = theano.tensor.matrix(), object()\n inputs = {l1[0]: input_expr}\n output = get_output(l3, inputs, kwarg=kwarg)\n # expected: l3.get_output_for([l2[0].get_output_for(input_expr,\n # kwarg=kwarg),\n # l2[1].get_output_for(l1[1].input_var,\n # kwarg=kwarg)],\n # kwarg=kwarg)\n assert output is l3.get_output_for.return_value\n l3.get_output_for.assert_called_with([\n l2[0].get_output_for.return_value,\n l2[1].get_output_for.return_value,\n ], kwarg=kwarg)\n l2[0].get_output_for.assert_called_with(\n input_expr, kwarg=kwarg)\n l2[1].get_output_for.assert_called_with(\n l1[1].input_var, kwarg=kwarg)\n # l1[0].input_var should not have been accessed\n assert p.call_count == 0\n\n @pytest.fixture\n def layer_from_shape(self):\n from lasagne.layers.input import InputLayer\n from lasagne.layers.base import MergeLayer\n return MergeLayer([\n (None, 20),\n Mock(InputLayer((None,)), output_shape=(None,))])\n\n def test_layer_from_shape_invalid_get_output(self, layer_from_shape,\n get_output):\n layer = layer_from_shape\n with pytest.raises(ValueError):\n get_output(layer)\n with pytest.raises(ValueError):\n get_output(layer, [1, 2])\n with pytest.raises(ValueError):\n get_output(layer, {layer.input_layers[1]: [1, 2]})\n\n def test_layer_from_shape_valid_get_output(self, layer_from_shape,\n get_output):\n layer = layer_from_shape\n inputs = {layer: theano.tensor.matrix()}\n assert get_output(layer, inputs) is inputs[layer]\n inputs = {None: theano.tensor.matrix()}\n layer.get_output_for = Mock()\n assert get_output(layer, inputs) is layer.get_output_for.return_value\n layer.get_output_for.assert_called_with(\n [inputs[None], layer.input_layers[1].input_var])\n\n\nclass TestGetOutputShape_InputLayer:\n @pytest.fixture\n def get_output_shape(self):\n from lasagne.layers.helper import get_output_shape\n return get_output_shape\n\n @pytest.fixture\n def layer(self):\n from lasagne.layers.input import InputLayer\n return InputLayer((3, 2))\n\n def test_get_output_shape_without_arguments(self, layer, get_output_shape):\n assert get_output_shape(layer) == (3, 2)\n\n def test_get_output_shape_input_is_tuple(self, layer, get_output_shape):\n shp = (4, 5, 6)\n assert get_output_shape(layer, shp) == shp\n\n def test_get_output_shape_input_is_a_mapping(self, layer,\n get_output_shape):\n input_shapes = {layer: (4, 5, 6)}\n assert get_output_shape(layer, input_shapes) == input_shapes[layer]\n\n\nclass TestGetOutputShape_Layer:\n @pytest.fixture\n def get_output_shape(self):\n from lasagne.layers.helper import get_output_shape\n return get_output_shape\n\n @pytest.fixture\n def layers(self):\n from lasagne.layers.base import Layer\n from lasagne.layers.input import InputLayer\n # create a mock that has the same attributes as an InputLayer instance\n l1 = Mock(InputLayer((None,)), output_shape=(None,))\n # create a mock that has the same attributes as a Layer instance\n l2 = Mock(Layer(l1), output_shape=(None,))\n # link it to the InputLayer mock\n l2.input_layer = l1\n # create another mock that has the same attributes as a Layer instance\n l3 = Mock(Layer(l2), output_shape=(None,))\n # link it to the first mock, to get an \"l1 --> l2 --> l3\" chain\n l3.input_layer = l2\n return l1, l2, l3\n\n def test_get_output_shape_without_arguments(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n output_shape = get_output_shape(l3)\n # expected: l3.output_shape\n assert output_shape is l3.output_shape\n # l3.get_output_shape_for, l2.get_output_shape_for should not have 
been\n # called\n assert l3.get_output_shape_for.call_count == 0\n assert l2.get_output_shape_for.call_count == 0\n\n def test_get_output_shape_with_single_argument(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n shp = (3, 4, 5)\n output_shape = get_output_shape(l3, shp)\n # expected: l3.get_output_shape_for(l2.get_output_shape_for(shp))\n assert output_shape is l3.get_output_shape_for.return_value\n l3.get_output_shape_for.assert_called_with(\n l2.get_output_shape_for.return_value)\n l2.get_output_shape_for.assert_called_with(shp)\n\n def test_get_output_shape_input_is_a_mapping(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n input_shapes = {l3: (4, 5, 6)}\n # expected: input_shapes[l3]\n assert get_output_shape(l3, input_shapes) is input_shapes[l3]\n # l3.get_output_shape_for, l2.get_output_shape_for should not have been\n # called\n assert l3.get_output_shape_for.call_count == 0\n assert l2.get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_no_key(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n output_shape = get_output_shape(l3, {})\n # expected: l3.output_shape\n assert output_shape is l3.output_shape\n # l3.get_output_shape_for, l2.get_output_shape_for should not have been\n # called\n assert l3.get_output_shape_for.call_count == 0\n assert l2.get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_for_layer(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n shp = (4, 5, 6)\n input_shapes = {l2: shp}\n output_shape = get_output_shape(l3, input_shapes)\n # expected: l3.get_output_shape_for(shp)\n assert output_shape is l3.get_output_shape_for.return_value\n l3.get_output_shape_for.assert_called_with(shp)\n # l2.get_output_shape_for should not have been called\n assert l2.get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_for_input_layer(\n self, layers, get_output_shape):\n l1, l2, l3 = layers\n shp = (4, 5, 6)\n input_shapes = {l1: shp}\n output_shape = get_output_shape(l3, input_shapes)\n # expected: l3.get_output_shape_for(l2.get_output_shape_for(shp))\n assert output_shape is l3.get_output_shape_for.return_value\n l3.get_output_shape_for.assert_called_with(\n l2.get_output_shape_for.return_value)\n l2.get_output_shape_for.assert_called_with(shp)\n\n @pytest.fixture\n def layer_from_shape(self):\n from lasagne.layers.base import Layer\n return Layer((None, 20))\n\n def test_layer_from_shape(self, layer_from_shape, get_output_shape):\n layer = layer_from_shape\n input_shapes = {layer: (4, 5, 6)}\n assert get_output_shape(layer, input_shapes) is input_shapes[layer]\n input_shapes = {None: (4, 5, 6)}\n layer.get_output_shape_for = Mock()\n assert (get_output_shape(layer, input_shapes) is\n layer.get_output_shape_for.return_value)\n layer.get_output_shape_for.assert_called_with(input_shapes[None])\n\n\nclass TestGetOutputShape_MergeLayer:\n @pytest.fixture\n def get_output_shape(self):\n from lasagne.layers.helper import get_output_shape\n return get_output_shape\n\n @pytest.fixture\n def layers(self):\n from lasagne.layers.base import Layer, MergeLayer\n from lasagne.layers.input import InputLayer\n # create two mocks of the same attributes as an InputLayer instance\n l1 = [Mock(InputLayer((None,)), output_shape=(None,)),\n Mock(InputLayer((None,)), output_shape=(None,))]\n # create two mocks of the same attributes as a Layer instance\n l2 = [Mock(Layer(l1[0]), output_shape=(None,)),\n Mock(Layer(l1[1]), output_shape=(None,))]\n # link them to 
the InputLayer mocks\n l2[0].input_layer = l1[0]\n l2[1].input_layer = l1[1]\n # create a mock that has the same attributes as a MergeLayer\n l3 = Mock(MergeLayer(l2))\n # link it to the two layer mocks, to get the following network:\n # l1[0] --> l2[0] --> l3\n # l1[1] --> l2[1] ----^\n l3.input_layers = l2\n return l1, l2, l3\n\n def test_get_output_shape_without_arguments(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n output_shape = get_output_shape(l3)\n # expected: l3.output_shape\n assert output_shape is l3.output_shape\n # l3.get_output_shape_for, l2[*].get_output_shape_for should not have\n # been called\n assert l3.get_output_shape_for.call_count == 0\n assert l2[0].get_output_shape_for.call_count == 0\n assert l2[1].get_output_shape_for.call_count == 0\n\n def test_get_output_shape_with_single_argument_fails(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n shp = (4, 5, 6)\n # expected to fail: only gave one shape tuple for two input layers\n with pytest.raises(ValueError):\n output_shape = get_output_shape(l3, shp)\n\n def test_get_output_shape_input_is_a_mapping(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n input_shapes = {l3: (4, 5, 6)}\n # expected: input_shapes[l3]\n assert get_output_shape(l3, input_shapes) is input_shapes[l3]\n # l3.get_output_shape_for, l2[*].get_output_shape_for should not have\n # been called\n assert l3.get_output_shape_for.call_count == 0\n assert l2[0].get_output_shape_for.call_count == 0\n assert l2[1].get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_no_key(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n output_shape = get_output_shape(l3, {})\n # expected: l3.output_shape\n assert output_shape is l3.output_shape\n # l3.get_output_shape_for, l2[*].get_output_shape_for should not have\n # been called\n assert l3.get_output_shape_for.call_count == 0\n assert l2[0].get_output_shape_for.call_count == 0\n assert l2[1].get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_for_layer(self, layers,\n get_output_shape):\n l1, l2, l3 = layers\n shp = (4, 5, 6)\n input_shapes = {l2[0]: shp}\n output = get_output_shape(l3, input_shapes)\n # expected: l3.get_output_shape_for(\n # [shp, l2[1].get_output_shape_for(l1[1].shape)])\n assert output is l3.get_output_shape_for.return_value\n l3.get_output_shape_for.assert_called_with([\n shp, l2[1].get_output_shape_for.return_value])\n l2[1].get_output_shape_for.assert_called_with(l1[1].shape)\n # l2[0].get_output_shape_for should not have been called\n assert l2[0].get_output_shape_for.call_count == 0\n\n def test_get_output_shape_input_is_a_mapping_for_input_layer(\n self, layers, get_output_shape):\n l1, l2, l3 = layers\n shp = (4, 5, 6)\n input_shapes = {l1[0]: shp}\n output = get_output_shape(l3, input_shapes)\n # expected: l3.get_output_shape_for(\n # [l2[0].get_output_shape_for(shp),\n # l2[1].get_output_shape_for(l1[1].shape)])\n assert output is l3.get_output_shape_for.return_value\n l3.get_output_shape_for.assert_called_with([\n l2[0].get_output_shape_for.return_value,\n l2[1].get_output_shape_for.return_value,\n ])\n l2[0].get_output_shape_for.assert_called_with(shp)\n l2[1].get_output_shape_for.assert_called_with(l1[1].shape)\n\n @pytest.fixture\n def layer_from_shape(self):\n from lasagne.layers.input import InputLayer\n from lasagne.layers.base import MergeLayer\n return MergeLayer([\n (None, 20),\n Mock(InputLayer((None,)), output_shape=(None,))])\n\n def 
test_layer_from_shape_valid_get_output_shape(self, layer_from_shape,\n get_output_shape):\n layer = layer_from_shape\n input_shapes = {layer: (4, 5, 6)}\n assert get_output_shape(layer, input_shapes) is input_shapes[layer]\n input_shapes = {None: (4, 5, 6)}\n layer.get_output_shape_for = Mock()\n assert (get_output_shape(layer, input_shapes) is\n layer.get_output_shape_for.return_value)\n layer.get_output_shape_for.assert_called_with(\n [input_shapes[None], layer.input_layers[1].shape])\n\n\nclass TestGetAllParams:\n def test_get_all_params(self):\n from lasagne.layers import (InputLayer, DenseLayer, get_all_params)\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n\n assert get_all_params(l3) == l2.get_params() + l3.get_params()\n assert (get_all_params(l3, regularizable=False) ==\n (l2.get_params(regularizable=False) +\n l3.get_params(regularizable=False)))\n\n assert (get_all_params(l3, regularizable=True) ==\n (l2.get_params(regularizable=True) +\n l3.get_params(regularizable=True)))\n\n\nclass TestCountParams:\n def test_get_all_params(self):\n from lasagne.layers import (InputLayer, DenseLayer, count_params)\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n\n num_weights = 20 * 30 + 30 * 40\n num_biases = 30 + 40\n\n assert count_params(l3, regularizable=True) == num_weights\n assert count_params(l3, regularizable=False) == num_biases\n assert count_params(l3) == num_weights + num_biases\n\n\nclass TestGetAllParamValues:\n def test_get_all_param_values(self):\n from lasagne.layers import (InputLayer, DenseLayer,\n get_all_param_values)\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n\n pvs = get_all_param_values(l3)\n assert len(pvs) == 4\n\n\nclass TestSetAllParamValues:\n def test_set_all_param_values(self):\n from lasagne.layers import (InputLayer, DenseLayer,\n set_all_param_values)\n from lasagne.utils import floatX\n\n l1 = InputLayer((10, 20))\n l2 = DenseLayer(l1, 30)\n l3 = DenseLayer(l2, 40)\n\n a2 = floatX(numpy.random.normal(0, 1, (20, 30)))\n b2 = floatX(numpy.random.normal(0, 1, (30,)))\n a3 = floatX(numpy.random.normal(0, 1, (30, 40)))\n b3 = floatX(numpy.random.normal(0, 1, (40,)))\n set_all_param_values(l3, [a2, b2, a3, b3])\n assert numpy.allclose(l3.W.get_value(), a3)\n assert numpy.allclose(l3.b.get_value(), b3)\n assert numpy.allclose(l2.W.get_value(), a2)\n assert numpy.allclose(l2.b.get_value(), b2)\n\n with pytest.raises(ValueError):\n set_all_param_values(l3, [a3, b3, a2])\n\n with pytest.raises(ValueError):\n a3_bad = floatX(numpy.random.normal(0, 1, (25, 40)))\n set_all_param_values(l3, [a2, b2, a3_bad, b3])\n"
]
| [
[
"numpy.random.normal"
]
]
|
nixingyang/ShuffleBits | [
"46f1b2ff92d506dae9eba5f269c9ce9eca71dc72"
]
| [
"datasets/market1501.py"
]
| [
"import glob\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.io import loadmat\n\n\ndef _load_accumulated_info(root_folder_path,\n dataset_folder_name=\"Market-1501-v15.09.15\",\n image_folder_name=\"bounding_box_train\"):\n \"\"\"\n References:\n https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view\n https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive\n gdrive download 0B8-rUzbwVRk0c054eEozWG9COHM\n 7za x Market-1501-v15.09.15.zip\n sha256sum Market-1501-v15.09.15.zip\n 416bb77b5a2449b32e936f623cbee58becf1a9e7e936f36380cb8f9ab928fe96 Market-1501-v15.09.15.zip\n \"\"\"\n dataset_folder_path = os.path.join(root_folder_path, dataset_folder_name)\n image_folder_path = os.path.join(dataset_folder_path, image_folder_name)\n\n image_file_path_list = sorted(\n glob.glob(os.path.join(image_folder_path, \"*.jpg\")))\n if image_folder_name == \"bounding_box_train\":\n assert len(image_file_path_list) == 12936\n elif image_folder_name == \"bounding_box_test\":\n assert len(image_file_path_list) == 19732\n elif image_folder_name == \"query\":\n assert len(image_file_path_list) == 3368\n else:\n assert False, \"{} is an invalid argument!\".format(image_folder_name)\n\n # Improving Person Re-identification by Attribute and Identity Learning\n # https://github.com/vana77/Market-1501_Attribute\n attribute_file_path = os.path.join(dataset_folder_path,\n \"Market-1501_Attribute\",\n \"market_attribute.mat\")\n attribute_file_content = loadmat(attribute_file_path)[\"market_attribute\"][0,\n 0]\n train_attribute_file_content, test_attribute_file_content = attribute_file_content[\n \"train\"], attribute_file_content[\"test\"]\n assert sorted(train_attribute_file_content.dtype.names) == sorted(\n test_attribute_file_content.dtype.names)\n attribute_name_list = sorted(train_attribute_file_content.dtype.names)\n attribute_name_list.remove(\"image_index\")\n identity_IDs, attribute_values = [], []\n for split_attribute_file_content in (train_attribute_file_content,\n test_attribute_file_content):\n identity_IDs.append(\n split_attribute_file_content[\"image_index\"][0, 0].flatten().astype(\n np.int))\n attribute_values.append(\n np.swapaxes(\n np.vstack([\n split_attribute_file_content[attribute_name][0,\n 0].flatten()\n for attribute_name in attribute_name_list\n ]), 0, 1))\n identity_IDs, attribute_values = np.hstack(\n identity_IDs).tolist(), np.vstack(attribute_values)\n\n accumulated_info_list = []\n for image_file_path in image_file_path_list:\n # Extract identity_ID\n image_file_name = image_file_path.split(os.sep)[-1]\n identity_ID = int(image_file_name.split(\"_\")[0])\n if identity_ID == -1:\n # Ignore junk images\n # https://github.com/Cysu/open-reid/issues/16\n # https://github.com/michuanhaohao/reid-strong-baseline/blob/\\\n # 69348ceb539fc4bafd006575f7bd432a4d08b9e6/data/datasets/market1501.py#L71\n continue\n\n # Extract camera_ID\n cam_seq_ID = image_file_name.split(\"_\")[1]\n camera_ID = int(cam_seq_ID[1])\n\n # Append the records\n accumulated_info = {\n \"image_file_path\": image_file_path,\n \"identity_ID\": identity_ID,\n \"camera_ID\": camera_ID\n }\n try:\n attribute_index = identity_IDs.index(identity_ID)\n for attribute_name, attribute_value in zip(\n attribute_name_list, attribute_values[attribute_index]):\n accumulated_info[attribute_name] = attribute_value\n except ValueError:\n pass\n finally:\n accumulated_info_list.append(accumulated_info)\n\n # Convert list to data frame\n accumulated_info_dataframe = 
pd.DataFrame(accumulated_info_list)\n return accumulated_info_dataframe\n\n\ndef load_Market1501(root_folder_path):\n train_and_valid_accumulated_info_dataframe = _load_accumulated_info(\n root_folder_path=root_folder_path,\n image_folder_name=\"bounding_box_train\")\n test_gallery_accumulated_info_dataframe = _load_accumulated_info(\n root_folder_path=root_folder_path,\n image_folder_name=\"bounding_box_test\")\n test_query_accumulated_info_dataframe = _load_accumulated_info(\n root_folder_path=root_folder_path, image_folder_name=\"query\")\n return train_and_valid_accumulated_info_dataframe, test_query_accumulated_info_dataframe, test_gallery_accumulated_info_dataframe\n"
]
| [
[
"pandas.DataFrame",
"scipy.io.loadmat",
"numpy.hstack",
"numpy.vstack"
]
]
|
ahayul/op4 | [
"b8b5e2747a00af6eaa2deca999a5a19590095f95"
]
| [
"selfdrive/controls/lib/lateral_planner.py"
]
| [
"import os\nimport math\nimport numpy as np\nfrom common.params import Params\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp, clip\nfrom selfdrive.car.hyundai.values import CAR\nfrom selfdrive.ntune import ntune_common_get, ntune_common_enabled\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc import libmpc_py\nfrom selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nimport cereal.messaging as messaging\nfrom cereal import log\n\nAUTO_LCA_START_TIME = 1.0\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLOG_MPC = os.environ.get('LOG_MPC', False)\n\nLANE_CHANGE_SPEED_MIN = 60 * CV.KPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass LateralPlanner():\n def __init__(self, CP, use_lanelines=True, wide_camera=False):\n self.use_lanelines = use_lanelines\n self.LP = LanePlanner(wide_camera)\n\n self.last_cloudlog_t = 0\n\n self.setup_mpc()\n self.solution_invalid_cnt = 0\n\n self.lane_change_enabled = Params().get_bool('LaneChangeEnabled')\n self.auto_lane_change_enabled = Params().get_bool('AutoLaneChangeEnabled')\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.keep_pulse_timer = 0.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n\n self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))\n self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n self.auto_lane_change_timer = 0.0\n self.prev_torque_applied = False\n self.steerRatio = 0.0\n self.wide_camera = wide_camera\n\n def setup_mpc(self):\n self.libmpc = libmpc_py.libmpc\n self.libmpc.init()\n\n self.mpc_solution = libmpc_py.ffi.new(\"log_t *\")\n self.cur_state = libmpc_py.ffi.new(\"state_t *\")\n self.cur_state[0].x = 0.0\n self.cur_state[0].y = 0.0\n self.cur_state[0].psi = 0.0\n self.cur_state[0].curvature = 0.0\n\n self.desired_curvature = 0.0\n self.safe_desired_curvature = 0.0\n self.desired_curvature_rate = 0.0\n self.safe_desired_curvature_rate = 0.0\n\n def update(self, sm, CP):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n measured_curvature = sm['controlsState'].curvature\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n 
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])\n\n cameraOffset = ntune_common_get(\"cameraOffset\") + 0.08 if self.wide_camera else ntune_common_get(\"cameraOffset\")\n self.path_xyz[:, 1] -= cameraOffset\n\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n if len(md.orientation.xStd) == TRAJECTORY_SIZE:\n self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not one_blinker) or (not self.lane_change_enabled):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n torque_applied = sm['carState'].steeringPressed and \\\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right)) or \\\n self.auto_lane_change_enabled and \\\n (AUTO_LCA_START_TIME+0.25) > self.auto_lane_change_timer > AUTO_LCA_START_TIME\n\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n\n # State transitions\n # off\n if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # pre\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and (not blindspot_detected or self.prev_torque_applied):\n self.lane_change_state = LaneChangeState.laneChangeStarting\n elif torque_applied and blindspot_detected and self.auto_lane_change_timer != 10.0:\n self.auto_lane_change_timer = 10.0\n elif not torque_applied and self.auto_lane_change_timer == 10.0 and not self.prev_torque_applied:\n self.prev_torque_applied = True\n\n # starting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)\n # 98% certainty\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # finishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if one_blinker and self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.preLaneChange\n elif self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n if self.lane_change_state == 
LaneChangeState.off:\n self.auto_lane_change_timer = 0.0\n self.prev_torque_applied = False\n elif self.auto_lane_change_timer < (AUTO_LCA_START_TIME+0.25): # stop afer 3 sec resume from 10 when torque applied\n self.auto_lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Send keep pulse once per second during LaneChangeStart.preLaneChange\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:\n self.keep_pulse_timer = 0.0\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n self.keep_pulse_timer += DT_MDL\n if self.keep_pulse_timer > 1.0:\n self.keep_pulse_timer = 0.0\n elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:\n self.desire = log.LateralPlan.Desire.none\n\n # Turn off lanes during lane change\n if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n if self.use_lanelines:\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, ntune_common_get('steerRateCost'))\n else:\n d_path_xyz = self.path_xyz\n path_cost = np.clip(abs(self.path_xyz[0, 1] / self.path_xyz_stds[0, 1]), 0.5, 5.0) * MPC_COST_LAT.PATH\n # Heading cost is useful at low speed, otherwise end of plan can be off-heading\n heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])\n self.libmpc.set_weights(path_cost, heading_cost, ntune_common_get('steerRateCost'))\n\n y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])\n heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n assert len(y_pts) == LAT_MPC_N + 1\n assert len(heading_pts) == LAT_MPC_N + 1\n # for now CAR_ROTATION_RADIUS is disabled\n # to use it, enable it in the MPC\n assert abs(CAR_ROTATION_RADIUS) < 1e-3\n self.libmpc.run_mpc(self.cur_state, self.mpc_solution,\n float(v_ego),\n CAR_ROTATION_RADIUS,\n list(y_pts),\n list(heading_pts))\n # init state for next\n self.cur_state.x = 0.0\n self.cur_state.y = 0.0\n self.cur_state.psi = 0.0\n self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)\n\n # Check for infeasable MPC solution\n mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)\n t = sec_since_boot()\n if mpc_nans:\n self.libmpc.init()\n self.cur_state.curvature = measured_curvature\n\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.mpc_solution[0].cost > 20000. 
or mpc_nans: # TODO: find a better way to detect when MPC did not converge\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('lateralPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])\n plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)\n plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]\n plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]\n plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]\n plan_send.lateralPlan.lProb = float(self.LP.lll_prob)\n plan_send.lateralPlan.rProb = float(self.LP.rll_prob)\n plan_send.lateralPlan.dProb = float(self.LP.d_prob)\n\n plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)\n\n plan_send.lateralPlan.desire = self.desire\n plan_send.lateralPlan.laneChangeState = self.lane_change_state\n plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction\n plan_send.lateralPlan.autoLaneChangeEnabled = self.auto_lane_change_enabled\n plan_send.lateralPlan.autoLaneChangeTimer = int(AUTO_LCA_START_TIME) - int(self.auto_lane_change_timer)\n\n pm.send('lateralPlan', plan_send)\n\n if LOG_MPC:\n dat = messaging.new_message('liveMpc')\n dat.liveMpc.x = list(self.mpc_solution.x)\n dat.liveMpc.y = list(self.mpc_solution.y)\n dat.liveMpc.psi = list(self.mpc_solution.psi)\n dat.liveMpc.curvature = list(self.mpc_solution.curvature)\n dat.liveMpc.cost = self.mpc_solution.cost\n pm.send('liveMpc', dat)\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.column_stack"
]
]
|
black938/RelationExtractionProject | [
"aeb4475adbca2ca112684fbfe47bb0b9d6f527e4"
]
| [
"ServiceRelationExtraction/Attention.py"
]
| [
"import logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nlogger = logging.getLogger(__name__)\n\nclass DotAttention(nn.Module):\n def __init__(self, dropout=0.0):\n super(DotAttention, self).__init__()\n self.dropout = dropout\n\n def forward(self, Q, K, V, mask_out=None, head_mask=None):\n \"\"\"\n 一般输入信息 X 时,假设 K = V = X\n\n att_weight = softmax( score_func(q, k) )\n att = sum( att_weight * v )\n\n :param Q: [..., L, H]\n :param K: [..., S, H]\n :param V: [..., S, H]\n :param mask_out: [..., 1, S]\n :return:\n \"\"\"\n H = Q.size(-1)\n\n scale = float(H)**0.5\n attention_weight = torch.matmul(Q, K.transpose(-1, -2)) / scale\n\n if mask_out is not None:\n # 当 DotAttention 单独使用时(几乎不会),保证维度一样\n while mask_out.dim() != Q.dim():\n mask_out = mask_out.unsqueeze(1)\n attention_weight.masked_fill_(mask_out, -1e8)\n\n attention_weight = F.softmax(attention_weight, dim=-1)\n\n attention_weight = F.dropout(attention_weight, self.dropout)\n\n # mask heads if we want to:\n # multi head 才会使用\n if head_mask is not None:\n attention_weight = attention_weight * head_mask\n\n attention_out = torch.matmul(attention_weight, V)\n\n return attention_out, attention_weight\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, embed_dim, num_heads, dropout=0.0, output_attentions=True):\n \"\"\"\n :param embed_dim: 输入的维度,必须能被 num_heads 整除\n :param num_heads: attention 的个数\n :param dropout: float。\n \"\"\"\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.output_attentions = output_attentions\n self.head_dim = int(embed_dim / num_heads)\n self.all_head_dim = self.head_dim * num_heads\n assert self.all_head_dim == embed_dim, logger.error(\n f\"embed_dim{embed_dim} must be divisible by num_heads{num_heads}\")\n\n self.q_in = nn.Linear(embed_dim, self.all_head_dim)\n self.k_in = nn.Linear(embed_dim, self.all_head_dim)\n self.v_in = nn.Linear(embed_dim, self.all_head_dim)\n self.attention = DotAttention(dropout=dropout)\n self.out = nn.Linear(self.all_head_dim, embed_dim)\n\n def forward(self, Q, K, V, key_padding_mask=None, attention_mask=None, head_mask=None):\n \"\"\"\n :param Q: [B, L, Hs]\n :param K: [B, S, Hs]\n :param V: [B, S, Hs]\n :param key_padding_mask: [B, S] 为 1/True 的地方需要 mask\n :param attention_mask: [S] / [L, S] 指定位置 mask 掉, 为 1/True 的地方需要 mask\n :param head_mask: [N] 指定 head mask 掉, 为 1/True 的地方需要 mask\n \"\"\"\n B, L, Hs = Q.shape\n S = V.size(1)\n N, H = self.num_heads, self.head_dim\n\n q = self.q_in(Q).view(B, L, N, H).transpose(1, 2) # [B, N, L, H]\n k = self.k_in(K).view(B, S, N, H).transpose(1, 2) # [B, N, S, H]\n v = self.v_in(V).view(B, S, N, H).transpose(1, 2) # [B, N, S, H]\n\n if key_padding_mask is not None:\n key_padding_mask = key_padding_mask.ne(0)\n key_padding_mask = key_padding_mask.unsqueeze(1).unsqueeze(1)\n\n if attention_mask is not None:\n attention_mask = attention_mask.ne(0)\n if attention_mask.dim() == 1:\n attention_mask = attention_mask.unsqueeze(0)\n elif attention_mask.dim() == 2:\n attention_mask = attention_mask.unsqueeze(0).unsqueeze(0).expand(B, -1, -1, -1)\n else:\n raise ValueError(f'attention_mask dim must be 1 or 2, can not be {attention_mask.dim()}')\n\n if key_padding_mask is None:\n mask_out = attention_mask if attention_mask is not None else None\n else:\n mask_out = (key_padding_mask + attention_mask).ne(0) if attention_mask is not None else key_padding_mask\n\n if head_mask is not None:\n head_mask = head_mask.eq(0)\n head_mask = head_mask.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n\n 
attention_out, attention_weight = self.attention(q, k, v, mask_out=mask_out, head_mask=head_mask)\n\n attention_out = attention_out.transpose(1, 2).reshape(B, L, N * H) # [B, N, L, H] -> [B, L, N * H]\n\n # concat all heads, and do output linear\n attention_out = self.out(attention_out) # [B, L, N * H] -> [B, L, H]\n\n if self.output_attentions:\n return attention_out, attention_weight\n else:\n return attention_out,\n\n\nif __name__ == '__main__':\n from utils import seq_len_to_mask\n\n q = torch.randn(4, 6, 20) # [B, L, H]\n k = v = torch.randn(4, 5, 20) # [B, S, H]\n key_padding_mask = seq_len_to_mask([5, 4, 3, 2], max_len=5)\n attention_mask = torch.tensor([1, 0, 0, 1, 0]) # 为1 的地方 mask 掉\n head_mask = torch.tensor([0, 1]) # 为1 的地方 mask 掉\n\n m = MultiHeadAttention(embed_dim=20, num_heads=2, dropout=0.0, output_attentions=True)\n ao, aw = m(q, k, v, key_padding_mask=key_padding_mask, attention_mask=attention_mask, head_mask=head_mask)\n print(ao.shape, aw.shape) # [B, L, H] [B, N, L, S]\n print(ao)\n print(aw.unbind(1))\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.matmul",
"torch.randn"
]
]
|
ReEn-Neom/ReEn.Neom-source-code- | [
"11ec834d5eac5a5a63c71f6b41107769dafc591c"
]
| [
"Manufacture/metal/trash_classifier-master/pi_trash_classifier.py"
]
| [
"import keras\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\n\nfrom keras.models import Model, load_model\nfrom keras.applications import mobilenet\nfrom keras.applications.mobilenet import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.utils.generic_utils import CustomObjectScope\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport time\nimport cv2\nimport os\n\ndef pp_image():\n img = image.load_img('pic.png', target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n return np.asarray(x)\n\nprediction_list=['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']\nmodel=load_model('models/model1.h5', custom_objects={'relu6': mobilenet.relu6})\n\ncamera = PiCamera()\nrawCapture=PiRGBArray(camera)\n\nfor i in range(10):\n time.sleep(0.5)\n\n try:\n import ipdb; ipdb.set_trace()\n # Access the image data.\n camera.capture(rawCapture, format='rgb')\n img=rawCapture.array\n cv2.imwrite('pic.png', img)\n #import ipdb; ipdb.set_trace()\n pred_img=pp_image()\n yo=model.predict(pred_img)\n pred=prediction_list[np.argmax(yo)]\n cv2.putText(img, pred, (10,1000), cv2.FONT_HERSHEY_SIMPLEX, 5, (0,0,0), 5, False)\n name='img'+str(i)+'.png'\n cv2.imwrite(os.path.join('prediction_images', name), img)\n rawCapture.truncate(0)\n #print(\"Gray value of first pixel: \", img[0, 0])\n except:\n print('Could not perform prediction')\n\ncamera.stop_preview()\n"
]
| [
[
"numpy.argmax",
"numpy.asarray",
"numpy.expand_dims"
]
]
|
junhoher/pytorch_geometric | [
"5abedb87d503ceeb8061acf74a702d4f019f778d"
]
| [
"torch_geometric/datasets/tu_dataset.py"
]
| [
"import os\nimport os.path as osp\nimport shutil\n\nimport torch\nfrom torch_geometric.data import InMemoryDataset, download_url, extract_zip\nfrom torch_geometric.io import read_tu_data\n\n\nclass TUDataset(InMemoryDataset):\n r\"\"\"A variety of graph kernel benchmark datasets, *.e.g.* \"IMDB-BINARY\",\n \"REDDIT-BINARY\" or \"PROTEINS\", collected from the `TU Dortmund University\n <http://graphkernels.cs.tu-dortmund.de>`_.\n In addition, this dataset wrapper provides `cleaned dataset versions\n <https://github.com/nd7141/graph_datasets>`_ as motivated by the\n `\"Understanding Isomorphism Bias in Graph Data Sets\"\n <https://arxiv.org/abs/1910.12091>`_ paper, containing only non-isomorphic\n graphs.\n\n .. note::\n Some datasets may not come with any node labels.\n You can then either make use of the argument :obj:`use_node_attr`\n to load additional continuous node attributes (if present) or provide\n synthetic node features using transforms such as\n like :class:`torch_geometric.transforms.Constant` or\n :class:`torch_geometric.transforms.OneHotDegree`.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The `name <http://graphkernels.cs.tu-dortmund.de>`_ of\n the dataset.\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. (default: :obj:`None`)\n use_node_attr (bool, optional): If :obj:`True`, the dataset will\n contain additional continuous node attributes (if present).\n (default: :obj:`False`)\n use_edge_attr (bool, optional): If :obj:`True`, the dataset will\n contain additional continuous edge attributes (if present).\n (default: :obj:`False`)\n cleaned: (bool, optional): If :obj:`True`, the dataset will\n contain only non-isomorphic graphs. 
(default: :obj:`False`)\n \"\"\"\n\n url = ('https://ls11-www.cs.tu-dortmund.de/people/morris/'\n 'graphkerneldatasets')\n cleaned_url = ('https://raw.githubusercontent.com/nd7141/'\n 'graph_datasets/master/datasets')\n\n def __init__(self, root, name, transform=None, pre_transform=None,\n pre_filter=None, use_node_attr=False, use_edge_attr=False,\n cleaned=False):\n self.name = name\n self.cleaned = cleaned\n super(TUDataset, self).__init__(root, transform, pre_transform,\n pre_filter)\n self.data, self.slices = torch.load(self.processed_paths[0])\n if self.data.x is not None and not use_node_attr:\n num_node_attributes = self.num_node_attributes\n self.data.x = self.data.x[:, num_node_attributes:]\n if self.data.edge_attr is not None and not use_edge_attr:\n num_edge_attributes = self.num_edge_attributes\n self.data.edge_attr = self.data.edge_attr[:, num_edge_attributes:]\n\n @property\n def raw_dir(self):\n name = 'raw{}'.format('_cleaned' if self.cleaned else '')\n return osp.join(self.root, self.name, name)\n\n @property\n def processed_dir(self):\n name = 'processed{}'.format('_cleaned' if self.cleaned else '')\n return osp.join(self.root, self.name, name)\n\n @property\n def num_node_labels(self):\n if self.data.x is None:\n return 0\n for i in range(self.data.x.size(1)):\n if self.data.x[:, i:].sum() == self.data.x.size(0):\n return self.data.x.size(1) - i\n return 0\n\n @property\n def num_node_attributes(self):\n if self.data.x is None:\n return 0\n return self.data.x.size(1) - self.num_node_labels\n\n @property\n def num_edge_labels(self):\n if self.data.edge_attr is None:\n return 0\n for i in range(self.data.edge_attr.size(1)):\n if self.data.edge_attr[:, i:].sum() == self.data.edge_attr.size(0):\n return self.data.edge_attr.size(1) - i\n return 0\n\n @property\n def num_edge_attributes(self):\n if self.data.edge_attr is None:\n return 0\n return self.data.edge_attr.size(1) - self.num_edge_labels\n\n @property\n def raw_file_names(self):\n names = ['A', 'graph_indicator']\n return ['{}_{}.txt'.format(self.name, name) for name in names]\n\n @property\n def processed_file_names(self):\n return 'data.pt'\n\n def download(self):\n url = self.cleaned_url if self.cleaned else self.url\n folder = osp.join(self.root, self.name)\n path = download_url('{}/{}.zip'.format(url, self.name), folder)\n extract_zip(path, folder)\n os.unlink(path)\n shutil.rmtree(self.raw_dir)\n os.rename(osp.join(folder, self.name), self.raw_dir)\n\n def process(self):\n self.data, self.slices = read_tu_data(self.raw_dir, self.name)\n\n if self.pre_filter is not None:\n data_list = [self.get(idx) for idx in range(len(self))]\n data_list = [data for data in data_list if self.pre_filter(data)]\n self.data, self.slices = self.collate(data_list)\n\n if self.pre_transform is not None:\n data_list = [self.get(idx) for idx in range(len(self))]\n data_list = [self.pre_transform(data) for data in data_list]\n self.data, self.slices = self.collate(data_list)\n\n torch.save((self.data, self.slices), self.processed_paths[0])\n\n def __repr__(self):\n return '{}({})'.format(self.name, len(self))\n"
]
| [
[
"torch.save",
"torch.load"
]
]
|
bfclarke/kipoi | [
"992b41eee8e35b39ae61262d988db974d8583759"
]
| [
"kipoi/pipeline.py"
]
| [
"\"\"\"Whole model pipeline: dataloader + model\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nfrom kipoi_utils.utils import cd\nfrom kipoi_utils.data_utils import numpy_collate_concat\nimport kipoi # for .config module\n# import h5py\nimport numpy as np\nimport six\nfrom tqdm import tqdm\nimport deprecation\nfrom ._version import __version__\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\n\[email protected](deprecated_in=\"0.6.8\", removed_in=\"0.7.0\",\n current_version=__version__,\n details=\"\"\" installing packages in a running python env is error prone.\n Use command line interface of kipoi to install packages.\n \"\"\")\ndef install_model_requirements(model, source=\"kipoi\", and_dataloaders=True):\n \"\"\"Install model dependencies\n\n # Arguments\n model (str): model name\n source (str): model source\n and_dataloaders (bool): if True, install also the dependencies\n for the default dataloader\n \"\"\"\n md = kipoi.get_source(source).get_model_descr(model)\n md.dependencies.install()\n if and_dataloaders:\n if \":\" in md.default_dataloader:\n dl_source, dl_path = md.default_dataloader.split(\":\")\n else:\n dl_source = source\n dl_path = md.default_dataloader\n\n default_dataloader_path = os.path.join(\"/\" + model, dl_path)[1:]\n dl = kipoi.config.get_source(dl_source).get_dataloader_descr(default_dataloader_path)\n dl.dependencies.install()\n\[email protected](deprecated_in=\"0.6.8\", removed_in=\"0.7.0\",\n current_version=__version__,\n details=\"\"\" installing packages in a running python env is error prone.\n Use command line interface of kipoi to install packages.\n \"\"\")\ndef install_dataloader_requirements(dataloader, source=\"kipoi\"):\n \"\"\"Install dataloader dependencies\n\n # Arguments\n datalaoder (str): dataloader name\n source (str): model source\n \"\"\"\n kipoi.get_source(source).get_model_descr(dataloader).dependencies.install()\n\n\ndef validate_kwargs(dataloader, dataloader_kwargs):\n # check that the dataloader_kwargs indeed match\n if not isinstance(dataloader_kwargs, dict):\n raise ValueError(\"Dataloader_kwargs need to be a dictionary\")\n\n missing_arg = []\n req_args = {k for k in dataloader.args\n if not dataloader.args[k].optional}\n missing_arg = req_args - set(dataloader_kwargs.keys())\n if len(missing_arg) > 0:\n logger.warning(\"Required arguments for the dataloader: {0} were not specified\".\n format(\",\".join(list(missing_arg))))\n unused = set(dataloader_kwargs.keys()) - set(dataloader.args.keys())\n if len(unused) > 0:\n logger.warning(\"Some provided dataloader kwargs were not used: {0}\".format(unused))\n return {k: v for k, v in six.iteritems(dataloader_kwargs) if k in dataloader.args}\n\n\nclass Pipeline(object):\n \"\"\"Runs model predictions from raw files:\n\n ```\n raw files --(dataloader)--> data batches --(model)--> prediction\n ```\n\n # Arguments\n model: model returned by `kipoi.get_model`\n dataloader_cls: dataloader class returned by `kipoi.get_dataloader_factory`\n of `kipoi.get_model().default_dataloader`\n \"\"\"\n\n def __init__(self, model, dataloader_cls):\n self.model = model\n self.dataloader_cls = dataloader_cls\n\n # validate if model and datalaoder_cls are compatible\n if not self.model.schema.compatible_with_schema(self.dataloader_cls.get_output_schema()):\n logger.warning(\"dataloader.output_schema is not compatible with model.schema\")\n else:\n logger.info(\"dataloader.output_schema is compatible with 
model.schema\")\n\n def predict_example(self, batch_size=32, output_file=None):\n \"\"\"Run model prediction for the example file\n\n # Arguments\n batch_size: batch_size\n output_file: if not None, inputs and predictions are stored to `output_file` path\n **kwargs: Further arguments passed to batch_iter\n \"\"\"\n logger.info('Initialized data generator. Running batches...')\n\n from kipoi.writers import get_writer\n from kipoi.cli.main import prepare_batch\n\n if output_file is not None:\n output_file = os.path.abspath(output_file)\n if os.path.exists(output_file):\n raise ValueError(\"Output file: {} already exists.\".format(output_file))\n with cd(self.dataloader_cls.source_dir):\n # init the dataloader\n dl = self.dataloader_cls.init_example()\n logger.info('Returned data schema correct')\n\n if output_file is not None:\n writer = get_writer(output_file, dl.get_output_schema().metadata)\n\n it = dl.batch_iter(batch_size=batch_size)\n\n # test that all predictions go through\n pred_list = []\n for i, batch in enumerate(tqdm(it)):\n if i == 0 and not self.dataloader_cls.get_output_schema().compatible_with_batch(batch):\n logger.warning(\"First batch of data is not compatible with the dataloader schema.\")\n pred_batch = self.model.predict_on_batch(batch['inputs'])\n pred_list.append(pred_batch)\n\n if output_file is not None:\n output_batch = prepare_batch(batch, pred_batch, keep_inputs=True)\n writer.batch_write(output_batch)\n\n if output_file is not None:\n writer.close()\n\n logger.info('predict_example done!')\n return numpy_collate_concat(pred_list)\n\n def predict(self, dataloader_kwargs, batch_size=32, dataloader_hook=None,\n **kwargs):\n \"\"\"\n # Arguments\n dataloader_kwargs: Keyword arguments passed to the pre-processor\n **kwargs: Further arguments passed to batch_iter\n\n # Returns\n np.array, dict, list: Predict the whole array\n \"\"\"\n pred_metadata_list = [batch for batch in tqdm(\n self.predict_generator(dataloader_kwargs,\n batch_size=batch_size,\n dataloader_hook=dataloader_hook,\n **kwargs))]\n\n if len(pred_metadata_list) == 0:\n predictions = np.array([])\n metadata = {}\n else:\n pred_metadata_list_unzipped = list(zip(*pred_metadata_list))\n predictions = numpy_collate_concat(pred_metadata_list_unzipped[0])\n metadata = numpy_collate_concat(pred_metadata_list_unzipped[1])\n\n if dataloader_hook is not None:\n result = (predictions, metadata)\n else:\n result = predictions\n\n return result\n\n def predict_generator(self, dataloader_kwargs, batch_size=32, layer=None,\n dataloader_hook=None, **kwargs):\n \"\"\"Prediction generator\n\n # Arguments\n dataloader_kwargs: Keyword arguments passed to the dataloader\n batch_size: Size of batches produced by the dataloader\n layer: If not None activation of specified layer will be returned. Only possible for models that are a\n subclass of `LayerActivationMixin`.\n **kwargs: Further arguments passed to batch_iter\n\n # Yields\n - `dict`: model batch prediction\n \"\"\"\n logger.info('Initialized data generator. 
Running batches...')\n\n validate_kwargs(self.dataloader_cls, dataloader_kwargs)\n it = self.dataloader_cls(**dataloader_kwargs).batch_iter(batch_size=batch_size, **kwargs)\n\n from .model import LayerActivationMixin\n if layer is not None and not isinstance(self.model, LayerActivationMixin):\n raise Exception(\"Attempting to extract layer activation (argument `layer` is not None) on a model that\"\n \" is not a subclass of `LayerActivationMixin`.\")\n\n for i, batch in enumerate(it):\n if i == 0 and not self.dataloader_cls.get_output_schema().compatible_with_batch(batch):\n logger.warning(\"First batch of data is not compatible with the dataloader schema.\")\n\n # TODO: use dataloader_hook (if not None) on each element of batch\n # TODO: return tuple of (predictions, metadata)\n if layer is None:\n predictions = self.model.predict_on_batch(batch['inputs'])\n else:\n predictions = self.model.predict_activation_on_batch(batch['inputs'], layer=layer)\n\n if dataloader_hook is not None:\n metadata = dataloader_hook(batch['metadata'])\n else:\n metadata = None\n\n yield (predictions, metadata)\n\n def predict_to_file(self, output_file, dataloader_kwargs, batch_size=32, keep_inputs=False, **kwargs):\n \"\"\"Make predictions and write them iteratively to a file\n\n # Arguments\n output_file: output file path. File format is inferred from the file path ending. Available file formats are:\n 'bed', 'h5', 'hdf5', 'tsv'\n dataloader_kwargs: Keyword arguments passed to the dataloader\n batch_size: Batch size used for the dataloader\n keep_inputs: if True, inputs and targets will also be written to the output file.\n **kwargs: Further arguments passed to batch_iter\n \"\"\"\n from kipoi.writers import get_writer\n from kipoi.cli.main import prepare_batch\n\n # setup dataloader\n validate_kwargs(self.dataloader_cls, dataloader_kwargs)\n dl = self.dataloader_cls(**dataloader_kwargs)\n it = dl.batch_iter(batch_size=batch_size, **kwargs)\n writer = get_writer(output_file, dl.get_output_schema().metadata)\n\n for i, batch in enumerate(tqdm(it)):\n if i == 0 and not self.dataloader_cls.get_output_schema().compatible_with_batch(batch):\n logger.warning(\"First batch of data is not compatible with the dataloader schema.\")\n pred_batch = self.model.predict_on_batch(batch['inputs'])\n output_batch = prepare_batch(batch, pred_batch, keep_inputs=keep_inputs)\n writer.batch_write(output_batch)\n writer.close()\n\n def input_grad(self, dataloader_kwargs, batch_size=32, filter_idx=None, avg_func=None, layer=None,\n final_layer=True, selected_fwd_node=None, pre_nonlinearity=False, **kwargs):\n \"\"\"Get input gradients\n\n # Arguments\n dataloader_kwargs: Keyword arguments passed to the dataloader\n batch_size: Batch size used for the dataloader\n filter_idx: filter index of `layer` for which the gradient should be returned\n avg_func: String name of averaging function to be applied across filters in layer `layer`\n layer: layer from which backwards the gradient should be calculated\n final_layer: Use the final (classification) layer as `layer`\n selected_fwd_node: None - not supported by KerasModel at the moment\n pre_nonlinearity: Try to use the layer output prior to activation (will not always be possible in an\n automatic way)\n **kwargs: Further arguments passed to input_grad\n\n # Returns\n dict: A dictionary of all model inputs and the gradients. 
Gradients are stored in key 'grads'\n \"\"\"\n\n batches = [batch for batch in tqdm(self.input_grad_generator(dataloader_kwargs, batch_size, filter_idx,\n avg_func, layer, final_layer,\n selected_fwd_node, pre_nonlinearity, **kwargs))]\n return numpy_collate_concat(batches)\n\n def input_grad_generator(self, dataloader_kwargs, batch_size=32, filter_idx=None, avg_func=None, layer=None,\n final_layer=True, selected_fwd_node=None, pre_nonlinearity=False, **kwargs):\n \"\"\"Get input gradients\n\n # Arguments\n dataloader_kwargs: Keyword arguments passed to the dataloader\n batch_size: Batch size used for the dataloader\n filter_idx: filter index of `layer` for which the gradient should be returned\n avg_func: String name of averaging function to be applied across filters in layer `layer`\n layer: layer from which backwards the gradient should be calculated\n final_layer: Use the final (classification) layer as `layer`\n selected_fwd_node: None - not supported by KerasModel at the moment\n pre_nonlinearity: Try to use the layer output prior to activation (will not always be possible in an\n automatic way)\n **kwargs: Further arguments passed to input_grad\n\n # Yields\n - `dict`: A dictionary of all model inputs and the gradients. Gradients are stored in key 'grads'\n \"\"\"\n\n if not isinstance(self.model, kipoi.model.GradientMixin):\n raise Exception(\"Model does not implement GradientMixin, so `input_grad` is not available.\")\n\n logger.info('Initialized data generator. Running batches...')\n\n validate_kwargs(self.dataloader_cls, dataloader_kwargs)\n it = self.dataloader_cls(**dataloader_kwargs).batch_iter(batch_size=batch_size, **kwargs)\n\n for i, batch in enumerate(it):\n if i == 0 and not self.dataloader_cls.get_output_schema().compatible_with_batch(batch):\n logger.warning(\"First batch of data is not compatible with the dataloader schema.\")\n\n pred = self.model.input_grad(batch['inputs'], filter_idx, avg_func, layer, final_layer,\n selected_fwd_node, pre_nonlinearity, **kwargs)\n\n # store the predictions with the inputs, so that they can be analysed together afterwards.\n batch['grads'] = pred\n yield batch\n"
]
| [
[
"numpy.array"
]
]
|
balefebvre/phylib | [
"e610d19c464363a16a5624a68f94a639a33eb618"
]
| [
"phylib/traces/tests/test_waveform.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\"Tests of waveform loader.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal as ae\nfrom pytest import raises\n\nfrom phylib.io.mock import artificial_traces, artificial_spike_samples\nfrom ..waveform import _slice, WaveformLoader, WaveformExtractor\n\n\n#------------------------------------------------------------------------------\n# Tests extractor\n#------------------------------------------------------------------------------\n\ndef test_extract_simple():\n weak = 1.\n strong = 2.\n nc = 4\n ns = 20\n\n data = np.random.uniform(size=(ns, nc), low=0., high=1.)\n\n data[10, 0] = 0.5\n data[11, 0] = 1.5\n data[12, 0] = 1.0\n\n data[10, 1] = 1.5\n data[11, 1] = 2.5\n data[12, 1] = 2.0\n\n component = np.array([[10, 0],\n [10, 1],\n [11, 0],\n [11, 1],\n [12, 0],\n [12, 1],\n ])\n\n we = WaveformExtractor(extract_before=3,\n extract_after=5,\n )\n we.set_thresholds(weak=weak, strong=strong)\n\n # _component()\n comp = we._component(component, n_samples=ns)\n ae(comp.comp_s, [10, 10, 11, 11, 12, 12])\n ae(comp.comp_ch, [0, 1, 0, 1, 0, 1])\n assert (comp.s_min, comp.s_max) == (10 - 3, 12 + 4)\n\n # _normalize()\n assert we._normalize(weak) == 0\n assert we._normalize(strong) == 1\n ae(we._normalize([(weak + strong) / 2.]), [.5])\n\n # _comp_wave()\n wave = we._comp_wave(data, comp)\n assert wave.shape == (3 + 5 + 1, nc)\n ae(wave[3:6, :], [[0.5, 1.5, 0., 0.],\n [1.5, 2.5, 0., 0.],\n [1.0, 2.0, 0., 0.]])\n\n # masks()\n masks = we.masks(data, wave, comp)\n ae(masks, [.5, 1., 0, 0])\n\n # spike_sample_aligned()\n s = we.spike_sample_aligned(wave, comp)\n assert 11 <= s < 12\n\n # extract()\n wave_e = we.extract(data, s)\n assert wave_e.shape[1] == wave.shape[1]\n ae(wave[3:6, :2], wave_e[3:6, :2])\n\n # align()\n wave_a = we.align(wave_e, s)\n assert wave_a.shape == (3 + 5, nc)\n\n # Test final call.\n s_f, masks_f, wave_f = we(component, data=data, data_t=data)\n assert s_f == s\n ae(masks_f, masks)\n ae(wave_f, wave_a)\n\n # Tests with a different order.\n we = WaveformExtractor(extract_before=3,\n extract_after=5,\n thresholds={'weak': weak,\n 'strong': strong},\n )\n s_f_o, masks_f_o, wave_f_o = we(component, data=data, data_t=data)\n assert s_f == s_f_o\n assert np.allclose(wave_f, wave_f_o)\n ae(masks_f_o, [0.5, 1., 0., 0.])\n\n\n#------------------------------------------------------------------------------\n# Tests utility functions\n#------------------------------------------------------------------------------\n\ndef test_slice():\n assert _slice(0, (20, 20)) == slice(0, 20, None)\n\n\n#------------------------------------------------------------------------------\n# Tests loader\n#------------------------------------------------------------------------------\n\ndef waveform_loader(do_filter=False, mask_threshold=None):\n n_samples_trace, n_channels = 1000, 5\n h = 10\n n_samples_waveforms = 2 * h\n n_spikes = n_samples_trace // (2 * n_samples_waveforms)\n sample_rate = 2000.\n\n traces = artificial_traces(n_samples_trace, n_channels)\n spike_samples = artificial_spike_samples(n_spikes,\n max_isi=2 * n_samples_waveforms)\n\n loader = WaveformLoader(traces=traces,\n spike_samples=spike_samples,\n n_samples_waveforms=n_samples_waveforms,\n filter_order=3 if do_filter else None,\n sample_rate=sample_rate,\n )\n return loader\n\n\ndef test_loader_simple():\n 
loader = waveform_loader()\n spike_samples = loader.spike_samples\n traces = loader.traces\n n_samples_traces, n_channels = traces.shape\n n_samples_waveforms = loader.n_samples_waveforms\n h = n_samples_waveforms // 2\n\n waveforms = loader[10:20]\n assert waveforms.shape == (10, n_samples_waveforms, n_channels)\n t = spike_samples[15]\n w1 = waveforms[5, ...]\n w2 = traces[t - h:t + h, :]\n assert np.allclose(w1, w2)\n assert np.allclose(loader[15], w2)\n\n\ndef test_edges():\n loader = waveform_loader(do_filter=True)\n ns = loader.n_samples_waveforms + sum(loader._filter_margin)\n nc = loader.n_channels\n\n assert loader._load_at(0).shape == (ns, nc)\n assert loader._load_at(5).shape == (ns, nc)\n assert loader._load_at(loader.n_samples_trace - 5).shape == (ns, nc)\n assert loader._load_at(loader.n_samples_trace - 1).shape == (ns, nc)\n\n\ndef test_loader_filter_1():\n traces = np.c_[np.arange(20), np.arange(20, 40)].astype(np.int32)\n n_samples_trace, n_channels = traces.shape\n h = 3\n\n def my_filter(x, axis=0):\n return x * x\n\n loader = WaveformLoader(spike_samples=np.arange(20),\n n_samples_waveforms=(h, h),\n )\n assert loader[0].shape == (1, 2 * h, loader.n_channels)\n\n loader.traces = traces\n loader._filter = my_filter\n\n t = 10\n waveform_filtered = loader[t]\n traces_filtered = my_filter(traces)\n assert np.allclose(waveform_filtered, traces_filtered[t - h:t + h, :])\n\n\ndef test_loader_filter_2():\n loader = waveform_loader(do_filter=True)\n ns = loader.n_samples_waveforms\n nc = loader.n_channels\n\n with raises(ValueError):\n loader._load_at(-10)\n\n assert loader[0].shape == (1, ns, nc)\n assert loader[:].shape == (loader.n_spikes, ns, nc)\n\n\ndef test_loader_filter_3():\n loader = waveform_loader()\n ns = loader.n_samples_waveforms\n\n w = loader.get([0], channels=[0])\n assert w.shape == (1, ns, 1)\n"
]
| [
[
"numpy.array",
"numpy.testing.assert_array_equal",
"numpy.allclose",
"numpy.random.uniform",
"numpy.arange"
]
]
|
TateWalker/AV-Query | [
"1985139abf858de55413da6cf1d411a4fddd21ef"
]
| [
"plotAV.py"
]
| [
"# plotAV.py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\ndef plotData(avData):\n\tavData = np.asarray(avData)\n\tnewAvData = np.zeros((5,len(avData)))\n\t# print(avData)\n\tfig = plt.figure()\n\tplt.subplot(2, 1, 1)\n\tcol = ''\n\n\n\tlabels = ['Center','North','East','South','West']\n\tfor i in range(0,len(avData)):\n\t\tfor j in range(0,5):\n\t\t\tnewAvData[j][i] = avData[i][j]\n\tfor i in range(1,5):\n\t\tif i == 1:\n\t\t\tcol = 'g'\n\t\telif i == 2:\n\t\t\tcol = 'r'\n\t\telif i == 3:\n\t\t\tcol = 'k'\n\t\telif i == 4:\n\t\t\tcol = 'b'\n\t\tplt.scatter(newAvData[0,:],newAvData[i,:], color = col ,label = labels[i],s = 25)\n\tplt.legend(loc='best',prop = {'size':6})\n\tplt.title('Center Av vs 20 Arcminutes')\n\tplt.ylabel('Av at 20 Arcminutes')\n\tplt.tight_layout(pad = 2)\n\tplt.subplot(2, 1, 2)\n\n\tfor i in range(1,5):\n\t\tif i == 1:\n\t\t\tcol = 'g'\n\t\telif i == 2:\n\t\t\tcol = 'r'\n\t\telif i == 3:\n\t\t\tcol = 'k'\n\t\telif i == 4:\n\t\t\tcol = 'b'\n\t\tplt.scatter(newAvData[0,:], np.divide(newAvData[i,:],newAvData[0,:]), color =col,label = labels[i], s = 25)\n\t# plt.title('Center Av vs 20 Arcminutes/Center Av')\n\tplt.ylabel('Av at 20 Arcminutes/Center Av')\n\tplt.xlabel('Center Av')\n\tplt.legend(loc='best',prop = {'size':6})\n\tplt.show()\n\tfig.savefig(os.path.join('Pictures',\"randomGal.png\"))\n\nimport csv\ncurrData = [None] * 5\navData=[]\nwith open('RandomAv.csv') as csvinp:\n\treader = csv.reader(csvinp,delimiter = ',')\n\tcount = 3\n\tcount2 = 0\n\tcount3 = 0\n\tfor row in reader:\n\t\tcount2 +=1\n\t\tif count != 0:\n\t\t\tcount-=1\n\t\telif count2%2 == 0:\n\t\t\tfor j in range(0,5):\n\t\t\t\tcurrData[j] = row[j]\n\t\t\tcurrData = currData[:]\n\t\t\tavData.append(currData)\n\t\t\tcount3+=1\nplotData(avData)\n\n"
]
| [
[
"numpy.divide",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
]
|
cmusatyalab/mega-nerf | [
"306e06cc316dd4f5c84d0610308bcbc208228fc3"
]
| [
"scripts/render_images.py"
]
| [
"import os\nimport traceback\nfrom argparse import Namespace\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom PIL import Image\nfrom torch.distributed.elastic.multiprocessing.errors import record\nfrom tqdm import tqdm\n\nfrom mega_nerf.image_metadata import ImageMetadata\nfrom mega_nerf.opts import get_opts_base\nfrom mega_nerf.runner import Runner\n\n\ndef _get_render_opts() -> Namespace:\n parser = get_opts_base()\n\n parser.add_argument('--input', type=str, required=True)\n parser.add_argument('--output', type=str, required=True)\n parser.add_argument('--dataset_path', type=str, required=True)\n parser.add_argument('--centroids_path', type=str, required=True)\n parser.add_argument('--save_depth_npz', default=False, action='store_true')\n parser.add_argument('--resume', default=False, action='store_true')\n\n return parser.parse_args()\n\n\[email protected]_mode()\ndef _render_images(hparams: Namespace) -> None:\n runner = Runner(hparams, False)\n\n input = Path(hparams.input)\n centroids = torch.load(hparams.centroids_path, map_location='cpu')['centroids']\n\n c2ws = []\n poses_path = input / 'poses.txt'\n with poses_path.open() as f:\n for line in f:\n c2ws.append(torch.FloatTensor([float(x) for x in line.strip().split()]).view(3, 4))\n\n intrinsics = []\n intrinsics_path = input / 'intrinsics.txt'\n with intrinsics_path.open() as f:\n for line in f:\n intrinsics.append([float(x) / hparams.val_scale_factor for x in line.strip().split()])\n\n embeddings = []\n embeddings_path = input / 'embeddings.txt'\n with embeddings_path.open() as f:\n for line in f:\n embeddings.append(int(line.strip()))\n\n output = Path(hparams.output)\n\n rank = int(os.environ.get('RANK', '0'))\n if rank == 0:\n (output / 'rgbs').mkdir(parents=True, exist_ok=hparams.resume)\n (output / 'depths').mkdir(parents=True, exist_ok=hparams.resume)\n (output / 'cells').mkdir(parents=True, exist_ok=hparams.resume)\n if hparams.save_depth_npz:\n (output / 'depths_npz').mkdir(parents=True, exist_ok=hparams.resume)\n\n if 'RANK' in os.environ:\n dist.barrier()\n world_size = int(os.environ['WORLD_SIZE'])\n else:\n world_size = 1\n\n runner.nerf.eval()\n if runner.bg_nerf is not None:\n runner.bg_nerf.eval()\n\n coordinate_info = torch.load(Path(hparams.dataset_path) / 'coordinates.pt', map_location='cpu')\n pose_scale_factor = coordinate_info['pose_scale_factor']\n\n for i in tqdm(np.arange(rank, len(c2ws), world_size)):\n cell_path = output / 'cells' / '{0:06d}.jpg'.format(i)\n\n if hparams.resume and cell_path.exists():\n try:\n test = np.array(Image.open(cell_path)) # verify with last visualization to be written\n print('skipping {} {}'.format(cell_path, test[0]))\n continue\n except:\n traceback.print_exc()\n pass\n\n W = int(intrinsics[i][0])\n H = int(intrinsics[i][1])\n results, rays = runner.render_image(\n ImageMetadata(Path(''), c2ws[i], W, H, torch.FloatTensor(intrinsics[i][2:]), embeddings[i], None, False))\n\n typ = 'fine' if 'rgb_fine' in results else 'coarse'\n result_rgbs = results[f'rgb_{typ}']\n\n result_rgbs = result_rgbs.view(H, W, 3) * 255\n rgbs = result_rgbs.byte().cpu().numpy().astype(np.uint8)\n img = Image.fromarray(rgbs)\n img.save(output / 'rgbs' / '{0:06d}.jpg'.format(i))\n\n depth = torch.nan_to_num(results[f'depth_{typ}']).view(H, W).cpu()\n\n if hparams.save_depth_npz:\n np.save(str(output / 'depths_npz' / '{0:06d}.npy'.format(i)), (depth * pose_scale_factor).numpy())\n\n if f'bg_depth_{typ}' in results:\n to_use = 
torch.nan_to_num(results[f'fg_depth_{typ}']).view(-1)\n while to_use.shape[0] > 2 ** 24:\n to_use = to_use[::2]\n ma = torch.quantile(to_use, 0.95)\n depth = depth.clamp_max(ma)\n\n depth_vis = Runner.visualize_scalars(torch.log(depth + 1e-8).view(H, W).cpu())\n Image.fromarray(depth_vis.astype(np.uint8)).save(output / 'depths' / '{0:06d}.jpg'.format(i))\n\n rays = rays.view(H, W, -1).cpu()\n locations = rays[..., :3] + rays[..., 3:6] * depth.unsqueeze(-1)\n\n cluster_assignments = torch.cdist(locations.view(-1, 3)[:, :3], centroids).argmin(dim=1).view(H, W).float()\n cluster_assignments /= len(centroids)\n centroid_colors = cv2.cvtColor(cv2.applyColorMap((cluster_assignments * 255).byte().numpy(), cv2.COLORMAP_HSV),\n cv2.COLOR_BGR2RGB)\n\n centroid_colors = cv2.addWeighted(rgbs, 0.7, centroid_colors, 0.3, 0)\n Image.fromarray(centroid_colors.astype(np.uint8)).save(cell_path)\n\n\n@record\ndef main(hparams: Namespace) -> None:\n assert hparams.ckpt_path is not None or hparams.container_path is not None\n\n if hparams.detect_anomalies:\n with torch.autograd.detect_anomaly():\n _render_images(hparams)\n else:\n _render_images(hparams)\n\n\nif __name__ == '__main__':\n main(_get_render_opts())\n"
]
| [
[
"torch.nan_to_num",
"torch.FloatTensor",
"torch.quantile",
"torch.inference_mode",
"torch.autograd.detect_anomaly",
"torch.load",
"torch.log",
"torch.distributed.barrier"
]
]
|
tamiya-onodera/qiskit-optimization | [
"4e6a22bb13a13e504de607a02f5afdfd33abfb7c"
]
| [
"qiskit_optimization/problems/linear_expression.py"
]
| [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Linear expression interface.\"\"\"\n\nfrom typing import List, Union, Dict, Any\n\nfrom numpy import ndarray\nfrom scipy.sparse import spmatrix, dok_matrix\n\nfrom .quadratic_program_element import QuadraticProgramElement\nfrom ..exceptions import QiskitOptimizationError\n\n\nclass LinearExpression(QuadraticProgramElement):\n \"\"\" Representation of a linear expression by its coefficients.\"\"\"\n\n def __init__(self, quadratic_program: Any,\n coefficients: Union[ndarray, spmatrix, List[float],\n Dict[Union[int, str], float]]) -> None:\n \"\"\"Creates a new linear expression.\n\n The linear expression can be defined via an array, a list, a sparse matrix, or a dictionary\n that uses variable names or indices as keys and stores the values internally as a\n dok_matrix.\n\n Args:\n quadratic_program: The parent QuadraticProgram.\n coefficients: The (sparse) representation of the coefficients.\n\n \"\"\"\n super().__init__(quadratic_program)\n self.coefficients = coefficients\n\n def __getitem__(self, i: Union[int, str]) -> float:\n \"\"\"Returns the i-th coefficient where i can be a variable name or index.\n\n Args:\n i: the index or name of the variable corresponding to the coefficient.\n\n Returns:\n The coefficient corresponding to the addressed variable.\n \"\"\"\n if isinstance(i, str):\n i = self.quadratic_program.variables_index[i]\n return self.coefficients[0, i]\n\n def __setitem__(self, i: Union[int, str], value: float) -> None:\n if isinstance(i, str):\n i = self.quadratic_program.variables_index[i]\n self._coefficients[0, i] = value\n\n def _coeffs_to_dok_matrix(self,\n coefficients: Union[ndarray, spmatrix,\n List, Dict[Union[int, str], float]]\n ) -> dok_matrix:\n \"\"\"Maps given 1d-coefficients to a dok_matrix.\n\n Args:\n coefficients: The 1d-coefficients to be mapped.\n\n Returns:\n The given 1d-coefficients as a dok_matrix\n\n Raises:\n QiskitOptimizationError: if coefficients are given in unsupported format.\n \"\"\"\n if isinstance(coefficients, list) or \\\n isinstance(coefficients, ndarray) and len(coefficients.shape) == 1:\n coefficients = dok_matrix([coefficients])\n elif isinstance(coefficients, spmatrix):\n coefficients = dok_matrix(coefficients)\n elif isinstance(coefficients, dict):\n coeffs = dok_matrix((1, self.quadratic_program.get_num_vars()))\n for index, value in coefficients.items():\n if isinstance(index, str):\n index = self.quadratic_program.variables_index[index]\n coeffs[0, index] = value\n coefficients = coeffs\n else:\n raise QiskitOptimizationError(\"Unsupported format for coefficients.\")\n return coefficients\n\n @property\n def coefficients(self) -> dok_matrix:\n \"\"\" Returns the coefficients of the linear expression.\n\n Returns:\n The coefficients of the linear expression.\n \"\"\"\n return self._coefficients\n\n @coefficients.setter\n def coefficients(self,\n coefficients: Union[ndarray, spmatrix,\n List[float], Dict[Union[str, int], float]]\n ) -> None:\n \"\"\"Sets the coefficients of the linear expression.\n\n Args:\n coefficients: The 
coefficients of the linear expression.\n \"\"\"\n self._coefficients = self._coeffs_to_dok_matrix(coefficients)\n\n def to_array(self) -> ndarray:\n \"\"\"Returns the coefficients of the linear expression as array.\n\n Returns:\n An array with the coefficients corresponding to the linear expression.\n \"\"\"\n return self._coefficients.toarray()[0]\n\n def to_dict(self, use_name: bool = False) -> Dict[Union[int, str], float]:\n \"\"\"Returns the coefficients of the linear expression as dictionary, either using variable\n names or indices as keys.\n\n Args:\n use_name: Determines whether to use index or names to refer to variables.\n\n Returns:\n An dictionary with the coefficients corresponding to the linear expression.\n \"\"\"\n if use_name:\n return {self.quadratic_program.variables[k].name: v\n for (_, k), v in self._coefficients.items()}\n else:\n return {k: v for (_, k), v in self._coefficients.items()}\n\n def evaluate(self, x: Union[ndarray, List, Dict[Union[int, str], float]]) -> float:\n \"\"\"Evaluate the linear expression for given variables.\n\n Args:\n x: The values of the variables to be evaluated.\n\n Returns:\n The value of the linear expression given the variable values.\n \"\"\"\n # cast input to dok_matrix if it is a dictionary\n x = self._coeffs_to_dok_matrix(x)\n\n # compute the dot-product of the input and the linear coefficients\n val = (x @ self.coefficients.transpose())[0, 0]\n\n # return the result\n return val\n\n # pylint: disable=unused-argument\n def evaluate_gradient(self, x: Union[ndarray, List, Dict[Union[int, str], float]]) -> ndarray:\n \"\"\"Evaluate the gradient of the linear expression for given variables.\n\n Args:\n x: The values of the variables to be evaluated.\n\n Returns:\n The value of the gradient of the linear expression given the variable values.\n \"\"\"\n\n # extract the coefficients as array and return it\n return self.to_array()\n"
]
| [
[
"scipy.sparse.dok_matrix"
]
]
|
kamoshi/Advent-of-Code | [
"5b78fa467409e8b8c5a16efe31684b8ce493bcee"
]
| [
"2021/Python/day13.py"
]
| [
"import re\n\nimport numpy as np\n\n\npattern_dots = re.compile(r\"([0-9]+),([0-9]+)\")\npattern_folds = re.compile(r\"fold along ([xy])=([0-9]+)\")\n\n\ndef load():\n with open(\"../.input/day13\") as f:\n dots, folds = f.read().split(\"\\n\\n\")\n dots = [(int(x), int(y)) for x, y in pattern_dots.findall(dots)]\n matrix = np.zeros((1 + max(y for _, y in dots), 1 + max(x for x, _ in dots)), dtype=bool)\n for x, y in dots:\n matrix[y, x] = True\n return matrix, [(axis, int(offset)) for axis, offset in pattern_folds.findall(folds)]\n\n\ndef fold(matrix: np.ndarray, axis, offset) -> np.ndarray:\n if axis == \"x\":\n result = matrix[:, :offset]\n right = np.fliplr(matrix[:, offset+1:])\n if result.shape == right.shape:\n result |= right\n else:\n result[:, -right.shape[1]:] |= right\n else:\n result = matrix[:offset, :]\n bottom = np.flipud(matrix[offset+1:, :])\n if result.shape == bottom.shape:\n result |= bottom\n else:\n result[-bottom.shape[0]:, :] |= bottom\n return result\n\n\ndef solve1() -> int:\n dots, folds = load()\n return fold(dots, *folds[0]).sum()\n\n\ndef solve2() -> None:\n dots, folds = load()\n for axis, offset in folds:\n dots = fold(dots, axis, offset)\n [print(\"\".join(line)) for line in np.where(dots, \"#\", \" \")]\n\n\nif __name__ == \"__main__\":\n print(solve1()) # 638\n solve2() # CJCKEBAPB\n"
]
| [
[
"numpy.flipud",
"numpy.where",
"numpy.fliplr"
]
]
|
PerryLewis01/FloodWarning | [
"6bdce9277755ef0826789eb193ddf3f7f3457ab7"
]
| [
"floodsystem/geo.py"
]
| [
"# Copyright (C) 2018 Garth N. Wells\n#\n# SPDX-License-Identifier: MIT\n\"\"\"This module contains a collection of functions related to\ngeographical data.\n\n\"\"\"\n#from curses.ascii import NUL\nimport numpy as np\nfrom .utils import sorted_by_key # noqa\n\nfrom .station import MonitoringStation\n\n\ndef stations_by_distance(stations, p):\n \"\"\"stations_by_distance(stations, p) where p is the location (lat, long) and stations is a list of stations, will return the name town and distance from p in an list of tuples\"\"\"\n locations = np.array([station.coord for station in stations])\n \n # using haversine formula\n # using r as 6371km, numpy uses radians so all angles must be converted from degrees before calculation\n\n distanceToP = 2 * 6371 * np.arcsin(\n np.sqrt(\n (np.sin((np.deg2rad(p[0] - locations[:,0]))/2))**2 + np.cos(np.deg2rad(locations[:,0])) * np.cos(np.deg2rad(p[0])) * (np.sin((np.deg2rad(p[1] - locations[:,1]))/2))**2\n )\n )\n \n #now sort via distance with names and county\n distanceandTown = sorted([(station, distanceToP[i]) for i, station in enumerate(stations)], key = lambda x:x[1])\n # possibel correct version of above list : distanceandTown = sorted([(station.name, station.town, distanceToP[i]) for i, station in enumerate(stations)], key = lambda x:x[2])\n return distanceandTown\n\n\ndef rivers_with_station(stations):\n \"\"\"rivers_with_station(stations) returns a sorted list of the rivers stations are on without repeats\"\"\"\n stationRivers = set()\n for station in stations:\n stationRivers.add(station.river)\n stationRivers = sorted(stationRivers)\n return stationRivers\n\ndef stations_by_river(stations):\n \"\"\"stations_by_river(stations) returns dictionary of rivers (the key) and a sorted list of stations on that river\"\"\"\n stationRivers = set(rivers_with_station(stations)) #sligtly quicker then for sorting later\n riversStations = {} #empty dictionary\n for station in stations:\n if not station.river in riversStations:\n riversStations.update({station.river: list([station.name])}) \n #add item to dictionary if it doesn't exist and create list with it\n else:\n riversStations[station.river] = list(riversStations[station.river]) + [station.name]\n #if it does exist add the station name to the list that already exists\n \n for station in stationRivers:\n riversStations[station] = sorted(riversStations[station])\n #sort the names in the lists for all rivers\n\n return riversStations\n \n\n\n\ndef stations_within_radius(stations, centre, r):\n \"\"\"stations_within_radius(stations, centre, r) returns an alphabetical list of the stations within radius r of the coordinate, centre\"\"\"\n # Calls stations from previous function\n stations_dist = stations_by_distance(stations, centre)\n\n #create list of stations in radius\n stations_in_r = []\n\n #adds stations to stations_in_r\n for i, stations in enumerate(stations):\n if stations_dist[i][1] < r:\n stations_in_r.append(stations_dist[i][0].name)\n else:\n break\n \n #sort stations_in_r alphabetically\n stations_in_r.sort()\n \n return stations_in_r\n\n\n\ndef rivers_by_station_number(stations, N):\n \"\"\"rivers_by_station_number returns a list of tuples(river name, number of stations) sorted by number of stations for the first N rivers\"\"\"\n\n #use dictionary from stations_by_river(stations)\n stations_by_riv = stations_by_river(stations) \n\n #create empty list of rivers\n rivers_stations = []\n\n #add number of stations to list\n for key, value in stations_by_riv.items():\n rivers_stations.append((key, 
len(value)))\n\n #sort list by number of stations\n rivers_stations = sorted(rivers_stations, key = lambda x:-x[1]) \n\n output = rivers_stations[:N]\n\n #sort what happens if nth entry has equal sumber of stations\n list_complete = False\n while list_complete == False:\n #print(rivers_stations[N-1], rivers_stations[N])\n #print(rivers_stations[N-1][1], rivers_stations[N][1])\n if rivers_stations[N-1][1] == rivers_stations[N][1]:\n output.append(rivers_stations[N])\n N+= 1\n else:\n list_complete = True\n\n return output"
]
| [
[
"numpy.deg2rad",
"numpy.array"
]
]
|
yuki-asano/python_doe_kspub | [
"d210fe50338b2132bd795b4bf52c29ad51c5e91b"
]
| [
"sample_program_03_06_cross_validation.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Hiromasa Kaneko\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import KFold, cross_val_predict # クロスバリデーションをするときに使用\r\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error # r^2, RMSE, MAE の計算に使用\r\n\r\nfold_number = 10 # クロスバリデーションの fold 数\r\n\r\ndataset = pd.read_csv('resin.csv', index_col=0, header=0)\r\n\r\n# データ分割\r\ny = dataset.iloc[:, 0] # 目的変数\r\nx = dataset.iloc[:, 1:] # 説明変数\r\n\r\n# 標準偏差が 0 の特徴量の削除\r\ndeleting_variables = x.columns[x.std() == 0]\r\nx = x.drop(deleting_variables, axis=1)\r\n\r\n# オートスケーリング\r\nautoscaled_y = (y - y.mean()) / y.std()\r\nautoscaled_x = (x - x.mean()) / x.std()\r\n\r\n# モデル構築\r\nmodel = LinearRegression() # モデルの宣言\r\nmodel.fit(autoscaled_x, autoscaled_y) # モデル構築\r\n\r\n# 標準回帰係数\r\nstandard_regression_coefficients = pd.DataFrame(model.coef_, index=x.columns, columns=['standard_regression_coefficients'])\r\nstandard_regression_coefficients.to_csv(\r\n 'standard_regression_coefficients_ols.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください\r\n\r\n# トレーニングデータの推定\r\nautoscaled_estimated_y = model.predict(autoscaled_x) # y の推定\r\nestimated_y = autoscaled_estimated_y * y.std() + y.mean() # スケールをもとに戻す\r\nestimated_y = pd.DataFrame(estimated_y, index=x.index, columns=['estimated_y'])\r\n\r\n# トレーニングデータの実測値 vs. 推定値のプロット\r\nplt.rcParams['font.size'] = 18\r\nplt.scatter(y, estimated_y.iloc[:, 0], c='blue') # 実測値 vs. 推定値プロット\r\ny_max = max(y.max(), estimated_y.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得\r\ny_min = min(y.min(), estimated_y.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得\r\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\r\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成\r\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定\r\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定\r\nplt.xlabel('actual y') # x 軸の名前\r\nplt.ylabel('estimated y') # y 軸の名前\r\nplt.gca().set_aspect('equal', adjustable='box') # 図の形を正方形に\r\nplt.show() # 以上の設定で描画\r\n\r\n# トレーニングデータのr2, RMSE, MAE\r\nprint('r^2 for training data :', r2_score(y, estimated_y))\r\nprint('RMSE for training data :', mean_squared_error(y, estimated_y, squared=False))\r\nprint('MAE for training data :', mean_absolute_error(y, estimated_y))\r\n\r\n# トレーニングデータの結果の保存\r\ny_for_save = pd.DataFrame(y)\r\ny_for_save.columns = ['actual_y']\r\ny_error_train = y_for_save.iloc[:, 0] - estimated_y.iloc[:, 0]\r\ny_error_train = pd.DataFrame(y_error_train)\r\ny_error_train.columns = ['error_of_y(actual_y-estimated_y)']\r\nresults_train = pd.concat([y_for_save, estimated_y, y_error_train], axis=1) # 結合\r\nresults_train.to_csv('estimated_y_in_detail_ols.csv') # 推定値を csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください\r\n\r\n# クロスバリデーションによる y の値の推定\r\ncross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True) # クロスバリデーションの分割の設定\r\nautoscaled_estimated_y_in_cv = cross_val_predict(model, autoscaled_x, autoscaled_y, cv=cross_validation) # y の推定\r\nestimated_y_in_cv = autoscaled_estimated_y_in_cv * y.std() + y.mean() # スケールをもとに戻す\r\nestimated_y_in_cv = pd.DataFrame(estimated_y_in_cv, index=x.index, columns=['estimated_y'])\r\n\r\n# クロスバリデーションにおける実測値 vs. 推定値のプロット\r\nplt.rcParams['font.size'] = 18\r\nplt.scatter(y, estimated_y_in_cv.iloc[:, 0], c='blue') # 実測値 vs. 
推定値プロット\r\ny_max = max(y.max(), estimated_y_in_cv.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得\r\ny_min = min(y.min(), estimated_y_in_cv.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得\r\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\r\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成\r\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定\r\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定\r\nplt.xlabel('actual y') # x 軸の名前\r\nplt.ylabel('estimated y') # y 軸の名前\r\nplt.gca().set_aspect('equal', adjustable='box') # 図の形を正方形に\r\nplt.show() # 以上の設定で描画\r\n\r\n# クロスバリデーションにおけるr2, RMSE, MAE\r\nprint('r^2 in cross-validation :', r2_score(y, estimated_y_in_cv))\r\nprint('RMSE in cross-validation :', mean_squared_error(y, estimated_y_in_cv, squared=False))\r\nprint('MAE in cross-validation :', mean_absolute_error(y, estimated_y_in_cv))\r\n\r\n# クロスバリデーションの結果の保存\r\ny_error_in_cv = y_for_save.iloc[:, 0] - estimated_y_in_cv.iloc[:, 0]\r\ny_error_in_cv = pd.DataFrame(y_error_in_cv)\r\ny_error_in_cv.columns = ['error_of_y(actual_y-estimated_y)']\r\nresults_in_cv = pd.concat([y_for_save, estimated_y_in_cv, y_error_in_cv], axis=1) # 結合\r\nresults_in_cv.to_csv('estimated_y_in_cv_in_detail_ols.csv') # 推定値を csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください\r\n"
]
| [
[
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.xlim",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_absolute_error",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"pandas.concat",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.scatter",
"pandas.read_csv",
"sklearn.model_selection.cross_val_predict"
]
]
|
wheatma38/akshare | [
"36698393ebd85d885c99101ce80ac7b5ac6b278d"
]
| [
"akshare/stock_feature/stock_em_hist.py"
]
| [
"# -*- coding:utf-8 -*-\n#!/usr/bin/env python\n\"\"\"\nDate: 2021/8/28 21:26\nDesc: 东方财富网-行情首页-上证 A 股-每日行情\n\"\"\"\nimport requests\nimport pandas as pd\n\n\ndef stock_zh_a_spot_em() -> pd.DataFrame:\n \"\"\"\n 东方财富-A股-实时行情\n http://quote.eastmoney.com/center/gridlist.html#hs_a_board\n :return: 实时行情\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://35.push2.eastmoney.com/api/qt/clist/get\"\n params = {\n \"pn\": \"1\",\n \"pz\": \"5000\",\n \"po\": \"1\",\n \"np\": \"1\",\n \"ut\": \"bd1d9ddb04089700cf9c27f6f7426281\",\n \"fltt\": \"2\",\n \"invt\": \"2\",\n \"fid\": \"f3\",\n \"fs\": \"m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23\",\n \"fields\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152\",\n \"_\": \"1623833739532\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"][\"diff\"])\n temp_df.columns = [\n \"_\",\n \"最新价\",\n \"涨跌幅\",\n \"涨跌额\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"换手率\",\n \"市盈率-动态\",\n \"量比\",\n \"_\",\n \"代码\",\n \"_\",\n \"名称\",\n \"最高\",\n \"最低\",\n \"今开\",\n \"昨收\",\n \"_\",\n \"_\",\n \"_\",\n \"市净率\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n temp_df.reset_index(inplace=True)\n temp_df[\"index\"] = range(1, len(temp_df) + 1)\n temp_df.rename(columns={\"index\": \"序号\"}, inplace=True)\n temp_df = temp_df[\n [\n \"序号\",\n \"代码\",\n \"名称\",\n \"最新价\",\n \"涨跌幅\",\n \"涨跌额\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"最高\",\n \"最低\",\n \"今开\",\n \"昨收\",\n \"量比\",\n \"换手率\",\n \"市盈率-动态\",\n \"市净率\",\n ]\n ]\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"], errors=\"coerce\")\n temp_df[\"涨跌幅\"] = pd.to_numeric(temp_df[\"涨跌幅\"], errors=\"coerce\")\n temp_df[\"涨跌额\"] = pd.to_numeric(temp_df[\"涨跌额\"], errors=\"coerce\")\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"], errors=\"coerce\")\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"], errors=\"coerce\")\n temp_df[\"振幅\"] = pd.to_numeric(temp_df[\"振幅\"], errors=\"coerce\")\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"], errors=\"coerce\")\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"], errors=\"coerce\")\n temp_df[\"今开\"] = pd.to_numeric(temp_df[\"今开\"], errors=\"coerce\")\n temp_df[\"昨收\"] = pd.to_numeric(temp_df[\"昨收\"], errors=\"coerce\")\n temp_df[\"量比\"] = pd.to_numeric(temp_df[\"量比\"], errors=\"coerce\")\n temp_df[\"换手率\"] = pd.to_numeric(temp_df[\"换手率\"], errors=\"coerce\")\n temp_df[\"市盈率-动态\"] = pd.to_numeric(temp_df[\"市盈率-动态\"], errors=\"coerce\")\n temp_df[\"市净率\"] = pd.to_numeric(temp_df[\"市净率\"], errors=\"coerce\")\n return temp_df\n\n\ndef _code_id_map() -> dict:\n \"\"\"\n 东方财富-股票和市场代码\n http://quote.eastmoney.com/center/gridlist.html#hs_a_board\n :return: 股票和市场代码\n :rtype: dict\n \"\"\"\n url = \"http://80.push2.eastmoney.com/api/qt/clist/get\"\n params = {\n \"pn\": \"1\",\n \"pz\": \"5000\",\n \"po\": \"1\",\n \"np\": \"1\",\n \"ut\": \"bd1d9ddb04089700cf9c27f6f7426281\",\n \"fltt\": \"2\",\n \"invt\": \"2\",\n \"fid\": \"f3\",\n \"fs\": \"m:1 t:2,m:1 t:23\",\n \"fields\": \"f12\",\n \"_\": \"1623833739532\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"][\"diff\"])\n temp_df[\"market_id\"] = 1\n temp_df.columns = [\"sh_code\", \"sh_id\"]\n code_id_dict = dict(zip(temp_df[\"sh_code\"], temp_df[\"sh_id\"]))\n params = {\n \"pn\": \"1\",\n \"pz\": \"5000\",\n \"po\": \"1\",\n \"np\": \"1\",\n \"ut\": \"bd1d9ddb04089700cf9c27f6f7426281\",\n \"fltt\": \"2\",\n \"invt\": \"2\",\n 
\"fid\": \"f3\",\n \"fs\": \"m:0 t:6,m:0 t:80\",\n \"fields\": \"f12\",\n \"_\": \"1623833739532\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df_sz = pd.DataFrame(data_json[\"data\"][\"diff\"])\n temp_df_sz[\"sz_id\"] = 0\n code_id_dict.update(dict(zip(temp_df_sz[\"f12\"], temp_df_sz[\"sz_id\"])))\n return code_id_dict\n\n\ndef stock_zh_a_hist(\n symbol: str = \"000001\",\n period: str = 'daily',\n start_date: str = \"19700101\",\n end_date: str = \"22220101\",\n adjust: str = \"\",\n) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-上证 A 股-每日行情\n http://quote.eastmoney.com/concept/sh603777.html?from=classic\n :param symbol: 股票代码\n :type symbol: str\n :param period: choice of {'daily', 'weekly', 'monthly'}\n :type period: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :param adjust: choice of {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"不复权\"}\n :type adjust: str\n :return: 每日行情\n :rtype: pandas.DataFrame\n \"\"\"\n code_id_dict = _code_id_map()\n adjust_dict = {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"0\"}\n period_dict = {'daily': '101', 'weekly': '102', 'monthly': '103'}\n url = \"http://push2his.eastmoney.com/api/qt/stock/kline/get\"\n params = {\n \"fields1\": \"f1,f2,f3,f4,f5,f6\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61\",\n \"ut\": \"7eea3edcaed734bea9cbfc24409ed989\",\n \"klt\": period_dict[period],\n \"fqt\": adjust_dict[adjust],\n \"secid\": f\"{code_id_dict[symbol]}.{symbol}\",\n \"beg\": \"0\",\n \"end\": \"20500000\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"klines\"]])\n temp_df.columns = [\n \"日期\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"涨跌幅\",\n \"涨跌额\",\n \"换手率\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"日期\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(inplace=True, drop=True)\n temp_df = temp_df.astype(\n {\n \"开盘\": float,\n \"收盘\": float,\n \"最高\": float,\n \"最低\": float,\n \"成交量\": int,\n \"成交额\": float,\n \"振幅\": float,\n \"涨跌幅\": float,\n \"涨跌额\": float,\n \"换手率\": float,\n }\n )\n return temp_df\n\n\ndef stock_zh_a_hist_min_em(\n symbol: str = \"000001\",\n period: str = '1',\n adjust: str = '',\n start_date: str = \"1979-09-01 09:32:00\",\n end_date: str = \"2222-01-01 09:32:00\",\n) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-上证 A 股-每日分时行情\n http://quote.eastmoney.com/concept/sh603777.html?from=classic\n :param symbol: 股票代码\n :type symbol: str\n :param period: choice of {'1', '5', '15', '30', '60'}\n :type period: str\n :param adjust: choice of {'', 'qfq', 'hfq'}\n :type adjust: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :return: 每日分时行情\n :rtype: pandas.DataFrame\n \"\"\"\n code_id_dict = _code_id_map()\n adjust_map = {\n '': '0',\n 'qfq': '1',\n 'hfq': '2',\n }\n if period == '1':\n url = 'https://push2his.eastmoney.com/api/qt/stock/trends2/get'\n params = {\n \"fields1\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58\",\n \"ut\": \"7eea3edcaed734bea9cbfc24409ed989\",\n \"ndays\": \"5\",\n \"iscr\": \"0\",\n \"secid\": f\"{code_id_dict[symbol]}.{symbol}\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"trends\"]])\n temp_df.columns = [\n \"时间\",\n 
\"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"最新价\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(drop=True, inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n return temp_df\n else:\n url = 'http://push2his.eastmoney.com/api/qt/stock/kline/get'\n params = {\n 'fields1': 'f1,f2,f3,f4,f5,f6',\n 'fields2': 'f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61',\n 'ut': '7eea3edcaed734bea9cbfc24409ed989',\n 'klt': period,\n 'fqt': adjust_map[adjust],\n 'secid': f\"{code_id_dict[symbol]}.{symbol}\",\n 'beg': '0',\n 'end': '20500000',\n '_': '1630930917857',\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"klines\"]])\n temp_df.columns = [\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"涨跌幅\",\n \"涨跌额\",\n \"换手率\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(drop=True, inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"振幅\"] = pd.to_numeric(temp_df[\"振幅\"])\n temp_df[\"涨跌幅\"] = pd.to_numeric(temp_df[\"涨跌幅\"])\n temp_df[\"涨跌额\"] = pd.to_numeric(temp_df[\"涨跌额\"])\n temp_df[\"换手率\"] = pd.to_numeric(temp_df[\"换手率\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n temp_df = temp_df[[\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"涨跌幅\",\n \"涨跌额\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"换手率\",\n ]]\n return temp_df\n\n\ndef stock_zh_a_hist_pre_min_em(symbol: str = \"000001\",\n start_time: str = \"09:00:00\",\n end_time: str = \"15:50:00\",\n ) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-上证 A 股-每日分时行情包含盘前数据\n http://quote.eastmoney.com/concept/sh603777.html?from=classic\n :param symbol: 股票代码\n :type symbol: str\n :param start_time: 开始时间\n :type start_time: str\n :param end_time: 结束时间\n :type end_time: str\n :return: 每日分时行情包含盘前数据\n :rtype: pandas.DataFrame\n \"\"\"\n code_id_dict = _code_id_map()\n url = \"https://push2.eastmoney.com/api/qt/stock/trends2/get\"\n params = {\n \"fields1\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58\",\n \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n \"ndays\": \"1\",\n \"iscr\": \"1\",\n \"iscca\": \"0\",\n \"secid\": f\"{code_id_dict[symbol]}.{symbol}\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"trends\"]])\n temp_df.columns = [\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"最新价\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n date_format = temp_df.index[0].date().isoformat()\n temp_df = temp_df[date_format + ' ' + start_time:date_format + ' ' + end_time]\n temp_df.reset_index(drop=True, 
inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n return temp_df\n\n\ndef stock_hk_spot_em() -> pd.DataFrame:\n \"\"\"\n 东方财富-港股-实时行情\n http://quote.eastmoney.com/center/gridlist.html#hk_stocks\n :return: 港股-实时行情\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://72.push2.eastmoney.com/api/qt/clist/get\"\n params = {\n \"pn\": \"1\",\n \"pz\": \"5000\",\n \"po\": \"1\",\n \"np\": \"1\",\n \"ut\": \"bd1d9ddb04089700cf9c27f6f7426281\",\n \"fltt\": \"2\",\n \"invt\": \"2\",\n \"fid\": \"f3\",\n \"fs\": \"m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2\",\n \"fields\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152\",\n \"_\": \"1624010056945\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"][\"diff\"])\n temp_df.columns = [\n \"_\",\n \"最新价\",\n \"涨跌幅\",\n \"涨跌额\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"换手率\",\n \"市盈率-动态\",\n \"量比\",\n \"_\",\n \"代码\",\n \"_\",\n \"名称\",\n \"最高\",\n \"最低\",\n \"今开\",\n \"昨收\",\n \"_\",\n \"_\",\n \"_\",\n \"市净率\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n temp_df.reset_index(inplace=True)\n temp_df[\"index\"] = range(1, len(temp_df) + 1)\n temp_df.rename(columns={\"index\": \"序号\"}, inplace=True)\n temp_df = temp_df[\n [\n \"序号\",\n \"代码\",\n \"名称\",\n \"最新价\",\n \"涨跌额\",\n \"涨跌幅\",\n \"今开\",\n \"最高\",\n \"最低\",\n \"昨收\",\n \"成交量\",\n \"成交额\",\n ]\n ]\n return temp_df\n\n\ndef stock_hk_hist(\n symbol: str = \"00593\",\n start_date: str = \"19700101\",\n end_date: str = \"22220101\",\n adjust: str = \"\",\n) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-港股-每日行情\n http://quote.eastmoney.com/hk/08367.html\n :param symbol: 港股-每日行情\n :type symbol: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :param adjust: choice of {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"不复权\"}\n :type adjust: str\n :return: 每日行情\n :rtype: pandas.DataFrame\n \"\"\"\n adjust_dict = {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"0\"}\n url = \"http://33.push2his.eastmoney.com/api/qt/stock/kline/get\"\n params = {\n \"secid\": f\"116.{symbol}\",\n \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n \"fields1\": \"f1,f2,f3,f4,f5,f6\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61\",\n \"klt\": \"101\",\n \"fqt\": adjust_dict[adjust],\n \"end\": \"20500000\",\n \"lmt\": \"1000000\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"klines\"]])\n temp_df.columns = [\n \"日期\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"涨跌幅\",\n \"涨跌额\",\n \"换手率\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"日期\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(inplace=True, drop=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n 
temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"振幅\"] = pd.to_numeric(temp_df[\"振幅\"])\n temp_df[\"涨跌幅\"] = pd.to_numeric(temp_df[\"涨跌幅\"])\n temp_df[\"涨跌额\"] = pd.to_numeric(temp_df[\"涨跌额\"])\n temp_df[\"换手率\"] = pd.to_numeric(temp_df[\"换手率\"])\n return temp_df\n\n\ndef stock_hk_hist_min_em(symbol: str = \"01611\",\n period: str = '1',\n adjust: str = '',\n start_date: str = \"1979-09-01 09:32:00\",\n end_date: str = \"2222-01-01 09:32:00\",\n ) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-港股-每日分时行情\n http://quote.eastmoney.com/hk/00948.html\n :param symbol: 股票代码\n :type symbol: str\n :param period: choice of {'1', '5', '15', '30', '60'}\n :type period: str\n :param adjust: choice of {'', 'qfq', 'hfq'}\n :type adjust: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :return: 每日分时行情\n :rtype: pandas.DataFrame\n \"\"\"\n adjust_map = {\n '': '0',\n 'qfq': '1',\n 'hfq': '2',\n }\n if period == '1':\n url = \"http://push2his.eastmoney.com/api/qt/stock/trends2/get\"\n params = {\n \"fields1\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58\",\n \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n \"iscr\": \"0\",\n \"ndays\": \"5\",\n \"secid\": f\"116.{symbol}\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"trends\"]])\n temp_df.columns = [\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"最新价\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(drop=True, inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n return temp_df\n else:\n url = 'http://push2his.eastmoney.com/api/qt/stock/kline/get'\n params = {\n 'fields1': 'f1,f2,f3,f4,f5,f6',\n 'fields2': 'f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61',\n 'ut': 'bd1d9ddb04089700cf9c27f6f7426281',\n 'klt': period,\n 'fqt': adjust_map[adjust],\n 'secid': f\"116.{symbol}\",\n 'beg': '0',\n 'end': '20500000',\n '_': '1630930917857',\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"klines\"]])\n temp_df.columns = [\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"涨跌幅\",\n \"涨跌额\",\n \"换手率\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(drop=True, inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"振幅\"] = pd.to_numeric(temp_df[\"振幅\"])\n temp_df[\"涨跌幅\"] = pd.to_numeric(temp_df[\"涨跌幅\"])\n temp_df[\"涨跌额\"] = pd.to_numeric(temp_df[\"涨跌额\"])\n temp_df[\"换手率\"] = pd.to_numeric(temp_df[\"换手率\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n temp_df = temp_df[[\n 
\"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"涨跌幅\",\n \"涨跌额\",\n \"成交量\",\n \"成交额\",\n \"振幅\",\n \"换手率\",\n ]]\n return temp_df\n\n\ndef stock_us_spot_em() -> pd.DataFrame:\n \"\"\"\n 东方财富-美股-实时行情\n http://quote.eastmoney.com/center/gridlist.html#us_stocks\n :return: 美股-实时行情; 延迟 15 min\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://72.push2.eastmoney.com/api/qt/clist/get\"\n params = {\n \"pn\": \"1\",\n \"pz\": \"20000\",\n \"po\": \"1\",\n \"np\": \"1\",\n \"ut\": \"bd1d9ddb04089700cf9c27f6f7426281\",\n \"fltt\": \"2\",\n \"invt\": \"2\",\n \"fid\": \"f3\",\n \"fs\": \"m:105,m:106,m:107\",\n \"fields\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152\",\n \"_\": \"1624010056945\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"][\"diff\"])\n temp_df.columns = [\n \"_\",\n \"最新价\",\n \"涨跌幅\",\n \"涨跌额\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"简称\",\n \"编码\",\n \"名称\",\n \"最高价\",\n \"最低价\",\n \"开盘价\",\n \"昨收价\",\n \"总市值\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"市盈率\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n temp_df.reset_index(inplace=True)\n temp_df[\"index\"] = range(1, len(temp_df) + 1)\n temp_df.rename(columns={\"index\": \"序号\"}, inplace=True)\n temp_df[\"代码\"] = temp_df[\"编码\"].astype(str) + \".\" + temp_df[\"简称\"]\n temp_df = temp_df[\n [\n \"序号\",\n \"名称\",\n \"最新价\",\n \"涨跌额\",\n \"涨跌幅\",\n \"开盘价\",\n \"最高价\",\n \"最低价\",\n \"昨收价\",\n \"总市值\",\n \"市盈率\",\n \"代码\",\n ]\n ]\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"], errors=\"coerce\")\n temp_df[\"涨跌额\"] = pd.to_numeric(temp_df[\"涨跌额\"], errors=\"coerce\")\n temp_df[\"涨跌幅\"] = pd.to_numeric(temp_df[\"涨跌幅\"], errors=\"coerce\")\n temp_df[\"开盘价\"] = pd.to_numeric(temp_df[\"开盘价\"], errors=\"coerce\")\n temp_df[\"最高价\"] = pd.to_numeric(temp_df[\"最高价\"], errors=\"coerce\")\n temp_df[\"最低价\"] = pd.to_numeric(temp_df[\"最低价\"], errors=\"coerce\")\n temp_df[\"昨收价\"] = pd.to_numeric(temp_df[\"昨收价\"], errors=\"coerce\")\n temp_df[\"总市值\"] = pd.to_numeric(temp_df[\"总市值\"], errors=\"coerce\")\n temp_df[\"市盈率\"] = pd.to_numeric(temp_df[\"市盈率\"], errors=\"coerce\")\n return temp_df\n\n\ndef stock_us_hist(\n symbol: str = \"105.LI\",\n start_date: str = \"19700101\",\n end_date: str = \"22220101\",\n adjust: str = \"\",\n) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-美股-每日行情\n http://quote.eastmoney.com/us/ENTX.html#fullScreenChart\n :param symbol: 股票代码; 此股票代码需要通过调用 ak.stock_us_spot_em 的 `代码` 字段获取\n :type symbol: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :param adjust: choice of {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"不复权\"}\n :type adjust: str\n :return: 每日行情\n :rtype: pandas.DataFrame\n \"\"\"\n adjust_dict = {\"qfq\": \"1\", \"hfq\": \"2\", \"\": \"0\"}\n url = \"http://63.push2his.eastmoney.com/api/qt/stock/kline/get\"\n params = {\n \"secid\": f\"{symbol}\",\n \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n \"fields1\": \"f1,f2,f3,f4,f5,f6\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61\",\n \"klt\": \"101\",\n \"fqt\": adjust_dict[adjust],\n \"end\": \"20500000\",\n \"lmt\": \"1000000\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"klines\"]])\n temp_df.columns = [\n \"日期\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n 
\"成交量\",\n \"成交额\",\n \"振幅\",\n \"涨跌幅\",\n \"涨跌额\",\n \"换手率\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"日期\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(inplace=True, drop=True)\n temp_df = temp_df.astype(\n {\n \"开盘\": float,\n \"收盘\": float,\n \"最高\": float,\n \"最低\": float,\n \"成交量\": int,\n \"成交额\": float,\n \"振幅\": float,\n \"涨跌幅\": float,\n \"涨跌额\": float,\n \"换手率\": float,\n }\n )\n return temp_df\n\n\ndef stock_us_hist_min_em(symbol: str = \"105.ATER\",\n start_date: str = \"1979-09-01 09:32:00\",\n end_date: str = \"2222-01-01 09:32:00\",\n ) -> pd.DataFrame:\n \"\"\"\n 东方财富网-行情首页-美股-每日分时行情\n http://quote.eastmoney.com/us/ATER.html\n :param symbol: 股票代码\n :type symbol: str\n :param start_date: 开始日期\n :type start_date: str\n :param end_date: 结束日期\n :type end_date: str\n :return: 每日分时行情\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://push2his.eastmoney.com/api/qt/stock/trends2/get\"\n params = {\n \"fields1\": \"f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13\",\n \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58\",\n \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n \"iscr\": \"0\",\n \"ndays\": \"5\",\n \"secid\": f\"{symbol.split('.')[0]}.{symbol.split('.')[1]}\",\n \"_\": \"1623766962675\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"][\"trends\"]])\n temp_df.columns = [\n \"时间\",\n \"开盘\",\n \"收盘\",\n \"最高\",\n \"最低\",\n \"成交量\",\n \"成交额\",\n \"最新价\",\n ]\n temp_df.index = pd.to_datetime(temp_df[\"时间\"])\n temp_df = temp_df[start_date:end_date]\n temp_df.reset_index(drop=True, inplace=True)\n temp_df[\"开盘\"] = pd.to_numeric(temp_df[\"开盘\"])\n temp_df[\"收盘\"] = pd.to_numeric(temp_df[\"收盘\"])\n temp_df[\"最高\"] = pd.to_numeric(temp_df[\"最高\"])\n temp_df[\"最低\"] = pd.to_numeric(temp_df[\"最低\"])\n temp_df[\"成交量\"] = pd.to_numeric(temp_df[\"成交量\"])\n temp_df[\"成交额\"] = pd.to_numeric(temp_df[\"成交额\"])\n temp_df[\"最新价\"] = pd.to_numeric(temp_df[\"最新价\"])\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)\n return temp_df\n\n\nif __name__ == \"__main__\":\n stock_hk_spot_em_df = stock_hk_spot_em()\n print(stock_hk_spot_em_df)\n\n stock_hk_hist_df = stock_hk_hist(\n symbol=\"01246\", start_date=\"19700101\", end_date=\"22220101\", adjust=\"\"\n )\n print(stock_hk_hist_df)\n\n stock_hk_hist_qfq_df = stock_hk_hist(\n symbol=\"00593\", start_date=\"19700101\", end_date=\"22220101\", adjust=\"qfq\"\n )\n print(stock_hk_hist_qfq_df)\n\n stock_hk_hist_hfq_df = stock_hk_hist(\n symbol=\"00326\", start_date=\"19700101\", end_date=\"22220101\", adjust=\"hfq\"\n )\n print(stock_hk_hist_hfq_df)\n\n stock_us_spot_em_df = stock_us_spot_em()\n print(stock_us_spot_em_df)\n\n stock_us_hist_df = stock_us_hist(\n symbol=\"105.MTP\", start_date=\"19700101\", end_date=\"22220101\", adjust=\"qfq\"\n )\n print(stock_us_hist_df)\n\n stock_zh_a_spot_em_df = stock_zh_a_spot_em()\n print(stock_zh_a_spot_em_df)\n\n stock_zh_a_hist_df = stock_zh_a_hist(\n symbol=\"000001\", period='monthly', start_date=\"20101010\", end_date=\"20210812\", adjust=\"hfq\"\n )\n print(stock_zh_a_hist_df)\n\n stock_zh_a_hist_min_em_df = stock_zh_a_hist_min_em(symbol=\"000001\")\n print(stock_zh_a_hist_min_em_df)\n\n stock_zh_a_hist_pre_min_em_df = stock_zh_a_hist_pre_min_em(symbol=\"000001\")\n print(stock_zh_a_hist_pre_min_em_df)\n\n stock_hk_hist_min_em_df = stock_hk_hist_min_em(symbol=\"01611\")\n print(stock_hk_hist_min_em_df)\n\n stock_us_hist_min_em_df = stock_us_hist_min_em(symbol=\"105.ATER\")\n 
print(stock_us_hist_min_em_df)\n\n stock_zh_a_hist_min_em_df = stock_zh_a_hist_min_em(symbol=\"000001\", period='5', adjust='hfq', start_date=\"2021-09-01 09:32:00\", end_date=\"2021-09-06 09:32:00\")\n print(stock_zh_a_hist_min_em_df)\n\n stock_zh_a_hist_df = stock_zh_a_hist(symbol=\"000001\", period=\"daily\", start_date=\"20170301\", end_date='20210907', adjust=\"\")\n print(stock_zh_a_hist_df)\n\n stock_hk_hist_min_em_df = stock_hk_hist_min_em(symbol=\"01611\", period='1', adjust='', start_date=\"2021-09-14 09:32:00\", end_date=\"2021-09-14 18:32:00\")\n print(stock_hk_hist_min_em_df)\n"
]
| [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.to_numeric"
]
]
|
Wootai/CNTK | [
"0ee09cf771bda9d4912790e0fed7322e89d86d87"
]
| [
"bindings/python/cntk/ops/tests/ops_test_utils.py"
]
| [
"# Copyright (c) Microsoft. All rights reserved.\n\n# Licensed under the MIT license. See LICENSE.md file in the project root\n# for full license information.\n# ==============================================================================\n\n\"\"\"\nUtils for operations unit tests\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom cntk.tests.test_utils import *\n\nfrom cntk.device import cpu, gpu\nfrom ...ops.functions import Function\nfrom cntk.internal import sanitize_dtype_cntk\nfrom cntk.internal.utils import eval as cntk_eval\nfrom .. import constant, input\n\ndef cntk_device(device_id):\n '''\n Converts the legacy device ID as it was used in CNTK 1 to a :class:`~cntk.device.DeviceDescriptor` instance.\n\n Args:\n device_id (int): device id, -1 for CPU, 0 or higher for GPU\n\n Returns:\n :class:`~cntk.device.DeviceDescriptor`\n '''\n if device_id == -1:\n return cpu()\n else:\n return gpu(device_id)\n\n\ndef _test_unary_op(precision, device_id, op_func,\n value, expected_forward, expected_backward_all, op_param_dict={}):\n\n value = AA(value, dtype=PRECISION_TO_TYPE[precision])\n\n a = input(shape=value.shape,\n dtype=sanitize_dtype_cntk(PRECISION_TO_TYPE[precision]),\n needs_gradient=True,\n name='a')\n\n # create batch\n value.shape = (1,) + value.shape\n\n if (type(op_func) == str):\n input_op = eval('%s a' % op_func)\n else:\n input_op = op_func(a, **op_param_dict)\n\n forward_input = {a: value}\n expected_backward = {a: expected_backward_all['arg'], } if expected_backward_all is not None else None\n unittest_helper(input_op,\n forward_input, expected_forward, expected_backward,\n device_id=device_id, precision=precision)\n\n\ndef _test_binary_op(precision, device_id, op_func, left_operand, right_operand,\n expected_forward, expected_backward_all, wrap_batch_seq=True, op_param_dict={}):\n dt = PRECISION_TO_TYPE[precision]\n dev = cntk_device(device_id)\n\n left_value = AA(left_operand, dtype=dt)\n right_value = AA(right_operand, dtype=dt)\n\n a = input(shape=left_value.shape,\n dtype=sanitize_dtype_cntk(precision),\n needs_gradient=True,\n name='a')\n\n b = input(shape=right_value.shape,\n dtype=sanitize_dtype_cntk(precision),\n needs_gradient=True,\n name='b')\n\n const_a = constant(left_value, device=dev)\n const_b = constant(right_value, device=dev)\n\n if (type(op_func) == str):\n input_op_constant = eval('a %s const_b' % op_func)\n constant_op_input = eval('const_a %s b' % op_func)\n input_op_input = eval('a %s b' % op_func)\n else:\n input_op_constant = op_func(a, const_b, **op_param_dict)\n constant_op_input = op_func(const_a, b, **op_param_dict)\n input_op_input = op_func(a, b, **op_param_dict)\n\n # create batch by wrapping the data point into a batch of one sample\n if wrap_batch_seq:\n left_value.shape = (1,) + left_value.shape\n right_value.shape = (1,) + right_value.shape\n\n forward_input = {a: left_value, b: right_value}\n expected_backward = {a: expected_backward_all[\n 'left_arg'], b: expected_backward_all['right_arg'], }\n unittest_helper(input_op_input,\n forward_input, expected_forward, expected_backward,\n device_id=device_id, precision=precision)\n\n forward_input = {a: left_value}\n expected_backward = {a: expected_backward_all['left_arg'], }\n unittest_helper(input_op_constant,\n forward_input, expected_forward, expected_backward,\n device_id=device_id, precision=precision)\n\n forward_input = {b: right_value}\n expected_backward = {b: expected_backward_all['right_arg'], }\n unittest_helper(constant_op_input,\n forward_input, expected_forward, 
expected_backward,\n device_id=device_id, precision=precision)\n\n\ndef unittest_helper(root_node,\n forward_input, expected_forward, expected_backward,\n device_id=-1, precision=\"float\"):\n\n assert isinstance(root_node, Function)\n\n backward_pass = expected_backward is not None\n forward, backward = cntk_eval(root_node, forward_input, precision,\n cntk_device(device_id), backward_pass, expected_backward)\n\n # for forward we always expect only one result\n assert len(forward) == 1\n forward = list(forward.values())[0]\n\n forward = np.atleast_1d(forward)\n\n for res, exp in zip(forward, expected_forward):\n assert res.shape == AA(exp).shape\n assert np.allclose(res, exp, atol=TOLERANCE_ABSOLUTE)\n\n if expected_backward:\n for key in expected_backward:\n res, exp = backward[key], expected_backward[key]\n if isinstance(res, list):\n assert len(res) == len(exp)\n for res_seq, exp_seq in zip(res, exp):\n assert res_seq.shape == AA(exp_seq).shape\n assert np.allclose(\n res_seq, exp_seq, atol=TOLERANCE_ABSOLUTE)\n\n elif isinstance(res, np.ndarray):\n assert res.shape == AA(exp).shape\n assert np.allclose(res, exp, atol=TOLERANCE_ABSOLUTE)\n\n\ndef batch_dense_to_sparse(batch, dynamic_axis=''):\n '''\n Helper test function that converts a batch of dense tensors into sparse\n representation that can be consumed by :func:`cntk.ops.sparse_input_numpy`.\n\n Args:\n batch (list): list of samples. If ``dynamic_axis`` is given, samples are sequences\n of tensors. Otherwise, they are simple tensors.\n dynamic_axis (str or :func:`cntk.ops.dynamic_axis` instance): the dynamic axis\n\n Returns:\n (indices, values, shape)\n '''\n\n batch_indices = []\n batch_values = []\n\n shapes_in_tensor = set()\n\n for tensor in batch:\n if isinstance(tensor, list):\n tensor = np.asarray(tensor)\n\n if dynamic_axis:\n # collecting the shapes ignoring the dynamic axis\n shapes_in_tensor.add(tensor.shape[1:])\n else:\n shapes_in_tensor.add(tensor.shape)\n\n if len(shapes_in_tensor) != 1:\n raise ValueError('except for the sequence dimensions all shapes ' +\n 'should be the same - instead we %s' %\n (\", \".join(str(s) for s in shapes_in_tensor)))\n\n t_indices = range(tensor.size)\n t_values = tensor.ravel(order='F')\n mask = t_values != 0\n\n batch_indices.append(list(np.asarray(t_indices)[mask]))\n batch_values.append(list(np.asarray(t_values)[mask]))\n\n return batch_indices, batch_values, shapes_in_tensor.pop()\n\n\ndef test_batch_dense_to_sparse_full():\n i, v, s = batch_dense_to_sparse(\n [\n [[1, 2, 3], [4, 5, 6]],\n [[10, 20, 30], [40, 50, 60]],\n ])\n assert i == [\n [0, 1, 2, 3, 4, 5],\n [0, 1, 2, 3, 4, 5],\n ]\n assert v == [\n [1, 4, 2, 5, 3, 6],\n [10, 40, 20, 50, 30, 60]\n ]\n assert s == (2, 3)\n\n i, v, s = batch_dense_to_sparse([[1]])\n assert i == [[0]]\n assert v == [[1]]\n assert s == (1,)\n\n\ndef test_batch_dense_to_sparse_zeros():\n i, v, s = batch_dense_to_sparse(\n [\n [[1, 2, 3], [4, 0, 6]],\n [[0, 0, 0], [40, 50, 60]],\n ])\n assert i == [\n [0, 1, 2, 4, 5],\n [1, 3, 5],\n ]\n assert v == [\n [1, 4, 2, 3, 6],\n [40, 50, 60]\n ]\n assert s == (2, 3)\n\ndef remove_np_array_in_list(arr, l):\n index = 0\n size = len(l)\n while index != size and not np.allclose(l[index], arr, atol=TOLERANCE_ABSOLUTE):\n index += 1\n if index != size:\n l.pop(index)\n else:\n raise ValueError('array not found in list.')\n\n# compare two unordered lists of np arrays\ndef compare_lists_of_np_arrays(first_list, second_list):\n second_list = list(second_list) # make a mutable copy\n try:\n for elem in first_list:\n 
remove_np_array_in_list(elem, second_list)\n except ValueError:\n return False\n return not second_list\n"
]
| [
[
"numpy.allclose",
"numpy.atleast_1d",
"numpy.asarray"
]
]
|
AlexanderSemenyak/webviz-config | [
"3602901f215033bddd484ea1c13013a8addaf012"
]
| [
"webviz_config/generic_plugins/_table_plotter.py"
]
| [
"import base64\nimport inspect\nfrom pathlib import Path\nfrom collections import OrderedDict\nfrom typing import Optional, List, Dict, Any\n\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nfrom dash import html, dcc, Input, Output, Dash\nimport webviz_core_components as wcc\n\nfrom .. import WebvizPluginABC, WebvizSettings, EncodedFile\nfrom ..webviz_store import webvizstore\nfrom ..common_cache import CACHE\n\n\n# pylint: disable=too-many-arguments\nclass TablePlotter(WebvizPluginABC):\n \"\"\"Adds a plotter to the webviz instance, using tabular data from a provided csv file.\nIf feature is requested, the data could also come from a database.\n\n---\n\n* **`csv_file`:** Path to the csv file containing the tabular data. \\\n Either absolute path or relative to the configuration file.\n* **`plot_options`:** A dictionary of plot options to initialize the plot with.\n* **`filter_cols`:** Dataframe columns that can be used to filter data.\n* **`filter_defaults`:** A dictionary with column names as keys, \\\n and a list of column values that should be preselected in the filter. \\\n If a columm is not defined, all values are preselected for the column.\n* **`column_color_discrete_maps`:** A dictionary with column names as keys, \\\n each key containing a new dictionary with the columns \\\n unique values as keys, and the color they should be \\\n plotted with as value. Hex values needs quotes '' \\\n to not be read as a comment.\n* **`lock`:** If `True`, only the plot is shown, \\\n all dropdowns for changing plot options are hidden.\n\"\"\"\n\n def __init__(\n self,\n app: Dash,\n webviz_settings: WebvizSettings,\n csv_file: Path,\n plot_options: dict = None,\n filter_cols: list = None,\n filter_defaults: dict = None,\n column_color_discrete_maps: dict = None,\n lock: bool = False,\n ) -> None:\n\n super().__init__()\n\n self.plot_options = plot_options if plot_options else {}\n self.lock = lock\n self.csv_file = csv_file\n self.data = get_data(self.csv_file)\n self.set_filters(filter_cols)\n self.columns = list(self.data.columns)\n self.numeric_columns = list(\n self.data.select_dtypes(include=[np.number]).columns\n )\n self.filter_defaults = filter_defaults\n self.column_color_discrete_maps = column_color_discrete_maps\n self.plotly_theme = webviz_settings.theme.plotly_theme\n self.set_callbacks(app)\n\n def set_filters(self, filter_cols: Optional[list]) -> None:\n self.filter_cols = []\n self.use_filter = False\n if filter_cols:\n for col in filter_cols:\n if col in self.data.columns:\n if self.data[col].nunique() != 1:\n self.filter_cols.append(col)\n if self.filter_cols:\n self.use_filter = True\n\n def add_webvizstore(self) -> List[tuple]:\n return [(get_data, [{\"csv_file\": self.csv_file}])]\n\n @property\n def plots(self) -> dict:\n \"\"\"A dict of available plots and their options\"\"\"\n return {\n \"scatter\": [\"x\", \"y\", \"size\", \"color\", \"facet_col\"],\n \"histogram\": [\n \"x\",\n \"color\",\n \"facet_col\",\n \"barmode\",\n \"barnorm\",\n \"histnorm\",\n ],\n \"bar\": [\"x\", \"y\", \"color\", \"facet_col\", \"barmode\"],\n \"pie\": [\"values\", \"names\"],\n \"scatter_3d\": [\"x\", \"y\", \"z\", \"size\", \"color\"],\n \"line\": [\"x\", \"y\", \"color\", \"line_group\", \"facet_col\"],\n \"line_3d\": [\"x\", \"y\", \"z\", \"color\"],\n \"box\": [\"x\", \"y\", \"color\", \"facet_col\"],\n \"violin\": [\"x\", \"y\", \"color\", \"facet_col\"],\n \"scatter_matrix\": [\"dimensions\", \"size\", \"color\"],\n \"parallel_coordinates\": 
[\"dimensions\"],\n \"parallel_categories\": [\"dimensions\"],\n \"density_contour\": [\"x\", \"y\", \"color\", \"facet_col\"],\n }\n\n @property\n def plot_args(self) -> dict:\n \"\"\"A dict of possible plot options and their default values\"\"\"\n return OrderedDict(\n {\n \"x\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"x\", self.columns[0]),\n \"multi\": False,\n \"clearable\": False,\n },\n \"y\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"y\", self.columns[0]),\n \"multi\": False,\n \"clearable\": False,\n },\n \"z\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"z\", self.columns[0]),\n \"multi\": False,\n \"clearable\": False,\n },\n \"values\": {\n \"options\": self.numeric_columns,\n \"value\": self.plot_options.get(\"values\", self.numeric_columns[0]),\n \"multi\": False,\n \"clearable\": True,\n },\n \"names\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"names\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"size\": {\n \"options\": self.numeric_columns,\n \"value\": self.plot_options.get(\"size\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"color\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"color\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"facet_col\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"facet_col\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"line_group\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"line_group\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"barmode\": {\n \"options\": [\"stack\", \"group\", \"overlay\", \"relative\"],\n \"value\": self.plot_options.get(\"barmode\", \"stack\"),\n \"multi\": False,\n \"clearable\": True,\n },\n \"barnorm\": {\n \"options\": [\"fraction\", \"percent\"],\n \"value\": self.plot_options.get(\"barnorm\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"histnorm\": {\n \"options\": [\n \"percent\",\n \"propability\",\n \"density\",\n \"propability density\",\n ],\n \"value\": self.plot_options.get(\"histnorm\", None),\n \"multi\": False,\n \"clearable\": True,\n },\n \"trendline\": {\n \"options\": self.numeric_columns,\n \"value\": None,\n \"multi\": False,\n \"clearable\": True,\n },\n \"dimensions\": {\n \"options\": self.columns,\n \"value\": self.plot_options.get(\"dimensions\", self.columns),\n \"multi\": True,\n \"clearable\": True,\n },\n }\n )\n\n def filter_layout(self) -> Optional[list]:\n \"\"\"Makes dropdowns for each dataframe column used for filtering.\"\"\"\n if not self.use_filter:\n return None\n df = self.data\n dropdowns = [html.H4(\"Set filters\")]\n for col in self.filter_cols:\n if df[col].dtype in [np.float64, np.int64]:\n min_val = df[col].min()\n max_val = df[col].max()\n mean_val = df[col].mean()\n dropdowns.append(\n html.Div(\n children=[\n html.Details(\n open=True,\n children=[\n html.Summary(col.lower().capitalize()),\n dcc.RangeSlider(\n id=self.uuid(f\"filter-{col}\"),\n min=min_val,\n max=max_val,\n step=(max_val - min_val) / 10,\n marks={\n min_val: f\"{min_val:.2f}\",\n mean_val: f\"{mean_val:.2f}\",\n max_val: f\"{max_val:.2f}\",\n },\n value=[min_val, max_val],\n ),\n ],\n )\n ]\n )\n )\n else:\n elements = list(self.data[col].unique())\n dropdowns.append(\n html.Div(\n children=[\n html.Details(\n open=True,\n children=[\n html.Summary(col.lower().capitalize()),\n wcc.Select(\n id=self.uuid(f\"filter-{col}\"),\n options=[\n {\"label\": 
i, \"value\": i} for i in elements\n ],\n value=elements\n if self.filter_defaults is None\n else [\n element\n for element in self.filter_defaults.get(\n col, elements\n )\n if element in elements\n ],\n size=min(15, len(elements)),\n ),\n ],\n )\n ]\n )\n )\n return dropdowns\n\n def plot_option_layout(self) -> List[html.Div]:\n \"\"\"Renders a dropdown widget for each plot option\"\"\"\n divs = []\n # The plot type dropdown is handled separate\n divs.append(\n html.Div(\n style=self.style_options_div,\n children=[\n html.H4(\"Set plot options\"),\n html.P(\"Plot type\"),\n dcc.Dropdown(\n id=self.uuid(\"plottype\"),\n clearable=False,\n options=[{\"label\": i, \"value\": i} for i in self.plots],\n value=self.plot_options.get(\"type\", \"scatter\"),\n ),\n ],\n )\n )\n # Looping through all available plot options\n # and renders a dropdown widget\n for key, arg in self.plot_args.items():\n divs.append(\n html.Div(\n style=self.style_options_div_hidden,\n id=self.uuid(f\"div-{key}\"),\n children=[\n html.P(key),\n dcc.Dropdown(\n id=self.uuid(f\"dropdown-{key}\"),\n clearable=arg[\"clearable\"],\n options=[{\"label\": i, \"value\": i} for i in arg[\"options\"]],\n value=arg[\"value\"],\n multi=arg[\"multi\"],\n ),\n ],\n )\n )\n return divs\n\n @property\n def style_options_div(self) -> Dict[str, str]:\n \"\"\"Style for active plot options\"\"\"\n return {\"display\": \"grid\"}\n\n @property\n def style_options_div_hidden(self) -> Dict[str, str]:\n \"\"\"Style for hidden plot options\"\"\"\n return {\"display\": \"none\"}\n\n @property\n def layout(self) -> html.Div:\n return html.Div(\n children=[\n wcc.FlexBox(\n children=[\n html.Div(\n id=self.uuid(\"selector-row\"),\n style={\"display\": \"none\"}\n if self.lock\n else {\"width\": \"15%\"},\n children=self.plot_option_layout(),\n ),\n wcc.Graph(\n id=self.uuid(\"graph-id\"),\n style={\"height\": \"80vh\", \"width\": \"60%\"},\n ),\n html.Div(style={\"width\": \"15%\"}, children=self.filter_layout()),\n ],\n )\n ]\n )\n\n @property\n def plot_output_callbacks(self) -> List[Output]:\n \"\"\"Creates list of output dependencies for callback\n The outputs are the graph, and the style of the plot options\"\"\"\n outputs = []\n outputs.append(Output(self.uuid(\"graph-id\"), \"figure\"))\n for plot_arg in self.plot_args.keys():\n outputs.append(Output(self.uuid(f\"div-{plot_arg}\"), \"style\"))\n return outputs\n\n @property\n def plot_input_callbacks(self) -> List[Input]:\n \"\"\"Creates list of input dependencies for callback\n The inputs are the plot type and the current value\n for each plot option\n \"\"\"\n inputs = []\n inputs.append(Input(self.uuid(\"plottype\"), \"value\"))\n for plot_arg in self.plot_args.keys():\n inputs.append(Input(self.uuid(f\"dropdown-{plot_arg}\"), \"value\"))\n for filtcol in self.filter_cols:\n inputs.append(Input(self.uuid(f\"filter-{filtcol}\"), \"value\"))\n return inputs\n\n def set_callbacks(self, app: Dash) -> None:\n @app.callback(self.plugin_data_output, self.plugin_data_requested)\n def _user_download_data(data_requested: Optional[int]) -> Optional[EncodedFile]:\n return (\n {\n \"filename\": \"table-plotter.csv\",\n \"content\": base64.b64encode(\n get_data(self.csv_file).to_csv().encode()\n ).decode(\"ascii\"),\n \"mime_type\": \"text/csv\",\n }\n if data_requested\n else None\n )\n\n @app.callback(self.plot_output_callbacks, self.plot_input_callbacks)\n def _update_output(*args: Any) -> tuple:\n \"\"\"Updates the graph and shows/hides plot options\"\"\"\n plot_type = args[0]\n # pylint: 
disable=protected-access\n plotfunc = getattr(px._chart_types, plot_type)\n plotargs = {}\n div_style = []\n data = self.data\n # Filter dataframe if filter columns are available\n if self.use_filter:\n plot_inputs = args[1 : -len(self.filter_cols)]\n filter_inputs = args[-len(self.filter_cols) :]\n data = filter_dataframe(data, self.filter_cols, filter_inputs)\n else:\n plot_inputs = args[1:]\n for name, plot_arg in zip(self.plot_args.keys(), plot_inputs):\n if plot_type in [\"parallel_coordinates\"] and name == \"dimensions\":\n # This plot type only accepts numerical data\n plot_arg = [val for val in plot_arg if val in self.numeric_columns]\n if name in self.plots[plot_type]:\n plotargs[name] = plot_arg\n div_style.append(self.style_options_div)\n\n if (\n name == \"color\"\n and self.column_color_discrete_maps is not None\n and plot_arg in self.column_color_discrete_maps\n and \"color_discrete_map\"\n in inspect.signature(plotfunc).parameters\n ):\n plotargs[\n \"color_discrete_map\"\n ] = self.column_color_discrete_maps.get(plot_arg)\n else:\n div_style.append(self.style_options_div_hidden)\n return (plotfunc(data, template=self.plotly_theme, **plotargs), *div_style)\n\n\[email protected](timeout=CACHE.TIMEOUT)\n@webvizstore\ndef get_data(csv_file: Path) -> pd.DataFrame:\n return pd.read_csv(csv_file, index_col=None)\n\n\[email protected](timeout=CACHE.TIMEOUT)\ndef filter_dataframe(\n dframe: pd.DataFrame, columns: list, column_values: List[list]\n) -> pd.DataFrame:\n df = dframe.copy()\n if not isinstance(columns, list):\n columns = [columns]\n for filt, col in zip(column_values, columns):\n if isinstance(filt, list):\n if df[col].dtype in [np.float64, np.int64]:\n df = df.loc[df[col].between(filt[0], filt[1])]\n else:\n df = df.loc[df[col].isin(filt)]\n else:\n df = df.loc[df[col] == filt]\n return df\n"
]
| [
[
"pandas.read_csv"
]
]
|
sunbing7/backdoor | [
"a0188ee3da1fac4b923b64a9e6238e5372dd0065"
]
| [
"mnist/source/causal_analysis.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-11-05 11:30:01\n# @Author : Bolun Wang ([email protected])\n# @Link : http://cs.ucsb.edu/~bolunwang\n\nimport os\nimport time\n\nimport numpy as np\nimport random\nimport tensorflow\nimport keras\nfrom tensorflow import set_random_seed\nrandom.seed(123)\nnp.random.seed(123)\nset_random_seed(123)\n\nfrom keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom causal_inference import causal_analyzer\nfrom causal_attribution import causal_attribution\n\nimport utils_backdoor\n\nimport sys\n\n\n##############################\n# PARAMETERS #\n##############################\n\nDEVICE = '3' # specify which GPU to use\n\n#DATA_DIR = '../data' # data folder\n#DATA_FILE = 'vgg_dataset.h5' # dataset file\nMODEL_DIR = '../models' # model directory\nMODEL_FILENAME = 'mnist_backdoor_3.h5' # model file\n#MODEL_FILENAME = 'trojaned_face_model_wm.h5'\nRESULT_DIR = '../results' # directory for storing results\n# image filename template for visualization results\nIMG_FILENAME_TEMPLATE = 'mnist_visualize_%s_label_%d.png'\n\n# input size\nIMG_ROWS = 28\nIMG_COLS = 28\nIMG_COLOR = 1\nINPUT_SHAPE = (IMG_ROWS, IMG_COLS, IMG_COLOR)\n\nNUM_CLASSES = 10 # total number of classes in the model\nY_TARGET = 3 # (optional) infected target label, used for prioritizing label scanning\n\nINTENSITY_RANGE = 'mnist' # preprocessing method for the task, GTSRB uses raw pixel intensities\n\n# parameters for optimization\nBATCH_SIZE = 32 # batch size used for optimization\nLR = 0.1 # learning rate\nSTEPS = 1000 # total optimization iterations\nNB_SAMPLE = 1000 # number of samples in each mini batch\nMINI_BATCH = NB_SAMPLE // BATCH_SIZE # mini batch size used for early stop\nINIT_COST = 1e-3 # initial weight used for balancing two objectives\n\nREGULARIZATION = 'l1' # reg term to control the mask's norm\n\nATTACK_SUCC_THRESHOLD = 0.99 # attack success threshold of the reversed attack\nPATIENCE = 5 # patience for adjusting weight, number of mini batches\nCOST_MULTIPLIER = 2 # multiplier for auto-control of weight (COST)\nSAVE_LAST = False # whether to save the last result or best result\n\nEARLY_STOP = True # whether to early stop\nEARLY_STOP_THRESHOLD = 1.0 # loss threshold for early stop\nEARLY_STOP_PATIENCE = 5 * PATIENCE # patience for early stop\n\n# the following part is not used in our experiment\n# but our code implementation also supports super-pixel mask\nUPSAMPLE_SIZE = 1 # size of the super pixel\nMASK_SHAPE = np.ceil(np.array(INPUT_SHAPE[0:2], dtype=float) / UPSAMPLE_SIZE)\nMASK_SHAPE = MASK_SHAPE.astype(int)\n\n# parameters of the original injected trigger\n# this is NOT used during optimization\n# start inclusive, end exclusive\n# PATTERN_START_ROW, PATTERN_END_ROW = 27, 31\n# PATTERN_START_COL, PATTERN_END_COL = 27, 31\n# PATTERN_COLOR = (255.0, 255.0, 255.0)\n# PATTERN_LIST = [\n# (row_idx, col_idx, PATTERN_COLOR)\n# for row_idx in range(PATTERN_START_ROW, PATTERN_END_ROW)\n# for col_idx in range(PATTERN_START_COL, PATTERN_END_COL)\n# ]\n\n##############################\n# END PARAMETERS #\n##############################\n\ndef load_dataset():\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = tensorflow.keras.datasets.mnist.load_data()\n\n # Scale images to the [0, 1] range\n x_train = x_train.astype(\"float32\") / 255\n x_test = x_test.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n x_train = np.expand_dims(x_train, -1)\n x_test = 
np.expand_dims(x_test, -1)\n print(\"x_train shape:\", x_train.shape)\n print(x_train.shape[0], \"train samples\")\n print(x_test.shape[0], \"test samples\")\n\n # convert class vectors to binary class matrices\n y_train = tensorflow.keras.utils.to_categorical(y_train, NUM_CLASSES)\n y_test = tensorflow.keras.utils.to_categorical(y_test, NUM_CLASSES)\n return x_test, y_test\n\ndef load_dataset_class(target_class):\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = tensorflow.keras.datasets.mnist.load_data()\n\n # Scale images to the [0, 1] range\n x_train = x_train.astype(\"float32\") / 255\n x_test = x_test.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n x_train = np.expand_dims(x_train, -1)\n x_test = np.expand_dims(x_test, -1)\n print(\"x_train shape:\", x_train.shape)\n print(x_train.shape[0], \"train samples\")\n print(x_test.shape[0], \"test samples\")\n\n # convert class vectors to binary class matrices\n y_train = tensorflow.keras.utils.to_categorical(y_train, NUM_CLASSES)\n y_test = tensorflow.keras.utils.to_categorical(y_test, NUM_CLASSES)\n x_t_out = []\n y_t_out = []\n i = 0\n for y_i in y_test:\n if np.argmax(y_i) == target_class:\n x_t_out.append(x_test[i])\n y_t_out.append(y_i)\n i = i + 1\n return np.asarray(x_t_out), np.asarray(y_t_out)\n\ndef build_data_loader(X, Y):\n\n datagen = ImageDataGenerator()\n generator = datagen.flow(\n X, Y, batch_size=BATCH_SIZE)\n\n return generator\n\n\ndef trigger_analyzer(analyzer, gen):\n\n visualize_start_time = time.time()\n\n # execute reverse engineering\n analyzer.analyze(gen)\n\n visualize_end_time = time.time()\n print('visualization cost %f seconds' %\n (visualize_end_time - visualize_start_time))\n\n return\n\ndef save_pattern(pattern, mask, y_target):\n\n # create result dir\n if not os.path.exists(RESULT_DIR):\n os.mkdir(RESULT_DIR)\n\n img_filename = (\n '%s/%s' % (RESULT_DIR,\n IMG_FILENAME_TEMPLATE % ('pattern', y_target)))\n utils_backdoor.dump_image(pattern, img_filename, 'png')\n\n img_filename = (\n '%s/%s' % (RESULT_DIR,\n IMG_FILENAME_TEMPLATE % ('mask', y_target)))\n utils_backdoor.dump_image(np.expand_dims(mask, axis=2) * 255,\n img_filename,\n 'png')\n\n fusion = np.multiply(pattern, np.expand_dims(mask, axis=2))\n img_filename = (\n '%s/%s' % (RESULT_DIR,\n IMG_FILENAME_TEMPLATE % ('fusion', y_target)))\n utils_backdoor.dump_image(fusion, img_filename, 'png')\n\n pass\n\n\ndef start_analysis():\n\n print('loading dataset')\n #X_test, Y_test = load_dataset_class(1)\n X_test, Y_test = load_dataset()\n # transform numpy arrays into data generator\n test_generator = build_data_loader(X_test, Y_test)\n\n print('loading model')\n model_file = '%s/%s' % (MODEL_DIR, MODEL_FILENAME)\n model = load_model(model_file)\n\n # initialize analyzer\n analyzer = causal_attribution(\n model,\n test_generator,\n input_shape=INPUT_SHAPE,\n steps=STEPS, num_classes=NUM_CLASSES,\n mini_batch=MINI_BATCH,\n img_color=IMG_COLOR, batch_size=BATCH_SIZE, verbose=2)\n\n # y_label list to analyze\n y_target_list = list(range(NUM_CLASSES))\n y_target_list.remove(Y_TARGET)\n y_target_list = [Y_TARGET] + y_target_list\n\n y_target_list = [Y_TARGET]\n for y_target in y_target_list:\n\n #print('processing label %d' % y_target)\n\n trigger_analyzer(\n analyzer, test_generator)\n pass\n\n\ndef main():\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = DEVICE\n utils_backdoor.fix_gpu_memory()\n for i in range (0, 3):\n print(i)\n start_analysis()\n\n pass\n\n\nif __name__ == '__main__':\n 
#sys.stdout = open('file', 'w')\n start_time = time.time()\n main()\n elapsed_time = time.time() - start_time\n print('elapsed time %s s' % elapsed_time)\n #sys.stdout.close()"
]
| [
[
"tensorflow.set_random_seed",
"tensorflow.keras.utils.to_categorical",
"numpy.array",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.asarray",
"numpy.random.seed",
"numpy.argmax",
"numpy.expand_dims"
]
]
|
madhavadama/HungaBunga | [
"93270c57fe84d201f6ee32ebb19c900772bc3629"
]
| [
"hunga_bunga/classification.py"
]
| [
"\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression, Perceptron, PassiveAggressiveClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KNeighborsClassifier, NearestCentroid, RadiusNeighborsClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel, DotProduct, Matern, StationaryKernelMixin, WhiteKernel\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.ensemble import AdaBoostRegressor, ExtraTreesRegressor, RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.base import RegressorMixin\nfrom sklearn.base import is_classifier\n\nfrom core import *\nfrom params import *\n\n\nlinear_models_n_params = [\n (SGDClassifier,\n {'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge'],\n 'alpha': [0.0001, 0.001, 0.1],\n 'penalty': penalty_12none\n }),\n\n (LogisticRegression,\n {'penalty': penalty_12, 'max_iter': max_iter, 'tol': tol, 'warm_start': warm_start, 'C':C, 'solver': ['liblinear']\n }),\n\n (Perceptron,\n {'penalty': penalty_all, 'alpha': alpha, 'n_iter': n_iter, 'eta0': eta0, 'warm_start': warm_start\n }),\n\n (PassiveAggressiveClassifier,\n {'C': C, 'n_iter': n_iter, 'warm_start': warm_start,\n 'loss': ['hinge', 'squared_hinge'],\n })\n]\n\nlinear_models_n_params_small = linear_models_n_params\n\nsvm_models_n_params = [\n (SVC,\n {'C':C, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol, 'max_iter': max_iter_inf2}),\n\n (NuSVC,\n {'nu': nu, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol\n }),\n\n (LinearSVC,\n { 'C': C, 'penalty_12': penalty_12, 'tol': tol, 'max_iter': max_iter,\n 'loss': ['hinge', 'squared_hinge'],\n })\n]\n\nsvm_models_n_params_small = [\n (SVC,\n {'C':C, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol, 'max_iter': max_iter_inf2}),\n\n (NuSVC,\n {'nu': nu, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol\n }),\n\n (LinearSVC,\n { 'C': C, 'penalty': penalty_12, 'tol': tol, 'max_iter': max_iter,\n 'loss': ['hinge', 'squared_hinge'],\n })\n]\n\nneighbor_models_n_params = [\n\n (KMeans,\n {'algorithm': ['auto', 'full', 'elkan'],\n 'init': ['k-means++', 'random']}),\n\n (KNeighborsClassifier,\n {'n_neighbors': n_neighbors, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,\n 'weights': ['uniform', 'distance'],\n 'p': [1, 2]\n }),\n\n (NearestCentroid,\n {'metric': neighbor_metric,\n 'shrink_threshold': [1e-3, 1e-2, 0.1, 0.5, 0.9, 2]\n }),\n\n (RadiusNeighborsClassifier,\n {'radius': neighbor_radius, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,\n 'weights': ['uniform', 'distance'],\n 'p': [1, 2],\n 'outlier_label': [-1]\n })\n]\n\ngaussianprocess_models_n_params = [\n (GaussianProcessClassifier,\n {'warm_start': warm_start,\n 'kernel': [RBF(), ConstantKernel(), DotProduct(), 
WhiteKernel()],\n 'max_iter_predict': [500],\n 'n_restarts_optimizer': [3],\n })\n]\n\nbayes_models_n_params = [\n (GaussianNB, {})\n]\n\nnn_models_n_params = [\n (MLPClassifier,\n { 'hidden_layer_sizes': [(16,), (64,), (100,), (32, 32)],\n 'activation': ['identity', 'logistic', 'tanh', 'relu'],\n 'alpha': alpha, 'learning_rate': learning_rate, 'tol': tol, 'warm_start': warm_start,\n 'batch_size': ['auto', 50],\n 'max_iter': [1000],\n 'early_stopping': [True, False],\n 'epsilon': [1e-8, 1e-5]\n })\n]\n\nnn_models_n_params_small = [\n (MLPClassifier,\n { 'hidden_layer_sizes': [(64,), (32, 64)],\n 'batch_size': ['auto', 50],\n 'activation': ['identity', 'tanh', 'relu'],\n 'max_iter': [500],\n 'early_stopping': [True],\n 'learning_rate': learning_rate_small\n })\n]\n\ntree_models_n_params = [\n\n (RandomForestClassifier,\n {'criterion': ['gini', 'entropy'],\n 'max_features': max_features, 'n_estimators': n_estimators, 'max_depth': max_depth,\n 'min_samples_split': min_samples_split, 'min_impurity_split': min_impurity_split, 'warm_start': warm_start, 'min_samples_leaf': min_samples_leaf,\n }),\n\n (DecisionTreeClassifier,\n {'criterion': ['gini', 'entropy'],\n 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_impurity_split':min_impurity_split, 'min_samples_leaf': min_samples_leaf\n }),\n\n (ExtraTreesClassifier,\n {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth,\n 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'min_impurity_split': min_impurity_split, 'warm_start': warm_start,\n 'criterion': ['gini', 'entropy']})\n]\n\ntree_models_n_params_small = [\n\n (RandomForestClassifier,\n {'max_features_small': max_features_small, 'n_estimators_small': n_estimators_small, 'min_samples_split': min_samples_split, 'max_depth_small': max_depth_small, 'min_samples_leaf': min_samples_leaf\n }),\n\n (DecisionTreeClassifier,\n {'max_features_small': max_features_small, 'max_depth_small': max_depth_small, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf\n }),\n\n (ExtraTreesClassifier,\n {'n_estimators_small': n_estimators_small, 'max_features_small': max_features_small, 'max_depth_small': max_depth_small,\n 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf})\n]\n\n\ndef run_all_classifiers(x, y, small = True, normalize_x = True, n_jobs=cpu_count()-1, brain=False, test_size=0.2, n_splits=5, upsample=True, scoring=None, verbose=False, grid_search=True):\n all_params = (linear_models_n_params_small if small else linear_models_n_params) + (nn_models_n_params_small if small else nn_models_n_params) + ([] if small else gaussianprocess_models_n_params) + neighbor_models_n_params + (svm_models_n_params_small if small else svm_models_n_params) + (tree_models_n_params_small if small else tree_models_n_params)\n return main_loop(all_params, StandardScaler().fit_transform(x) if normalize_x else x, y, isClassification=True, n_jobs=n_jobs, verbose=verbose, brain=brain, test_size=test_size, n_splits=n_splits, upsample=upsample, scoring=scoring, grid_search=grid_search)\n\n\nclass HungaBungaClassifier(ClassifierMixin):\n def __init__(self, brain=False, test_size = 0.2, n_splits = 5, random_state=None, upsample=True, scoring=None, verbose=False, normalize_x = True, n_jobs =cpu_count() - 1, grid_search=True):\n self.model = None\n self.brain = brain\n self.test_size = test_size\n self.n_splits = n_splits\n self.random_state = random_state\n self.upsample = 
upsample\n        self.scoring = scoring\n        self.verbose = verbose\n        self.n_jobs = n_jobs\n        self.normalize_x = normalize_x\n        self.grid_search = grid_search\n        super(HungaBungaClassifier, self).__init__()\n\n    def fit(self, x, y):\n        self.model = run_all_classifiers(x, y, normalize_x=self.normalize_x, test_size=self.test_size, n_splits=self.n_splits, upsample=self.upsample, scoring=self.scoring, verbose=self.verbose, brain=self.brain, n_jobs=self.n_jobs, grid_search=self.grid_search)[0]\n        return self\n\n    def predict(self, x):\n        return self.model.predict(x)\n\n\nif __name__ == '__main__':\n    iris = datasets.load_iris()\n    X, y = iris.data, iris.target\n    clf = HungaBungaClassifier()\n    clf.fit(X, y)\n    print(clf.predict(X).shape)\n\n"
]
| [
[
"sklearn.preprocessing.StandardScaler",
"sklearn.gaussian_process.kernels.DotProduct",
"sklearn.gaussian_process.kernels.RBF",
"sklearn.gaussian_process.kernels.WhiteKernel",
"sklearn.datasets.load_iris",
"sklearn.gaussian_process.kernels.ConstantKernel"
]
]
|
ailabteam/Daily-Working | [
"0a36b5b6e92941e2e101a151eda202cb57567f4a"
]
| [
"COLAB-GOOGLE-Practices/colab google/data/L1/l1.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 18 22:06:34 2019\n\n@author: DELL\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#numOfPoint = 30\n#noise = np.random.normal(0,1,numOfPoint).reshape(-1,1)\n#x = np.linspace(30, 100, numOfPoint).reshape(-1,1)\n#N = x.shape[0]\n#y = 15*x + 8 + 20*noise\n#plt.scatter(x, y)\n\ndata = pd.read_csv('data_linear.csv').values\nN = data.shape[0]\nx = data[:, 0].reshape(-1, 1)\ny = data[:, 1].reshape(-1, 1)\nplt.scatter(x, y)\nplt.xlabel('mét vuông')\nplt.ylabel('giá')\n\nx = np.hstack((np.ones((N, 1)), x))\n\nw = np.array([0.,1.]).reshape(-1,1)\n\nnumOfIteration = 100\ncost = np.zeros((numOfIteration,1))\nlearning_rate = 0.000001\nfor i in range(1, numOfIteration):\n r = np.dot(x, w) - y\n cost[i] = 0.5*np.sum(r*r)\n w[0] -= learning_rate*np.sum(r)\n # correct the shape dimension\n w[1] -= learning_rate*np.sum(np.multiply(r, x[:,1].reshape(-1,1)))\n print(cost[i])\npredict = np.dot(x, w)\nplt.plot((x[0][1], x[N-1][1]),(predict[0], predict[N-1]), 'r')\nplt.show()\n\nx1 = 50\ny1 = w[0] + w[1] * 50\nprint('Giá nhà cho 50m^2 là : ', y1)"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"pandas.read_csv"
]
]
|
zscr/PyGEM | [
"fa357b16660c4dbe1617c17995b6f0455c35c4d7"
]
| [
"class_mbdata.py"
]
| [
"\"\"\"class of mass balance data and functions associated with manipulating the dataset to be in the proper format\"\"\"\n\n# External libraries\nimport pandas as pd\nimport numpy as np\nimport calendar\nimport collections\nimport datetime\n# Local libraries\nimport pygem_input as input\nimport pygemfxns_modelsetup as modelsetup\n\n\nclass MBData():\n \"\"\"\n Mass balance data properties and functions used to automatically retrieve data for calibration.\n \n Attributes\n ----------\n name : str\n name of mass balance dataset.\n ds_fp : str\n file path \n \"\"\"\n def __init__(self, \n name='wgms_d',\n ):\n \"\"\"\n Add variable name and specific properties associated with each variable.\n \"\"\"\n \n # Source of climate data\n self.name = name\n # Set parameters for ERA-Interim and CMIP5 netcdf files\n if self.name == 'shean': \n self.ds_fp = input.shean_fp\n self.ds_fn = input.shean_fn\n self.rgi_glacno_cn = input.shean_rgi_glacno_cn\n self.mb_mwea_cn = input.shean_mb_cn\n self.mb_mwea_err_cn = input.shean_mb_err_cn\n self.t1_cn = input.shean_time1_cn\n self.t2_cn = input.shean_time2_cn\n self.area_cn = input.shean_area_cn\n \n elif self.name == 'berthier': \n self.ds_fp = input.berthier_fp\n self.ds_fn = input.berthier_fn\n self.rgi_glacno_cn = input.berthier_rgi_glacno_cn\n self.mb_mwea_cn = input.berthier_mb_cn\n self.mb_mwea_err_cn = input.berthier_mb_err_cn\n self.t1_cn = input.berthier_time1_cn\n self.t2_cn = input.berthier_time2_cn\n self.area_cn = input.berthier_area_cn\n \n elif self.name == 'braun': \n self.ds_fp = input.braun_fp\n self.ds_fn = input.braun_fn\n self.rgi_glacno_cn = input.braun_rgi_glacno_cn\n self.mb_mwea_cn = input.braun_mb_cn\n self.mb_mwea_err_cn = input.braun_mb_err_cn\n self.t1_cn = input.braun_time1_cn\n self.t2_cn = input.braun_time2_cn\n self.area_cn = input.braun_area_cn\n \n elif self.name == 'mcnabb':\n self.ds_fp = input.mcnabb_fp\n self.ds_fn = input.mcnabb_fn\n self.rgi_glacno_cn = input.mcnabb_rgiid_cn\n self.mb_mwea_cn = input.mcnabb_mb_cn\n self.mb_mwea_err_cn = input.mcnabb_mb_err_cn\n self.t1_cn = input.mcnabb_time1_cn\n self.t2_cn = input.mcnabb_time2_cn\n self.area_cn = input.mcnabb_area_cn\n \n elif self.name == 'larsen':\n self.ds_fp = input.larsen_fp\n self.ds_fn = input.larsen_fn\n self.rgi_glacno_cn = input.larsen_rgiid_cn\n self.mb_mwea_cn = input.larsen_mb_cn\n self.mb_mwea_err_cn = input.larsen_mb_err_cn\n self.t1_cn = input.larsen_time1_cn\n self.t2_cn = input.larsen_time2_cn\n self.area_cn = input.larsen_area_cn\n \n elif self.name == 'brun':\n self.data_fp = input.brun_fp\n \n elif self.name == 'mauer':\n self.ds_fp = input.mauer_fp\n self.ds_fn = input.mauer_fn\n self.rgi_glacno_cn = input.mauer_rgi_glacno_cn\n self.mb_mwea_cn = input.mauer_mb_cn\n self.mb_mwea_err_cn = input.mauer_mb_err_cn\n self.t1_cn = input.mauer_time1_cn\n self.t2_cn = input.mauer_time2_cn\n \n elif self.name == 'wgms_d':\n self.ds_fp = input.wgms_fp\n self.ds_fn = input.wgms_d_fn_preprocessed\n self.rgi_glacno_cn = input.wgms_rgi_glacno_cn\n self.thickness_chg_cn = input.wgms_d_thickness_chg_cn\n self.thickness_chg_err_cn = input.wgms_d_thickness_chg_err_cn\n self.volume_chg_cn = input.wgms_d_volume_chg_cn\n self.volume_chg_err_cn = input.wgms_d_volume_chg_err_cn\n self.z1_cn = input.wgms_d_z1_cn\n self.z2_cn = input.wgms_d_z2_cn\n self.obs_type_cn = input.wgms_obs_type_cn\n \n elif self.name == 'wgms_ee':\n self.ds_fp = input.wgms_fp\n self.ds_fn = input.wgms_ee_fn_preprocessed\n self.rgi_glacno_cn = input.wgms_rgi_glacno_cn\n self.mb_mwe_cn = 
input.wgms_ee_mb_cn\n self.mb_mwe_err_cn = input.wgms_ee_mb_err_cn\n self.t1_cn = input.wgms_ee_t1_cn\n self.period_cn = input.wgms_ee_period_cn\n self.z1_cn = input.wgms_ee_z1_cn\n self.z2_cn = input.wgms_ee_z2_cn\n self.obs_type_cn = input.wgms_obs_type_cn\n \n elif self.name == 'cogley':\n self.ds_fp = input.cogley_fp\n self.ds_fn = input.cogley_fn_preprocessed\n self.rgi_glacno_cn = input.cogley_rgi_glacno_cn\n self.mass_chg_cn = input.cogley_mass_chg_cn\n self.mass_chg_err_cn = input.cogley_mass_chg_err_cn\n self.z1_cn = input.cogley_z1_cn\n self.z2_cn = input.cogley_z2_cn\n self.obs_type_cn = input.cogley_obs_type_cn\n \n elif self.name == 'group':\n self.ds_fp = input.mb_group_fp\n self.ds_fn = input.mb_group_data_fn\n self.ds_dict_fn = input.mb_group_dict_fn\n self.rgi_regionO1_cn = 'rgi_regionO1'\n self.t1_cn = input.mb_group_t1_cn\n self.t2_cn = input.mb_group_t2_cn\n \n \n def retrieve_mb(self, main_glac_rgi, main_glac_hyps, dates_table):\n \"\"\"\n Retrieve the mass balance for various datasets to be used in the calibration.\n \n \n Parameters\n ----------\n main_glac_rgi : pandas dataframe\n dataframe containing relevant rgi glacier information\n main_glac_hyps : pandas dataframe\n dataframe containing glacier hypsometry\n dates_table : pandas dataframe\n dataframe containing dates of model run\n \n Returns\n -------\n ds_output : pandas dataframe\n dataframe of mass balance observations and other relevant information for calibration \n \"\"\" \n # Dictionary linking glacier number (glacno) to index for selecting elevation indices\n glacnodict = dict(zip(main_glac_rgi['rgino_str'], main_glac_rgi.index.values))\n # Column names of output\n ds_output_cols = ['RGIId', 'glacno', 'group_name', 'obs_type', 'mb_mwe', 'mb_mwe_err', 'sla_m', 'z1_idx', \n 'z2_idx', 'z1', 'z2', 't1_idx', 't2_idx', 't1', 't2', 'area_km2', 'WGMS_ID']\n # Avoid group data as processing is slightly different\n if self.name is not 'group':\n # Load all data\n ds_all = pd.read_csv(self.ds_fp + self.ds_fn) \n if str(ds_all.loc[0,self.rgi_glacno_cn]).startswith('RGI'):\n ds_all['glacno'] = [str(x).split('-')[1] for x in ds_all[self.rgi_glacno_cn].values]\n else:\n ds_all['glacno'] = [str(int(x)).zfill(2) + '.' 
+ str(int(np.round(x%1*10**5))).zfill(5) \n for x in ds_all[self.rgi_glacno_cn]]\n ds = ds_all.iloc[np.where(ds_all['glacno'].isin(list(main_glac_rgi.rgino_str.values)))[0],:].copy()\n ds.reset_index(drop=True, inplace=True)\n # Elevation indices\n elev_bins = main_glac_hyps.columns.values.astype(int)\n elev_bin_interval = elev_bins[1] - elev_bins[0]\n \n # DATASET SPECIFIC CALCULATIONS\n # ===== SHEAN GEODETIC DATA =====\n if self.name in ['shean', 'berthier', 'braun']:\n ds['z1_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))\n ds['z2_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n ds['area_km2'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n # Time indices\n ds['t1'] = ds[self.t1_cn].astype(np.float64)\n ds['t2'] = ds[self.t2_cn].astype(np.float64)\n ds['t1_year'] = ds['t1'].astype(int)\n ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)\n ds.loc[ds['t1_month'] == 13, 't1_year'] = ds.loc[ds['t1_month'] == 13, 't1_year'] + 1\n ds.loc[ds['t1_month'] == 13, 't1_month'] = 1\n # add 1 to account for the fact that January starts with value of 1\n ds['t2_year'] = ds['t2'].astype(int)\n ds['t2_month'] = round(ds['t2'] % ds['t2_year'] * 12)\n ds.loc[ds['t2_month'] == 0, 't2_month'] = 1\n # do not need to add one for t2 because we want the last full time step\n # Remove data with dates outside of calibration period\n year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12\n year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] + \n (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)\n ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]\n ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]\n ds.reset_index(drop=True, inplace=True) \n \n # Determine time indices (exclude spinup years, since massbal fxn discards spinup years)\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n if x == 10539:\n print(x, ds.loc[x,'RGIId'], ds.loc[x,'t1'], ds.loc[x,'t1_month'], ds.loc[x,'t2_month'])\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n ds['t1_idx'] = ds['t1_idx'].astype(int)\n # Specific mass balance [mwea]\n ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])\n ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1']) \n# # Total mass change [Gt]\n# ds['mb_gt'] = ds[self.mb_vol_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000\n# ds['mb_gt_err'] = ds[self.mb_vol_err_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000\n if 'obs_type' not in list(ds.columns.values):\n # Observation type\n ds['obs_type'] = 'mb_geo'\n # Add columns with nan for things not in list\n ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]\n for colname in ds_addcols:\n ds[colname] = np.nan\n \n# # ===== BERTHIER =====\n# if self.name == 'berthier':\n# ds['z1_idx'] = (\n# 
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))\n# ds['z2_idx'] = (\n# (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))\n# # Lower and upper bin elevations [masl]\n# ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n# ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n# # Area [km2]\n# ds['area_km2'] = np.nan\n# for x in range(ds.shape[0]):\n# ds.loc[x,'area_km2'] = (\n# main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n# ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n# # Time indices\n# ds['t1'] = ds[self.t1_cn]\n# ds['t2'] = ds[self.t2_cn]\n# print(ds)\n# ds['t1_year'] = ds['t1'].astype(int)\n# ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)\n# # add 1 to account for the fact that January starts with value of 1\n# ds['t2_year'] = ds['t2'].astype(int)\n# ds['t2_month'] = round(ds['t2'] % ds['t2_year'] * 12)\n# # do not need to add one for t2 because we want the last full time step\n# # Remove data with dates outside of calibration period\n# year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12\n# year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] + \n# (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)\n# ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]\n# ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]\n# ds.reset_index(drop=True, inplace=True) \n# # Determine time indices (exclude spinup years, since massbal fxn discards spinup years)\n# ds['t1_idx'] = np.nan\n# ds['t2_idx'] = np.nan\n# for x in range(ds.shape[0]):\n# ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n# (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n# ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n# (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n# ds['t1_idx'] = ds['t1_idx'].astype(int)\n# # Specific mass balance [mwea]\n# print(ds[self.mb_mwea_cn])\n# ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])\n# ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1']) \n# # Observation type\n# ds['obs_type'] = 'mb_geo'\n# # Add columns with nan for things not in list\n# ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]\n# for colname in ds_addcols:\n# ds[colname] = np.nan\n \n # ===== BRUN GEODETIC DATA =====\n elif self.name == 'brun':\n print('code brun')\n \n # ===== MAUER GEODETIC DATA =====\n elif self.name == 'mauer':\n ds['z1_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))\n ds['z2_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n ds['area_km2'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n # Time indices\n ds['t1'] = ds[self.t1_cn]\n ds['t2'] = ds[self.t2_cn]\n ds['t1_year'] = ds['t1'].astype(int)\n ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)\n # add 1 to account for the fact that January starts with value of 1\n ds.loc[ds['t1_month'] > 12, 't1_month'] = 12\n ds['t2_year'] = ds['t2'].astype(int)\n ds['t2_month'] = 2\n 
# Remove data with dates outside of calibration period\n year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12\n year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] + \n (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)\n ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]\n ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]\n ds.reset_index(drop=True, inplace=True) \n # Determine time indices (exclude spinup years, since massbal fxn discards spinup years)\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n ds['t1_idx'] = ds['t1_idx'].astype(int)\n # Specific mass balance [mwea]\n ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])\n ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1']) \n # Observation type\n ds['obs_type'] = 'mb_geo'\n \n # ===== WGMS GEODETIC DATA =====\n elif self.name == 'wgms_d':\n ds['z1_idx'] = np.nan\n ds['z2_idx'] = np.nan\n ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)\n .argmax(axis=1))\n ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))\n .argmax(axis=1))\n ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) - \n ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))\n ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) - \n ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)\n ds['z1_idx'] = ds['z1_idx'].values.astype(int)\n ds['z2_idx'] = ds['z2_idx'].values.astype(int)\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n # use WGMS area when provided; otherwise use area from RGI\n ds['area_km2_rgi'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2_rgi'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum()) \n ds['area_km2'] = np.nan\n ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2_rgi']\n ds.loc[ds.AREA_SURVEY_YEAR.notnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.notnull(), \n 'AREA_SURVEY_YEAR']\n # Time indices\n # remove data that does not have reference date or survey data\n ds = ds[np.isnan(ds['REFERENCE_DATE']) == False]\n ds = ds[np.isnan(ds['SURVEY_DATE']) == False]\n ds.reset_index(drop=True, inplace=True)\n # Extract date information\n ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t1_month'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)\n ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n 
ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)\n # if month/day unknown for start or end period, then replace with water year\n # Add latitude \n latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))\n ds['CenLat'] = ds['RGIId'].map(latdict)\n ds['lat_category'] = np.nan\n ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'\n ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'\n ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'\n ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'\n ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)\n ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])\n ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])\n ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])\n ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])\n ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']\n ds.loc[ds['t1_day'] == 99, 't1_day'] = 1\n ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1\n for x in range(ds.shape[0]):\n if ds.loc[x, 't2_day'] == 99:\n try:\n ds.loc[x, 't2_day'] = (\n dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']\n .values[0])\n except:\n ds.loc[x, 't2_day'] = 28 \n # Replace poor values of months\n ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)\n ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)\n # Replace poor values of days\n ds['t1_daysinmonth'] = (\n [calendar.monthrange(ds.loc[x,'t1_year'], ds.loc[x,'t1_month'])[1] for x in range(ds.shape[0])])\n ds['t2_daysinmonth'] = (\n [calendar.monthrange(ds.loc[x,'t2_year'], ds.loc[x,'t2_month'])[1] for x in range(ds.shape[0])])\n ds['t1_day'] = (ds.apply(lambda x: x['t1_day'] if x['t1_day'] <= x['t1_daysinmonth'] \n else x['t1_daysinmonth'], axis=1))\n ds['t2_day'] = (ds.apply(lambda x: x['t2_day'] if x['t2_day'] <= x['t2_daysinmonth'] \n else x['t2_daysinmonth'], axis=1))\n # Calculate decimal year and drop measurements outside of calibration period\n ds['t1_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))\n ds['t2_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))\n ds['t1_doy'] = ds.t1_datetime.dt.strftime(\"%j\").astype(float)\n ds['t2_doy'] = ds.t2_datetime.dt.strftime(\"%j\").astype(float)\n ds['t1_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t2_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear\n ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear\n end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']\n end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)\n ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]\n ds = ds[ds['t2_datetime'] < end_datetime]\n ds.reset_index(drop=True, 
inplace=True)\n # Time indices\n # exclude spinup years, since massbal fxn discards spinup years\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n # Specific mass balance [mwe]\n # if thickness change is available, then compute the specific mass balance with the thickness change\n # otherwise, use the volume change and area to estimate the specific mass balance\n # using thickness change\n ds['mb_mwe'] = ds[self.thickness_chg_cn] / 1000 * input.density_ice / input.density_water\n ds['mb_mwe_err'] = ds[self.thickness_chg_err_cn] / 1000 * input.density_ice / input.density_water\n # using volume change (note: units volume change [1000 m3] and area [km2])\n ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (\n ds.loc[ds.mb_mwe.isnull(), self.volume_chg_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] * \n (1/1000)**2 * input.density_ice / input.density_water)\n ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (\n ds.loc[ds.mb_mwe.isnull(), self.volume_chg_err_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] * \n (1/1000)**2 * input.density_ice / input.density_water)\n # Observation type\n ds['obs_type'] = 'mb_geo'\n \n # ===== WGMS GLACIOLOGICAL DATA =====\n elif self.name == 'wgms_ee':\n ds['z1_idx'] = np.nan\n ds['z2_idx'] = np.nan\n ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)\n .argmax(axis=1))\n ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))\n .argmax(axis=1))\n ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) - \n ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))\n ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) - \n ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)\n ds['z1_idx'] = ds['z1_idx'].values.astype(int)\n ds['z2_idx'] = ds['z2_idx'].values.astype(int)\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n ds['area_km2'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n ds = ds[ds['area_km2'] > 0]\n ds.reset_index(drop=True, inplace=True)\n # Time indices\n # winter and summer balances typically have the same data for 'BEGIN_PERIOD' and 'END_PERIOD' as the annual\n # measurements, so need to set these dates manually\n # Remove glaciers without begin or end period\n ds = ds.drop(np.where(np.isnan(ds['BEGIN_PERIOD'].values))[0].tolist(), axis=0)\n ds = ds.drop(np.where(np.isnan(ds['END_PERIOD'].values))[0].tolist(), axis=0)\n ds.reset_index(drop=True, inplace=True)\n ds['t1_year'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t1_month'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t1_day'] = 
ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int)\n ds['t2_year'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t2_month'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t2_day'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int) \n # if annual measurement and month/day unknown for start or end period, then replace with water year\n # Add latitude \n latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))\n ds['CenLat'] = ds['RGIId'].map(latdict)\n ds['lat_category'] = np.nan\n ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'\n ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'\n ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'\n ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'\n ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)\n ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])\n ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])\n ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])\n ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])\n # annual start\n ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']\n ds.loc[ds['t1_day'] == 99, 't1_day'] = 1\n ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1\n for x in range(ds.shape[0]):\n if ds.loc[x, 't2_day'] == 99:\n try:\n ds.loc[x, 't2_day'] = (\n dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']\n .values[0])\n except:\n ds.loc[x, 't2_day'] = 28\n # If period is summer/winter, adjust dates accordingly\n for x in range(ds.shape[0]):\n if (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and \n (ds.loc[x, 'period'] == 'summer')):\n ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1\n ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']\n ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']\n elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and \n (ds.loc[x, 'period'] == 'summer')):\n ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']\n ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']\n elif (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and \n (ds.loc[x, 'period'] == 'winter')):\n ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']\n ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']\n elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and \n (ds.loc[x, 'period'] == 'summer')):\n ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1\n ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']\n ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']\n ds.loc[x, 't1_day'] = 1\n ds.loc[x, 't2_day'] = calendar.monthrange(ds.loc[x, 't2_year'], ds.loc[x, 't2_month'])[1]\n # Replace poor values of months\n ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)\n ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)\n # Calculate decimal year and drop measurements outside of calibration period\n ds['t1_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))\n ds['t2_datetime'] = pd.to_datetime(\n 
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))\n ds['t1_doy'] = ds.t1_datetime.dt.strftime(\"%j\").astype(float)\n ds['t2_doy'] = ds.t2_datetime.dt.strftime(\"%j\").astype(float)\n ds['t1_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t2_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear\n ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear\n end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']\n end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)\n ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]\n ds = ds[ds['t2_datetime'] < end_datetime]\n ds.reset_index(drop=True, inplace=True)\n # Annual, summer, and winter time indices\n # exclude spinup years, since massbal fxn discards spinup years\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n # Specific mass balance [mwe]\n ds['mb_mwe'] = ds[self.mb_mwe_cn] / 1000\n ds['mb_mwe_err'] = ds[self.mb_mwe_err_cn] / 1000\n# # Total mass change [Gt]\n# ds['mb_gt'] = ds[self.mb_mwe_cn] / 1000 * ds['area_km2'] * 1000**2 * input.density_water / 1000 / 10**9\n# ds['mb_gt_err'] = (ds[self.mb_mwe_err_cn] / 1000 * ds['area_km2'] * 1000**2 * input.density_water / 1000 \n# / 10**9)\n # Observation type\n ds['obs_type'] = 'mb_glac'\n \n # ===== WGMS GLACIOLOGICAL DATA =====\n elif self.name == 'cogley':\n ds['z1_idx'] = np.nan\n ds['z2_idx'] = np.nan\n ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)\n .argmax(axis=1))\n ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (\n (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))\n .argmax(axis=1))\n ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) - \n ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))\n ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (\n ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) - \n ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)\n ds['z1_idx'] = ds['z1_idx'].values.astype(int)\n ds['z2_idx'] = ds['z2_idx'].values.astype(int)\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n # use WGMS area when provided; otherwise use area from RGI\n ds['area_km2_rgi'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2_rgi'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n # Time indices\n ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t1_month'] = 
ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)\n ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)\n # if month/day unknown for start or end period, then replace with water year\n # Add latitude \n latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))\n ds['CenLat'] = ds['RGIId'].map(latdict)\n ds['lat_category'] = np.nan\n ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'\n ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'\n ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'\n ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'\n ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)\n ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])\n ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])\n ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])\n ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])\n ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']\n ds.loc[ds['t1_day'] == 99, 't1_day'] = 1\n ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1\n for x in range(ds.shape[0]):\n if ds.loc[x, 't2_day'] == 99:\n try:\n ds.loc[x, 't2_day'] = (\n dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']\n .values[0])\n except:\n ds.loc[x, 't2_day'] = 28 \n # Calculate decimal year and drop measurements outside of calibration period\n ds['t1_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))\n ds['t2_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))\n ds['t1_doy'] = ds.t1_datetime.dt.strftime(\"%j\").astype(float)\n ds['t2_doy'] = ds.t2_datetime.dt.strftime(\"%j\").astype(float)\n ds['t1_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t2_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear\n ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear\n end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']\n end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)\n ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]\n ds = ds[ds['t2_datetime'] < end_datetime]\n ds.reset_index(drop=True, inplace=True)\n # Time indices\n # exclude spinup years, since massbal fxn discards spinup years\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = 
(dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n # Specific mass balance [mwe]\n ds['mb_mwe'] = ds[self.mass_chg_cn] / input.density_water * (ds['t2'] - ds['t1'])\n ds['mb_mwe_err'] = ds[self.mass_chg_err_cn] / input.density_water * (ds['t2'] - ds['t1'])\n # Observation type\n ds['obs_type'] = 'mb_geo'\n \n # ===== LARSEN OR MCNABB GEODETIC MASS BALANCE =====\n elif self.name == 'mcnabb' or self.name == 'larsen': \n ds['z1_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))\n ds['z2_idx'] = (\n (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))\n # Lower and upper bin elevations [masl]\n ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2\n ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2\n # Area [km2]\n ds['area_km2'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'area_km2'] = (\n main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']], \n ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())\n # Time\n ds['t1_year'] = [int(str(x)[0:4]) for x in ds[self.t1_cn].values]\n ds['t1_month'] = [int(str(x)[4:6]) for x in ds[self.t1_cn].values]\n ds['t1_day'] = [int(str(x)[6:]) for x in ds[self.t1_cn].values]\n ds['t2_year'] = [int(str(x)[0:4]) for x in ds[self.t2_cn].values]\n ds['t2_month'] = [int(str(x)[4:6]) for x in ds[self.t2_cn].values]\n ds['t2_day'] = [int(str(x)[6:]) for x in ds[self.t2_cn].values] \n ds['t1_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t1_year'], row['t1_month']), axis=1)\n ds['t2_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t2_year'], row['t2_month']), axis=1)\n ds['t1_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))\n ds['t2_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))\n ds['t1'] = ds['t1_year'] + (ds['t1_month'] + ds['t1_day'] / ds['t1_daysinmonth']) / 12\n ds['t2'] = ds['t2_year'] + (ds['t2_month'] + ds['t2_day'] / ds['t2_daysinmonth']) / 12\n # Remove data with dates outside of calibration period\n year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12\n year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] + \n (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)\n ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]\n ds = ds[ds['t2_year'] + ds['t2_month'] / 12 < year_decimal_max]\n ds.reset_index(drop=True, inplace=True) \n # Determine time indices (exclude spinup years, since massbal fxn discards spinup years)\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n ds['t1_idx'] = ds['t1_idx'].astype(int)\n # Specific mass balance [mwea]\n ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])\n ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1']) \n # Total mass change [Gt]\n# ds['mb_gt'] = ds[self.mb_vol_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000\n# ds['mb_gt_err'] = ds[self.mb_vol_err_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * 
input.density_water / 1000\n # Observation type\n ds['obs_type'] = 'mb_geo'\n \n # ====== GROUP DATA ======\n elif self.name == 'group':\n # Load all data\n ds_all = pd.read_csv(self.ds_fp + self.ds_fn, encoding='latin1')\n # Dictionary linking group_names with the RGIIds\n ds_dict_raw = pd.read_csv(self.ds_fp + self.ds_dict_fn)\n ds_dict = dict(zip(ds_dict_raw['RGIId'], ds_dict_raw['group_name']))\n # For each unique group name identify all glaciers associated with the group and test if all those glaciers\n # are included in the model run via main_glac_rgi\n group_names_unique = list(set(ds_dict.values()))\n ds_dict_keyslist = [[] for x in group_names_unique]\n for n, group in enumerate(group_names_unique):\n ds_dict_keyslist[n] = [group, [k for k, v in ds_dict.items() if v == group]]\n ds_all['glaciers_present'] = set(ds_dict_keyslist[n][1]).issubset(main_glac_rgi.RGIId.values.tolist())\n ds_all.loc[n, 'first_RGIId'] = ds_dict_keyslist[n][1][0]\n # Remove groups where all glaciers are not included\n ds = ds_all[ds_all.glaciers_present == True].copy()\n ds.reset_index(drop=True, inplace=True)\n # Time indices\n ds['t1_year'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t1_month'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t1_day'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[6:].astype(int)\n ds['t2_year'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[:4].astype(int)\n ds['t2_month'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)\n ds['t2_day'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[6:].astype(int)\n # if month/day unknown for start or end period, then replace with water year\n # Add latitude \n latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))\n ds['CenLat'] = ds['first_RGIId'].map(latdict)\n ds['lat_category'] = np.nan\n ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'\n ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'\n ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'\n ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'\n ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)\n ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])\n ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])\n ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])\n ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])\n ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']\n ds.loc[ds['t1_day'] == 99, 't1_day'] = 1\n ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1\n for x in range(ds.shape[0]):\n if ds.loc[x, 't2_day'] == 99:\n try:\n ds.loc[x, 't2_day'] = (\n dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']\n .values[0])\n except:\n ds.loc[x, 't2_day'] = 28 \n # Calculate decimal year and drop measurements outside of calibration period\n ds['t1_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))\n ds['t2_datetime'] = pd.to_datetime(\n pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))\n ds['t1_doy'] = ds.t1_datetime.dt.strftime(\"%j\").astype(float)\n ds['t2_doy'] = 
ds.t2_datetime.dt.strftime(\"%j\").astype(float)\n ds['t1_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t2_daysinyear'] = (\n (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) - \n pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)\n ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear\n ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear\n end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']\n end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)\n ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]\n ds = ds[ds['t2_datetime'] < end_datetime]\n ds.reset_index(drop=True, inplace=True)\n # Time indices\n # exclude spinup years, since massbal fxn discards spinup years\n ds['t1_idx'] = np.nan\n ds['t2_idx'] = np.nan\n for x in range(ds.shape[0]):\n ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) & \n (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])\n ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) & \n (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])\n # Mass balance [mwe]\n ds['mb_mwe'] = np.nan\n ds['mb_mwe_err'] = np.nan\n ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe'] = (\n ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_ma'] * input.density_ice / input.density_water * \n (ds['t2'] - ds['t1']))\n ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe_err'] = (\n ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_unc_ma'] * input.density_ice / input.density_water * \n (ds['t2'] - ds['t1']))\n \n \n # Add columns with nan for things not in list\n ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]\n for colname in ds_addcols:\n ds[colname] = np.nan\n # Select output\n ds_output = ds[ds_output_cols].sort_values(['glacno', 't1_idx'])\n ds_output.reset_index(drop=True, inplace=True)\n\n return ds_output\n \n\ndef select_best_mb(cal_data):\n \"\"\"\n Retrieve 'best' mass balance (observed > extrapolated) and longest time period\n \n Returns\n -------\n cal_data_best : pandas dataframe\n dataframe of 'best' mass balance observations and other relevant information for calibration \n \"\"\" \n cal_data['dt'] = cal_data['t2'] - cal_data['t1']\n rgiids = list(cal_data.RGIId.values)\n rgiids_count = collections.Counter(rgiids)\n rgiids_multiple = []\n rgiids_single_idx = []\n cal_data_rgiids_all = list(cal_data.RGIId.values)\n for x in rgiids_count:\n if rgiids_count[x] > 1:\n rgiids_multiple.append(x)\n else:\n rgiids_single_idx.append(cal_data_rgiids_all.index(x))\n rgiids_multiple = sorted(rgiids_multiple)\n rgiids_single_idx = sorted(rgiids_single_idx) \n \n # Select all data with single value \n cal_data_best = cal_data.loc[rgiids_single_idx,:]\n \n # Append 'best' value for those with multiple observations\n for rgiid in rgiids_multiple:\n cal_data_multiple = cal_data[cal_data['RGIId'] == rgiid]\n # Select observations over extrapolated values\n if 'mb_geo' in list(cal_data_multiple.obs_type.values):\n cal_data_multiple = cal_data_multiple[cal_data_multiple.obs_type == 'mb_geo']\n # Select longest time series\n cal_data_append = cal_data_multiple[cal_data_multiple.dt == cal_data_multiple.dt.max()] \n \n cal_data_best = pd.concat([cal_data_best, cal_data_append], axis=0)\n \n cal_data_best = cal_data_best.sort_values(by=['RGIId'])\n 
cal_data_best.reset_index(inplace=True, drop=True)\n \n return cal_data_best\n \n\n#%% Testing\nif __name__ == '__main__':\n # Glacier selection\n rgi_regionsO1 = [1]\n rgi_glac_number = 'all'\n glac_no = input.glac_no\n startyear = 1950\n endyear = 2018\n \n# # Select glaciers\n# for rgi_regionsO1 in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]:\n# main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=[rgi_regionsO1], rgi_regionsO2 = 'all', \n# rgi_glac_number='all')\n# marine = main_glac_rgi[main_glac_rgi['TermType'] == 1]\n# lake = main_glac_rgi[main_glac_rgi['TermType'] == 2]\n# print('Region ' + str(rgi_regionsO1) + ':')\n# print(' marine:', np.round(marine.Area.sum() / main_glac_rgi.Area.sum() * 100,0))\n# print(' lake:', np.round(lake.Area.sum() / main_glac_rgi.Area.sum() * 100,0))\n main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=rgi_regionsO1, rgi_regionsO2 = 'all', \n rgi_glac_number=rgi_glac_number, glac_no=input.glac_no)\n # Glacier hypsometry [km**2], total area\n main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict, \n input.hyps_colsdrop)\n # Determine dates_table_idx that coincides with data\n dates_table = modelsetup.datesmodelrun(startyear, endyear, spinupyears=0, option_wateryear=3)\n \n elev_bins = main_glac_hyps.columns.values.astype(int)\n elev_bin_interval = elev_bins[1] - elev_bins[0]\n \n \n #%%\n# cal_datasets = ['shean'] \n# cal_datasets = ['braun', 'mcnabb', 'larsen', 'berthier']\n# cal_datasets = ['braun', 'larsen', 'mcnabb']\n cal_datasets = ['braun']\n# cal_datasets = ['shean', 'mauer', 'wgms_d', 'wgms_ee', 'cogley', 'mcnabb', 'larsen']\n# cal_datasets = ['group']\n \n cal_data = pd.DataFrame()\n for dataset in cal_datasets:\n cal_subset = MBData(name=dataset)\n cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table)\n cal_data = cal_data.append(cal_subset_data, ignore_index=True)\n \n # Count unique glaciers and fraction of total area\n glacno_unique = list(cal_subset_data.glacno.unique())\n main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no = glacno_unique)\n print(dataset, '- glacier area covered: ', \n np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100,1),'%')\n \n cal_data = cal_data.sort_values(['glacno', 't1_idx'])\n cal_data.reset_index(drop=True, inplace=True)\n \n # Count unique glaciers and fraction of total area\n if len(cal_datasets) > 1:\n glacno_unique = list(cal_data.glacno.unique())\n main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no = glacno_unique)\n print('All datasets glacier area covered: ', \n np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100,1),'%')\n \n# # Export 'best' dataset\n# cal_data_best = select_best_mb(cal_data)\n# cal_data_best = cal_data_best.drop(['group_name', 'sla_m', 'WGMS_ID'], axis=1)\n# cal_data_best['mb_mwea'] = cal_data_best.mb_mwe / cal_data_best.dt\n# cal_data_best['mb_mwea_sigma'] = cal_data_best.mb_mwe_err / cal_data_best.dt\n# cal_data_best.to_csv(input.braun_fp + 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv', index=False)\n \n\n#%% PRE-PROCESS MCNABB DATA\n# # Remove glaciers that:\n# # (1) poor percent coverage\n# # (2) uncertainty is too hig\n# \n# density_ice_brun = 850\n# \n# mcnabb_fn = 'McNabb_data_all_raw.csv'\n# output_fn = 'McNabb_data_all_preprocessed.csv'\n# \n# # Load data\n# ds_raw = pd.read_csv(input.mcnabb_fp + mcnabb_fn)\n# ds_raw['glacno_str'] = [x.split('-')[1] for x in ds_raw.RGIId.values]\n# ds_raw['mb_mwea'] = 
ds_raw['smb'] * density_ice_brun / input.density_water\n# ds_raw['mb_mwea_sigma'] = ds_raw['e_dh'] * density_ice_brun / input.density_water\n# nraw = ds_raw.shape[0]\n# \n# # remove data with poor coverage\n# ds = ds_raw[ds_raw['pct_data'] > 0.75].copy()\n# ds.reset_index(drop=True, inplace=True)\n# nraw_goodcoverage = ds.shape[0]\n# print('Glaciers removed (poor coverage):', nraw - nraw_goodcoverage, 'points')\n# \n# # remove glaciers with too high uncertainty (> 1.96 stdev)\n# uncertainty_median = ds.e_dh.median()\n# ds['e_mad'] = np.absolute(ds['e_dh'] - uncertainty_median)\n# uncertainty_mad = np.median(ds['e_mad'])\n# print('uncertainty median and mad [m/yr]:', np.round(uncertainty_median,2), np.round(uncertainty_mad,2))\n# ds = ds[ds['e_dh'] < uncertainty_median + 3*uncertainty_mad].copy()\n# ds = ds.sort_values('RGIId')\n# ds.reset_index(drop=True, inplace=True)\n# print('Glaciers removed (too high uncertainty):', nraw_goodcoverage - ds.shape[0], 'points')\n# \n# # Select glaciers\n# glac_no = sorted(set(ds['glacno_str'].values))\n# main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)\n# \n# # Count unique glaciers and fraction of total area\n# print('Glacier area covered: ', np.round(main_glac_rgi['Area'].sum(),1),'km2')\n# \n## # All values\n## rgiid_values = list(ds.RGIId.values)\n## rgiid_idx = []\n## for rgiid in rgiid_values:\n## rgiid_idx.append(np.where(main_glac_rgi.RGIId.values == rgiid)[0][0])\n## ds['CenLat'] = main_glac_rgi.loc[rgiid_idx, 'CenLat'].values\n## ds['CenLon'] = main_glac_rgi.loc[rgiid_idx, 'CenLon'].values\n#\n#\n# # Only longest value\n# ds_output = pd.DataFrame(np.zeros((len(glac_no), ds.shape[1])), columns=ds.columns)\n# for nglac, glacno in enumerate(glac_no):\n# ds_subset = ds.loc[np.where(ds.glacno_str.values == glacno)[0],:]\n# ds_subset.reset_index(inplace=True)\n# ds_output.loc[nglac,:] = (\n# ds_subset.loc[np.where(ds_subset['pct_data'].values == ds_subset['pct_data'].max())[0][0],:])\n# \n# # Minimum and maximum mass balances\n# print('Max MB:', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.max())[0][0],'smb'],2), \n# '+/-', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.max())[0][0],'e_dh'],2))\n# print('Min MB:', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.min())[0][0],'smb'],2), \n# '+/-', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.min())[0][0],'e_dh'],2))\n#\n# # Adjust date to YYYYMMDD format\n# print('\\nCHECK ALL YEARS AFTER IN 2000s\\n')\n# ds_output['y0'] = ['20' + str(x.split('/')[2]).zfill(2) for x in ds_output['date0'].values]\n# ds_output['m0'] = [str(x.split('/')[0]).zfill(2) for x in ds_output['date0'].values]\n# ds_output['d0'] = [str(x.split('/')[1]).zfill(2) for x in ds_output['date0'].values]\n# ds_output['y1'] = ['20' + str(x.split('/')[2]).zfill(2) for x in ds_output['date1'].values]\n# ds_output['m1'] = [str(x.split('/')[0]).zfill(2) for x in ds_output['date1'].values]\n# ds_output['d1'] = [str(x.split('/')[1]).zfill(2) for x in ds_output['date1'].values]\n# ds_output['date0'] = ds_output['y0'] + ds_output['m0'] + ds_output['d0']\n# ds_output['date1'] = ds_output['y1'] + ds_output['m1'] + ds_output['d1']\n# ds_output.drop(['y0', 'm0', 'd0', 'y1', 'm1', 'd1'], axis=1, inplace=True)\n# \n# ds_output.to_csv(input.mcnabb_fp + output_fn)\n \n\n#%%\n# # PRE-PROCESS MAUER DATA\n# mauer_fn = 'Mauer_geoMB_HMA_1970s_2000.csv'\n# min_pctCov = 80\n# \n# ds = pd.read_csv(input.mauer_fp + mauer_fn)\n# ds.dropna(axis=0, 
how='any', inplace=True)\n# ds.sort_values('RGIId')\n# ds.reset_index(drop=True, inplace=True)\n# demyears = ds.demYears.tolist()\n# demyears = [x.split(';') for x in demyears]\n# t1_raw = []\n# t2 = []\n# for x in demyears:\n# if '2000' in x:\n# x.remove('2000')\n# t2.append(2000)\n# t1_raw.append([np.float(y) for y in x])\n# t1 = np.array([np.array(x).mean() for x in t1_raw])\n# ds['t1'] = t1\n# ds['t2'] = t2 \n# # Minimum percent coverage\n# ds2 = ds[ds.pctCov > min_pctCov].copy()\n# ds2['RegO1'] = ds2.RGIId.astype(int)\n# # Glacier number and index for comparison\n# ds2['glacno'] = ((ds2['RGIId'] % 1) * 10**5).round(0).astype(int)\n# ds_list = ds2[['RegO1', 'glacno']]\n# ds2['RGIId'] = ds2['RegO1'] + ds2['glacno'] / 10**5\n# ds2.reset_index(drop=True, inplace=True)\n# ds2.drop(['RegO1', 'glacno'], axis=1, inplace=True)\n# ds2.to_csv(input.mauer_fp + input.mauer_fn.split('.csv')[0] + '_min' + str(min_pctCov) + 'pctCov.csv', index=False)\n# \n# # Pickle lists of glacier numbers for each region\n# import pickle\n# for reg in [13, 14, 15]:\n# ds_subset = ds_list[ds_list['RegO1'] == reg]\n# rgi_glacno_list = [str(x).rjust(5,'0') for x in ds_subset['glacno'].tolist()]\n# pickle_fn = 'R' + str(reg) + '_mauer_1970s_2000_rgi_glac_number.pkl'\n# print('Region ' + str(reg) + ' list:', rgi_glacno_list)\n# print(pickle_fn)\n## \n## with open(pickle_fn, 'wb') as f:\n## pickle.dump(rgi_glacno_list, f) \n \n #%%\n# import pickle\n# region = 15\n# \n# mauer_pickle_fn = 'R' + str(region) + '_mauer_1970s_2000_rgi_glac_number.pkl'\n# \n# with open(mauer_pickle_fn, 'rb') as f:\n# rgi_glac_number = pickle.load(f)\n# \n# # Select glaciers\n# main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2 = 'all', \n# rgi_glac_number=rgi_glac_number)\n# # Glacier hypsometry [km**2], total area\n# main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, \n# input.hyps_filedict, input.hyps_colsdrop)\n# # Determine dates_table_idx that coincides with data\n# dates_table = modelsetup.datesmodelrun(1970, 2017, spinupyears=0)\n# \n# \n# # Select mass balance data\n# mb1 = MBData(name='mauer')\n# ds_mb = mb1.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table)"
]
| [
[
"numpy.isnan",
"numpy.round",
"pandas.DataFrame",
"numpy.tile",
"pandas.concat",
"pandas.read_csv"
]
]
|
YasuShimizu/Nays2d_Bed_Deformation | [
"0fa821c6c6bf52bc3ebe2d96629abb257654dc82"
]
| [
"uniform.py"
]
| [
"from types import SimpleNamespace\r\nimport numpy as np\r\nfrom numba import jit\r\n\r\n@jit\r\ndef down(nx,ny,dn,eta,qp,snm,hs0,slope):\r\n h_max=np.max(eta)+hs0; h_min=np.min(eta)\r\n eps=qp;epsmin=qp/100.\r\n# print(eps,epsmin)\r\n while eps>epsmin:\r\n h_down=(h_max+h_min)*.5\r\n qcd=0.\r\n for j in np.arange(1,ny+1):\r\n hs1=h_down-eta[nx,j]\r\n if hs1<0.:\r\n hs1=0.\r\n u01=0.\r\n else:\r\n u01=1./snm*hs1**(2./3.)*np.sqrt(slope)\r\n qcd=qcd+u01*hs1*dn[nx,j]\r\n eps=np.abs(qp-qcd)\r\n if qcd>qp:\r\n h_max=h_down\r\n else:\r\n h_min=h_down\r\n \r\n return h_down \r\n\r\n@jit\r\ndef h_line(hpos_c,eta,spos_c,h_down,slope,nx,nym,hmin):\r\n tlen=spos_c[nx]\r\n for i in np.arange(1,nx+1):\r\n hpos_c[i]=h_down+(tlen-spos_c[i])*slope\r\n hs_c=hpos_c[i]-eta[i,nym]\r\n if hs_c< hmin:\r\n hpos_c[i]=eta[i,nym]\r\n return hpos_c\r\n\r\n\r\n@jit\r\ndef h_uniform(hpos_c,c_area,vel_ave,nx,ny,dn,eta,qp,snm,hs0,h_down,hmin,slope,g):\r\n hpos_c[nx]=h_down\r\n epsmin=qp/1000.\r\n for i in np.arange(1,nx):\r\n h_max=np.max(eta[i,:])+hs0; h_min=np.min(eta[i,:])\r\n eps=qp\r\n w_width=0.\r\n while eps>epsmin:\r\n hpos_c[i]=(h_max+h_min)*.5\r\n qcd=0.\r\n c_area[i]=0.;w_width=0.\r\n for j in np.arange(1,ny+1):\r\n hs1=hpos_c[i]-eta[i,j]\r\n if hs1<hmin:\r\n hs1=0.\r\n u01=0.\r\n else:\r\n u01=1./snm*hs1**(2./3.)*np.sqrt(slope)\r\n dni=(dn[i,j]+dn[i-1,j])*.5\r\n w_width=w_width+dni\r\n c_area[i]=c_area[i]+dni*hs1\r\n qcd=qcd+u01*hs1*dni\r\n eps=np.abs(qp-qcd)\r\n if qcd>qp:\r\n h_max=hpos_c[i]\r\n else:\r\n h_min=hpos_c[i]\r\n ave_dep=c_area[i]/w_width\r\n vel_ave[i]=qp/c_area[i]\r\n fr_num=vel_ave[i]/np.sqrt(g*ave_dep)\r\n# print(i,vel_ave[i],ave_dep,fr_num,w_width)\r\n return hpos_c\r\n\r\n\r\n@jit\r\ndef h_nonuni(hpos_c,c_area,vel_ave,e_slope,alf_f,eta,qp,spos_c,hs0,h_down,nx,ny,nym,ds,dn,snm,hmin,g):\r\n hpos_c[nx]=h_down\r\n epsmin=hmin\r\n for i in np.arange(nx,1,-1):\r\n c_area[i]=0.;b1=0.; b2=0.; w_width=0.\r\n for j in np.arange(1,ny+1):\r\n hs1=hpos_c[i]-eta[i,j]\r\n if hs1>hmin:\r\n dnn=(dn[i,j]+dn[i-1,j])*.5\r\n w_width=w_width+dnn\r\n c_area[i]=c_area[i]+hs1*dnn\r\n b1=b1+dnn*hs1**3/snm**3\r\n b2=b2+dnn*hs1**(5./3.)/snm\r\n alf_f[i]=b1/b2**3\r\n e_slope[i]=qp**2/b2**2\r\n vel_ave[i]=qp/c_area[i]\r\n ave_dep=c_area[i]/w_width\r\n fr_num=vel_ave[i]/np.sqrt(g*ave_dep)\r\n# print(i,ave_dep,vel_ave[i],fr_num)\r\n\r\n if i>1:\r\n dsx=(ds[i,nym]+ds[i-1,nym])*.5\r\n sslope=(eta[i-1,nym]-eta[i,nym])/dsx\r\n hpos_c[i-1]=hpos_c[i]+dsx*sslope\r\n eps=hs0; nc=0\r\n while eps>epsmin and nc<500:\r\n c_area[i-1]=0.\r\n b1=0.;b2=0.\r\n for j in np.arange(1,ny+1):\r\n hs1=hpos_c[i-1]-eta[i-1,j]\r\n if hs1>hmin:\r\n dnn=(dn[i-1,j]+dn[i-2,j])*.5\r\n c_area[i-1]=c_area[i-1]+hs1*dnn\r\n b1=b1+dnn*hs1**3/snm**3\r\n b2=b2+dnn*hs1**(5./3.)/snm\r\n alf_f[i-1]=b1/b2**3\r\n e_slope[i-1]=qp**2/b2**2\r\n h_a1=hpos_c[i]+qp**2/(2.*g)*(alf_f[i]-alf_f[i-1])+dsx*.5*(e_slope[i]+e_slope[i-1])\r\n eps=np.abs(h_a1-hpos_c[i-1])\r\n nc=nc+1\r\n hpos_c[i-1]=h_a1\r\n return hpos_c\r\n"
]
| [
[
"numpy.max",
"numpy.min",
"numpy.arange",
"numpy.sqrt",
"numpy.abs"
]
]
|
PyFstat/PyFstat | [
"a9a66f8477462808e83f966f5f06fb3b9efb5a1b"
]
| [
"examples/grid_examples/PyFstat_example_grid_search_F0.py"
]
| [
"\"\"\"\nDirected grid search: Monochromatic source\n==========================================\n\nSearch for a monochromatic (no spindown) signal using\na parameter space grid (i.e. no MCMC).\n\"\"\"\nimport pyfstat\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nlabel = \"PyFstat_example_grid_search_F0\"\noutdir = os.path.join(\"PyFstat_example_data\", label)\n\n# Properties of the GW data\nsqrtS = \"1e-23\"\nIFOs = \"H1\"\n# IFOs = \"H1,L1\"\nsqrtSX = \",\".join(np.repeat(sqrtS, len(IFOs.split(\",\"))))\ntstart = 1000000000\nduration = 100 * 86400\ntend = tstart + duration\ntref = 0.5 * (tstart + tend)\n\n# parameters for injected signals\ndepth = 70\ninj = {\n \"tref\": tref,\n \"F0\": 30.0,\n \"F1\": 0,\n \"F2\": 0,\n \"Alpha\": 1.0,\n \"Delta\": 1.5,\n \"h0\": float(sqrtS) / depth,\n \"cosi\": 0.0,\n}\n\ndata = pyfstat.Writer(\n label=label,\n outdir=outdir,\n tstart=tstart,\n duration=duration,\n sqrtSX=sqrtSX,\n detectors=IFOs,\n **inj,\n)\ndata.make_data()\n\nm = 0.001\ndF0 = np.sqrt(12 * m) / (np.pi * duration)\nDeltaF0 = 800 * dF0\nF0s = [inj[\"F0\"] - DeltaF0 / 2.0, inj[\"F0\"] + DeltaF0 / 2.0, dF0]\nF1s = [inj[\"F1\"]]\nF2s = [inj[\"F2\"]]\nAlphas = [inj[\"Alpha\"]]\nDeltas = [inj[\"Delta\"]]\nsearch = pyfstat.GridSearch(\n label=label,\n outdir=outdir,\n sftfilepattern=os.path.join(outdir, \"*\" + label + \"*sft\"),\n F0s=F0s,\n F1s=F1s,\n F2s=F2s,\n Alphas=Alphas,\n Deltas=Deltas,\n tref=tref,\n minStartTime=tstart,\n maxStartTime=tend,\n)\nsearch.run()\n\n# report details of the maximum point\nmax_dict = search.get_max_twoF()\nprint(\n \"max2F={:.4f} from GridSearch, offsets from injection: {:s}.\".format(\n max_dict[\"twoF\"],\n \", \".join(\n [\n \"{:.4e} in {:s}\".format(max_dict[key] - inj[key], key)\n for key in max_dict.keys()\n if not key == \"twoF\"\n ]\n ),\n )\n)\nsearch.generate_loudest()\n\nprint(\"Plotting 2F(F0)...\")\nfig, ax = plt.subplots()\nfrequencies = search.data[\"F0\"]\ntwoF = search.data[\"twoF\"]\n# mismatch = np.sign(x-inj[\"F0\"])*(duration * np.pi * (x - inj[\"F0\"]))**2 / 12.0\nax.plot(frequencies, twoF, \"k\", lw=1)\nDeltaF = frequencies - inj[\"F0\"]\nsinc = np.sin(np.pi * DeltaF * duration) / (np.pi * DeltaF * duration)\nA = np.abs((np.max(twoF) - 4) * sinc ** 2 + 4)\nax.plot(frequencies, A, \"-r\", lw=1)\nax.set_ylabel(\"$\\\\widetilde{2\\\\mathcal{F}}$\")\nax.set_xlabel(\"Frequency\")\nax.set_xlim(F0s[0], F0s[1])\ndF0 = np.sqrt(12 * 1) / (np.pi * duration)\nxticks = [inj[\"F0\"] - 10 * dF0, inj[\"F0\"], inj[\"F0\"] + 10 * dF0]\nax.set_xticks(xticks)\nxticklabels = [\"$f_0 {-} 10\\\\Delta f$\", \"$f_0$\", \"$f_0 {+} 10\\\\Delta f$\"]\nax.set_xticklabels(xticklabels)\nplt.tight_layout()\nfig.savefig(os.path.join(outdir, label + \"_1D.png\"), dpi=300)\n"
]
| [
[
"numpy.max",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt"
]
]
|
danwley/synbols-benchmarks | [
"799f85c4bf6a84e0f6b6ad05878bc21c2d40e4c9"
]
| [
"active_learning/datasets/synbols.py"
]
| [
"import json\nimport os\n\nimport h5py\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom datasets import DATA_ROOT\n\n\nclass Synbols(Dataset):\n def __init__(self, path, split, key='font', transform=None):\n self.path = path\n self.split = split\n self.x, self.y = self._load_data(path)\n _y = []\n for y in self.y:\n _y.append(json.loads(y)[key])\n self.y = _y\n self.labelset = list(sorted(set(self.y)))\n self.y = np.array([self.labelset.index(y) for y in self.y])\n if transform is None:\n self.transform = lambda x: x\n else:\n self.transform = transform\n self.num_classes = len(self.labelset)\n self.make_splits()\n\n def _load_data(self, f):\n if f.endswith('h5py'):\n with h5py.File(f, 'r') as f:\n return f['x'].value, f['y'].value\n else:\n data = np.load(f)\n return data['x'], data['y']\n\n def make_splits(self, seed=42):\n start, end = self.get_splits(self.x)\n rng = np.random.RandomState(seed)\n self.indices = rng.permutation(len(self.x))\n self.x = self.x[self.indices[start:end]]\n self.y = self.y[self.indices[start:end]]\n\n def get_splits(self, source):\n if self.split == 'train':\n start = 0\n end = int(0.8 * len(source))\n elif self.split == 'val':\n start = int(0.8 * len(source))\n end = int(0.9 * len(source))\n elif self.split == 'test':\n start = int(0.9 * len(source))\n end = len(source)\n return start, end\n\n def __getitem__(self, item):\n return self.transform(self.x[item]), self.y[item]\n\n def __len__(self):\n return len(self.x)\n\n\nif __name__ == '__main__':\n # Check that we can load the dataset.\n synbols = Synbols(os.path.join(DATA_ROOT, '/latin_res=32x32_n=100000.npz'),\n 'val')\n"
]
| [
[
"numpy.load",
"numpy.random.RandomState"
]
]
|
labdeeman7/real-nvp | [
"5fcfbbdf28417bb708e40b990b1bc9cd48c15d7a"
]
| [
"train.py"
]
| [
"\"\"\"Train Real NVP on CIFAR-10.\n\nTrain script adapted from: https://github.com/kuangliu/pytorch-cifar/\n\"\"\"\nimport argparse\nimport os\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.transforms as transforms\nimport util\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.enabled=False\n\n\nfrom models import RealNVP, RealNVPLoss\nfrom tqdm import tqdm\n\n\ndef main(args):\n device = 'cuda' if torch.cuda.is_available() and len(args.gpu_ids) > 0 else 'cpu'\n start_epoch = 0\n\n\n if args.dataset == \"MNIST\": \n # Note: No normalization applied, since RealNVP expects inputs in (0, 1).\n transform_train = transforms.Compose([\n transforms.ToTensor()\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor()\n ])\n no_of_channels = 1\n trainset = torchvision.datasets.MNIST(root='data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.MNIST(root='data', train=False, download=True, transform=transform_test)\n elif args.dataset == \"CIFAR\":\n no_of_channels = 3\n # Note: No normalization applied, since RealNVP expects inputs in (0, 1).\n transform_train = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor()\n ])\n trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)\n else:\n os.error(\"only MNIST and CIFAR currently supported working on LSUN\")\n \n trainloader = data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n testloader = data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n\n # Model\n print('Building model..')\n net = RealNVP(num_scales=2, in_channels=no_of_channels, mid_channels=64, num_blocks=8)\n # net = RealNVP(num_scales=2, in_channels=no_of_channels, mid_channels=32, num_blocks=4)\n # net = RealNVP(num_scales=2, in_channels=no_of_channels, mid_channels=8, num_blocks=4)\n net = net.to(device)\n if device == 'cuda':\n net = torch.nn.DataParallel(net, args.gpu_ids)\n cudnn.benchmark = args.benchmark\n\n if args.resume:\n # Load checkpoint.\n print('Resuming from checkpoint at ckpts/best.pth.tar...')\n assert os.path.isdir('ckpts'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('ckpts/best.pth.tar')\n net.load_state_dict(checkpoint['net'])\n global best_loss\n global loss_arr_test\n global loss_arr_train\n best_loss = checkpoint['test_loss']\n loss_arr_test = checkpoint['loss_arr_test']\n loss_arr_train = checkpoint['loss_arr_train']\n start_epoch = checkpoint['epoch']\n\n loss_fn = RealNVPLoss()\n param_groups = util.get_param_groups(net, args.weight_decay, norm_suffix='weight_g')\n optimizer = optim.Adam(param_groups, lr=args.lr)\n\n for epoch in range(start_epoch, start_epoch + args.num_epochs):\n train(epoch, net, trainloader, device, optimizer, loss_fn, args.max_grad_norm)\n test(epoch, net, testloader, device, loss_fn, args.num_samples, no_of_channels)\n\n #store loss and bpd values\n with open(f'samples/loss_arr_test.npy', 'wb') as f:\n np.save(f, loss_arr_test)\n with open(f'samples/loss_arr_train.npy', 'wb') as f:\n np.save(f, loss_arr_train)\n\n\ndef train(epoch, net, trainloader, device, optimizer, loss_fn, 
max_grad_norm):\n print('\\nEpoch: %d' % epoch)\n net.train()\n loss_meter = util.AverageMeter()\n with tqdm(total=len(trainloader.dataset)) as progress_bar:\n for x, _ in trainloader:\n x = x.to(device)\n optimizer.zero_grad()\n z, sldj = net(x, reverse=False)\n loss = loss_fn(z, sldj)\n loss_meter.update(loss.item(), x.size(0))\n loss.backward()\n util.clip_grad_norm(optimizer, max_grad_norm)\n optimizer.step()\n\n progress_bar.set_postfix(loss=loss_meter.avg,\n bpd=util.bits_per_dim(x, loss_meter.avg))\n progress_bar.update(x.size(0))\n \n loss_arr_train.append((loss_meter.avg, util.bits_per_dim(x, loss_meter.avg))) \n\n\ndef sample(net, batch_size, device, no_of_channels):\n \"\"\"Sample from RealNVP model.\n\n Args:\n net (torch.nn.DataParallel): The RealNVP model wrapped in DataParallel.\n batch_size (int): Number of samples to generate.\n device (torch.device): Device to use.\n \"\"\"\n z = torch.randn((batch_size, no_of_channels, 32, 32), dtype=torch.float32, device=device)\n x, _ = net(z, reverse=True)\n x = torch.sigmoid(x)\n\n return x\n\n\ndef test(epoch, net, testloader, device, loss_fn, num_samples, no_of_channels):\n global best_loss\n net.eval()\n loss_meter = util.AverageMeter()\n with torch.no_grad():\n with tqdm(total=len(testloader.dataset)) as progress_bar:\n for x, _ in testloader:\n x = x.to(device)\n z, sldj = net(x, reverse=False) #ohh that is why the equation works, the directions are reversed. \n loss = loss_fn(z, sldj)\n loss_meter.update(loss.item(), x.size(0))\n progress_bar.set_postfix(loss=loss_meter.avg,\n bpd=util.bits_per_dim(x, loss_meter.avg))\n progress_bar.update(x.size(0))\n\n # Save checkpoint\n print(\"loss_meter.avg\", loss_meter.avg)\n print(\"best_loss\", best_loss)\n \n state = {\n 'net': net.state_dict(),\n 'test_loss': loss_meter.avg,\n 'epoch': epoch,\n 'loss_arr_test': loss_arr_test,\n 'loss_arr_train': loss_arr_train\n }\n os.makedirs('ckpts', exist_ok=True)\n torch.save(state, f'ckpts/epoch_{epoch}.pth.tar')\n\n if loss_meter.avg < best_loss:\n print('Saving best...')\n os.makedirs('ckpts', exist_ok=True)\n torch.save(state, 'ckpts/best.pth.tar')\n best_loss = loss_meter.avg\n \n # Save samples and data\n images = sample(net, num_samples, device, no_of_channels)\n os.makedirs('samples', exist_ok=True)\n images_concat = torchvision.utils.make_grid(images, nrow=int(num_samples ** 0.5), padding=2, pad_value=255)\n torchvision.utils.save_image(images_concat, 'samples/epoch_{}.png'.format(epoch))\n torchvision.utils.save_image(images[0], 'samples/epoch_{}_specific.png'.format(epoch))\n\n loss_arr_test.append((loss_meter.avg, util.bits_per_dim(x, loss_meter.avg))) \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='RealNVP on CIFAR-10/MNIST')\n\n parser.add_argument('--dataset', default=\"CIFAR\", type=str, help='dataset name: CIFAR or MNIST are current options')\n parser.add_argument('--batch_size', default=64, type=int, help='Batch size')\n parser.add_argument('--benchmark', action='store_true', help='Turn on CUDNN benchmarking')\n parser.add_argument('--gpu_ids', default='[0]', type=eval, help='IDs of GPUs to use')\n parser.add_argument('--lr', default=1e-3, type=float, help='Learning rate')\n parser.add_argument('--max_grad_norm', type=float, default=100., help='Max gradient norm for clipping')\n parser.add_argument('--num_epochs', default=100, type=int, help='Number of epochs to train')\n parser.add_argument('--num_samples', default=64, type=int, help='Number of samples at test time')\n 
parser.add_argument('--num_workers', default=8, type=int, help='Number of data loader threads')\n parser.add_argument('--resume', '-r', action='store_true', help='Resume from checkpoint')\n parser.add_argument('--weight_decay', default=5e-5, type=float,\n help='L2 regularization (only applied to the weight norm scale factors)')\n\n best_loss = 100000\n loss_arr_test = [] #tuple of loss, bpd for test\n loss_arr_train = [] #tuple of loss, bpd for train\n main(parser.parse_args())\n"
]
| [
[
"torch.sigmoid",
"torch.save",
"torch.optim.Adam",
"torch.no_grad",
"numpy.save",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.randn",
"torch.nn.DataParallel"
]
]
|
PankajChohan9820/FAST_SRGAN | [
"7f29db8063d06cb003b2dc7a880b644c87214f6f"
]
| [
"infer.py"
]
| [
"from argparse import ArgumentParser\nfrom tensorflow import keras\nimport numpy as np\nimport cv2\nimport os\n\nparser = ArgumentParser()\nparser.add_argument('--image_dir', type=str, help='Directory where images are kept.')\nparser.add_argument('--output_dir', type=str, help='Directory where to output high res images.')\n\n\ndef main():\n args = parser.parse_args()\n print(\"he\",args.image_dir)\n for x in os.listdir(args.image_dir):\n print(x)\n # Get all image paths\n image_paths = [os.path.join(args.image_dir, x) for x in os.listdir(args.image_dir)]\n\n # Change model input shape to accept all size inputs\n model = keras.models.load_model('models/generator.h5')\n inputs = keras.Input((None, None, 3))\n output = model(inputs)\n model = keras.models.Model(inputs, output)\n\n # Loop over all images\n for image_path in image_paths:\n \n # Read image\n low_res = cv2.imread(image_path, 1)\n low_res = cv2.resize(low_res, (96,96))\n # Convert to RGB (opencv uses BGR as default)\n low_res = cv2.cvtColor(low_res, cv2.COLOR_BGR2RGB)\n\n # Rescale to 0-1.\n low_res = low_res / 255.0\n print(\"super res\",np.expand_dims(low_res, axis=0))\n # Get super resolution image\n sr = model.predict(np.expand_dims(low_res, axis=0))[0]\n\n # Rescale values in range 0-255\n sr = (((sr + 1) / 2.) * 255).astype(np.uint8)\n\n # Convert back to BGR for opencv\n sr = cv2.cvtColor(sr, cv2.COLOR_RGB2BGR)\n\n # Save the results:\n cv2.imwrite(os.path.join(args.output_dir, os.path.basename(image_path)), sr)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.expand_dims",
"tensorflow.keras.models.load_model",
"tensorflow.keras.Input",
"tensorflow.keras.models.Model"
]
]
|
kuffmode/msa | [
"cdbdca2ddc907a1abd2391226ece584d6b277be0"
]
| [
"msapy/msa.py"
]
| [
"import warnings\nfrom typing import Callable, Optional, Dict, Tuple, Union\nimport numpy as np\nimport pandas as pd\nfrom ordered_set import OrderedSet\nfrom itertools import combinations\nfrom typeguard import typechecked\nfrom tqdm import tqdm\n\nfrom msapy import utils as ut\nfrom msapy.checks import _check_valid_elements, _check_valid_n_permutations, _check_valid_permutation_space, _get_contribution_type, _is_number\n\n\n@typechecked\ndef make_permutation_space(*,\n elements: list,\n n_permutations: int,\n pair: Optional[Tuple] = None,\n rng: Optional[np.random.Generator] = None,\n random_seed: Optional[int] = None) -> list:\n \"\"\"\n Generates a list of tuples containing n_permutations of the given elements.\n This will be used later in make_combination_space so you can have the same permutation and combination spaces for\n different games played by the same set. Probably makes things more reproducible!\n The elements themselves can be anything I guess, I tried str (names/labels) and integers (indexes),\n and tuples (edges, from-to style).\n Briefly, the permutation space of (A,B,C) is something like this:\n\n (A,B,C)\n (B,C,A)\n (C,B,A)\n (A,C,B)\n (C,A,B)\n (B,A,C)\n (C,A,B)\n .\n .\n .\n As you might have seen, there might be repetitions for small set of players and that's fine.\n\n Args:\n elements (list):\n A list of players to be shuffled n times.\n\n n_permutations (int):\n Number of permutations, Didn't check it systematically yet but just based on random explorations I'd say\n something around 1_000 is enough.\n\n pair (Optional[Tuple]):\n pair of elements that will always be together in every permutation\n\n rng (Optional[np.random.Generator]): Numpy random generator object used for reproducable results. Default is None.\n\n random_seed (Optional[int]):\n sets the random seed of the sampling process. Only used when `rng` is None. 
Default is None.\n\n Returns:\n (list[tuple]): Permutation space as a list of lists with shape (n_permutations, len(elements))\n \"\"\"\n\n # ------------------------------#\n _check_valid_elements(elements)\n _check_valid_n_permutations(n_permutations)\n # ------------------------------#\n if not rng:\n rng = np.random.default_rng(random_seed) if random_seed \\\n else np.random.default_rng()\n if not pair:\n permutation_space = [tuple(rng.permutation(elements))\n for _ in range(n_permutations)]\n return permutation_space\n\n elements = [e for e in elements if e != pair[0]]\n permutation_space = []\n\n for _ in range(n_permutations):\n permutation = list(rng.permutation(elements))\n permutation.insert(permutation.index(pair[1]), pair[0])\n permutation_space.append(tuple(permutation))\n\n return permutation_space\n\n\n@typechecked\ndef make_combination_space(*, permutation_space: list, pair: Optional[Tuple] = None, lesioned: Optional[any] = None) -> OrderedSet:\n \"\"\"\n Generates a dataset (OrderedSet) of coalitions from the permutation_space.\n In principle, this could be directly filled and passed to the make_shapley_values function\n but then the function wouldn't be pure so **this will be just an empty template**.\n Don't mix up this and the later-filled combination space.\n Briefly, the combination space of **one permutation of** (A,B,C) is something like this:\n\n (A,B,C)\n (A,B)\n (B,C)\n (A,C)\n (C)\n (B)\n (A)\n ()\n\n This will happen for every permutation of the permutation space so either there will be a large number\n of combinations here or if the set is small enough, it will be exhausted.\n\n Args:\n permutation_space (list):\n A list of players to be shuffled n times.\n\n pair (Optional[Tuple]):\n pair of elements that will always be together in every combination\n\n lesioned (Optional[any]):\n leseioned element that will not be present in any combination\n\n Returns:\n (OrderedSet): Combination space as an OrderedSet of frozensets.\n \"\"\"\n\n _check_valid_permutation_space(permutation_space)\n\n lesioned = set(lesioned) if lesioned else set()\n\n combination_space = OrderedSet()\n\n for permutation in permutation_space:\n skip_next = False\n # we really don't care about the element itself here\n for index, element in enumerate(permutation):\n if skip_next:\n skip_next = False\n continue\n if pair and element == pair[0]:\n index += 1\n skip_next = True\n\n # forming the coalition with the target element\n including = frozenset(permutation[:index + 1]) - lesioned\n # forming it without the target element\n excluding = frozenset(permutation[:index]) - lesioned\n\n combination_space.add(including)\n combination_space.add(excluding)\n\n return combination_space\n\n\n@typechecked\ndef make_complement_space(*,\n combination_space: OrderedSet,\n elements: list,\n lesioned: Optional[any] = None) -> OrderedSet:\n \"\"\"\n Produces the complement space of the combination space, useful for debugging\n and the multiprocessing function.\n Args:\n combination_space (OrderedSet):\n ordered set of target combinations (coalitions).\n elements (list):\n list of players.\n lesioned (Optional[any]):\n leseioned element that will not be present in any combination but every complement\n\n Returns:\n (OrderedSet): complements to be passed for lesioning.\n \"\"\"\n _check_valid_elements(elements)\n\n elements = frozenset(elements)\n diff = max(combination_space, key=len) ^ elements - \\\n {lesioned if lesioned else None}\n\n # ------------------------------#\n if len(diff) != 0:\n raise 
ValueError(f\"Elements in the combination space are different from what's in the elements list.\"\n f\"The symmetric difference-set is: {list(diff)}\")\n # ------------------------------#\n\n complement_space = OrderedSet()\n for combination in combination_space:\n complement_space.add(tuple(elements.difference(combination)))\n return complement_space\n\n\n@typechecked\ndef take_contributions(*,\n elements: list,\n complement_space: OrderedSet,\n combination_space: OrderedSet,\n objective_function: Callable,\n objective_function_params: Optional[Dict] = None) -> Tuple[Dict, Dict]:\n \"\"\"\n This function fills up the combination_space with the game you define (objective function). There is an important\n point to keep in mind, Shapley values are the added contributions of elements while in MSA we calculate them by\n perturbation so although it's intuitive to think the combination in combination space is the element that will be\n lesioned, it is not the case, it will be everything else but the coalition, i.e., the target coalition are the\n only intact elements. This function takes care of this by passing the complement of each coalition to the\n game while assigning the results to the target coalition, just keep the logic in mind.\n\n A second point is that this function returns a filled combination_space, it is not filling it in-place for the\n sake of purity.\n\n ---------------\n Note on returns:\n Contributions and lesion effects are virtually the same thing it's just about how you're looking at them.\n For example, you might want to use lesion effects by conditioning elements' length and see the effect of\n single lesions, dual, triple,... so, for contributions we have a value contributed by the intact coalition,\n the same result can be compared to the intact system to see how big was the impact of lesioning the complements.\n \"Same same, but different, but still same!\" - James Franco\n\n Args:\n elements (list):\n List of the players. Obviously, should be the same passed to make_permutation.\n\n complement_space (OrderedSet):\n The actual targets for lesioning. 
Shapley values are the added contributions of elements\n while in MSA we calculate them by perturbation so although it's intuitive to think the combination\n in combination space is the element that will be lesioned, it is not the case,\n it will be everything else but the coalition, i.e., the target coalition are the only intact elements.\n\n combination_space (OrderedSet):\n The template, will be copied, filled by the objective_function, and returned.\n\n objective_function (Callable):\n The game, it should get the complement set and return one numeric value either int or float.\n This function is just calling it as: objective_function(complement, **objective_function_params)\n so design accordingly.\n\n An example using networkx with some tips:\n (you sometimes need to specify what should happen during edge-cases like an all-lesioned network)\n\n def local_efficiency(complements, graph):\n if len(complements) < 0:\n # the network is intact so:\n return nx.local_efficiency(graph)\n\n elif len(complements) == len(graph):\n # the network is fully lesioned so:\n return 0.0\n\n else:\n # lesion the system, calculate things\n lesioned = graph.copy()\n lesioned.remove_nodes_from(complements)\n return nx.local_efficiency(lesioned)\n\n objective_function_params (Optional[Dict]):\n Kwargs for the objective_function.\n\n Returns:\n (Dict): A dictionary of combinations:results\n \"\"\"\n\n elements = frozenset(elements)\n contributions = dict.fromkeys(combination_space)\n lesion_effects = dict.fromkeys(complement_space)\n objective_function_params = objective_function_params if objective_function_params else {}\n\n # ------------------------------#\n if len(complement_space.items[1]) == 0:\n warnings.warn(\"Are you sure you're not mistaking complement and combination spaces?\"\n \"Length of the first element in complement space (really, complement_space[1]) is 0. \"\n \"It should be equal to the number of elements.\",\n stacklevel=2)\n # ------------------------------#\n\n for combination in tqdm(combination_space):\n # lesion everything but the target coalition\n complement = tuple(elements.difference(combination))\n result = objective_function(complement, **objective_function_params)\n\n contributions[combination] = result\n lesion_effects[complement] = result\n return contributions, lesion_effects\n\n\n@typechecked\ndef make_shapley_values(*,\n contributions: Dict,\n permutation_space: list,\n pair: Optional[Tuple] = None,\n lesioned: Optional[any] = None) -> pd.DataFrame:\n \"\"\"\n Calculates Shapley values based on the filled contribution_space.\n Briefly, for a permutation (A,B,C) it will be:\n\n (A,B,C) - (B,C) = Contribution of A to the coalition (B,C).\n (B,C) - (C) = Contribution of B to the coalition formed with (C).\n (C) = Contribution of C alone.\n\n This will repeat over all permutations. and the result is a distribution of Shapley values for each element,\n note that the estimation method we're using here is an \"unbiased estimator\" so the variance is fairly large.\n\n Args:\n contributions (Dict):\n Filled Dictionary of coalition:result\n\n permutation_space (list):\n Should be the same passed to make_combination_space.\n\n pair (Optional[Tuple]):\n pair of elements that will always be together in every combination\n\n lesioned (Optional[any]):\n leseioned element that will not be present in any combination\n\n Returns:\n pd.DataFrame: Shapley table or a dict of Shapely tables, columns will be \n elements and indices will be samples (permutations). 
\n It will be a Multi-Index DataFrame if the contributions are a dict\n i.e. the objective function returns multiple score functions (eg. accuracy, f1_score, etc.)\n It will be a Multi-Index DataFrame if the contributions are a timeseries.\n The index at `level=1` will be the timestamps\n \"\"\"\n _check_valid_permutation_space(permutation_space)\n arbitrary_contrib, multi_scores, is_timeseries = _get_contribution_type(contributions)\n\n if multi_scores:\n scores = list(arbitrary_contrib.keys())\n contributions = {k: np.array(list(v.values()))\n for k, v in contributions.items()}\n\n lesioned = set(lesioned) if lesioned else set()\n shapley_table = {}\n for permutation in permutation_space:\n skip_next = False\n isolated_contributions = [] # got to be a better way!\n\n # if the set is small it's possible that the permutation space exhausts the combination space so:\n if permutation in shapley_table:\n continue\n for index, element in enumerate(permutation):\n if skip_next:\n skip_next = False\n continue\n if pair and element == pair[0]:\n index += 1\n skip_next = True\n\n including = frozenset(permutation[:index + 1]) - lesioned\n excluding = frozenset(permutation[:index]) - lesioned\n isolated_contributions.append(\n contributions[including] - contributions[excluding])\n shapley_table[permutation] = np.array(isolated_contributions)\n\n if not multi_scores:\n shapley_values = pd.DataFrame([\n dict(zip(permutations, shapleys)) for permutations, shapleys in shapley_table.items()])\n return _process_timeseries_shapley(shapley_values) if is_timeseries else shapley_values\n\n shapley_values = []\n for i in range(len(arbitrary_contrib)):\n shapley_values.append(pd.DataFrame([\n dict(zip(permutations, shapleys[:, i])) for permutations, shapleys in shapley_table.items()]))\n\n shapley_values = pd.concat(shapley_values, keys=scores) if multi_scores else shapley_values\n\n return shapley_values\n\n\n@typechecked\ndef interface(*,\n n_permutations: int,\n elements: list,\n objective_function: Callable,\n objective_function_params: Dict = {},\n permutation_space: Optional[list] = None,\n pair: Optional[Tuple] = None,\n lesioned: Optional[any] = None,\n multiprocessing_method: str = 'joblib',\n rng: Optional[np.random.Generator] = None,\n random_seed: Optional[int] = None,\n n_parallel_games: int = -1,\n ) -> Tuple[pd.DataFrame, Dict, Dict]:\n \"\"\"\n A wrapper function to call other related functions internally and produces an easy-to-use pipeline.\n\n Args:\n n_permutations (int):\n Number of permutations (samples) per element.\n\n elements (list):\n List of the players (elements). Can be strings (names), integers (indicies), and tuples.\n\n objective_function (Callable):\n The game (in-silico experiment). 
It should get the complement set and return one numeric value\n either int or float.\n This function is just calling it as: objective_function(complement, **objective_function_params)\n\n An example using networkx with some tips:\n (you sometimes need to specify what should happen during edge-cases like an all-lesioned network)\n\n def local_efficiency(complements, graph):\n if len(complements) < 0:\n # the network is intact so:\n return nx.local_efficiency(graph)\n\n elif len(complements) == len(graph):\n # the network is fully lesioned so:\n return 0.0\n\n else:\n # lesion the system, calculate things\n lesioned = graph.copy()\n lesioned.remove_nodes_from(complements)\n return nx.local_efficiency(lesioned)\n\n objective_function_params (Dict):\n Kwargs for the objective_function.\n\n permutation_space (Optional[list]):\n Already generated permutation space, in case you want to be more reproducible or something and use the same\n lesion combinations for many metrics.\n \n pair (Optional[Tuple]):\n pair of elements that will always be together in every combination\n\n lesioned (Optional[any]):\n leseioned element that will not be present in any combination\n\n multiprocessing_method (str):\n So far, two methods of parallelization is implemented, 'joblib' and 'ray' and the default method is joblib.\n If using ray tho, you need to decorate your objective function with @ray.remote decorator. Visit their\n documentations to see how to go for it. I guess ray works better on HPC clusters (if they support it tho!)\n and probably doesn't suffer from the sneaky \"memory leakage\" of joblib. But just by playing around,\n I realized joblib is faster for tasks that are small themselves. Remedies are here:\n https://docs.ray.io/en/latest/auto_examples/tips-for-first-time.html\n\n Note: Generally, multiprocessing isn't always faster as explained above. Use it when the function itself\n takes some like each game takes longer than 0.5 seconds or so. For example, a function that sleeps for a\n second on a set of 10 elements with 1000 permutations each (1024 games) performs as follows:\n\n - no parallel: 1020 sec\n - joblib: 63 sec\n - ray: 65 sec\n\n That makes sense since I have 16 cores and 1000/16 is around 62.\n\n rng (Optional[np.random.Generator]): Numpy random generator object used for reproducable results. Default is None.\n\n random_seed (Optional[int]):\n sets the random seed of the sampling process. Only used when `rng` is None. Default is None.\n\n n_parallel_games (int):\n Number of parallel jobs (number of to-be-occupied cores),\n -1 means all CPU cores and 1 means a serial process.\n I suggest using 1 for debugging since things get messy in parallel!\n\n Returns:\n Tuple[pd.DataFrame, Dict, Dict]: shapley_table, contributions, lesion_effects\n\n Note that contributions and lesion_effects are the same values, addressed differently. For example:\n If from a set of ABCD removing AC ends with some value x, you can say the contribution of BD=x and the\n effect of removing AC=x. 
So the same values are addressed differently in the two returned Dicts.\n Of course, it makes more sense to compare the lesion effects with the intact system but who am I to judge.\n \"\"\"\n\n if not rng:\n rng = np.random.default_rng(random_seed) if random_seed else np.random.default_rng()\n\n if not permutation_space:\n permutation_space = make_permutation_space(elements=elements,\n n_permutations=n_permutations,\n pair=pair,\n rng=rng)\n else:\n warnings.warn(\"A Permutation space is given so n_permutations will fall back to what's specified there.\",\n stacklevel=2)\n\n combination_space = make_combination_space(\n permutation_space=permutation_space, pair=pair, lesioned=lesioned)\n complement_space = make_complement_space(combination_space=combination_space,\n elements=elements,\n lesioned=lesioned)\n\n if n_parallel_games == 1:\n contributions, lesion_effects = take_contributions(elements=elements,\n complement_space=complement_space,\n combination_space=combination_space,\n objective_function=objective_function,\n objective_function_params=objective_function_params)\n else:\n contributions, lesion_effects = ut.parallelized_take_contributions(\n multiprocessing_method=multiprocessing_method,\n n_cores=n_parallel_games,\n complement_space=complement_space,\n combination_space=combination_space,\n objective_function=objective_function,\n objective_function_params=objective_function_params)\n\n shapley_values = make_shapley_values(contributions=contributions,\n permutation_space=permutation_space,\n lesioned=lesioned)[elements]\n return shapley_values, contributions, lesion_effects\n\n\n@typechecked\ndef interaction_2d(*,\n n_permutations: int,\n elements: list,\n pair: tuple,\n objective_function: Callable,\n objective_function_params: Dict = {},\n multiprocessing_method: str = 'joblib',\n rng: Optional[np.random.Generator] = None,\n random_seed: Optional[int] = None,\n n_parallel_games: int = -1,\n ) -> Tuple:\n \"\"\"Performs Two dimensional MSA as explain in section 2.3 of [1]. \n We calculate the Shapley value of element i in the subgame of all elements without element j. \n Intuitively, this is the average marginal importance of element i when element j is perturbed. \n Repeat the process by interchanging i and j and the calculate the shapley values by considering \n i and j as a single unit.\n\n REFERENCES:\n Keinan, Alon, et al. \"Fair attribution of functional contribution in artificial and biological networks.\" \n Neural computation 16.9 (2004): 1887-1915.\n\n Args:\n n_permutations (int): Number of permutations (samples) per element.\n\n elements (list): List of the players (elements). Can be strings (names), integers (indicies), and tuples.\n\n pair (tuple): the pair of elements we want to analyze the interaction between\n\n objective_function (Callable):\n The game (in-silico experiment). 
It should get the complement set and return one numeric value\n either int or float.\n This function is just calling it as: objective_function(complement, **objective_function_params)\n\n An example using networkx with some tips:\n (you sometimes need to specify what should happen during edge-cases like an all-lesioned network)\n\n def local_efficiency(complements, graph):\n if len(complements) < 0:\n # the network is intact so:\n return nx.local_efficiency(graph)\n\n elif len(complements) == len(graph):\n # the network is fully lesioned so:\n return 0.0\n\n else:\n # lesion the system, calculate things\n lesioned = graph.copy()\n lesioned.remove_nodes_from(complements)\n return nx.local_efficiency(lesioned)\n\n objective_function_params (Dict, optional): Kwargs for the objective_function. Defaults to {}.\n\n multiprocessing_method (str, optional): \n So far, two methods of parallelization is implemented, 'joblib' and 'ray' and the default method is joblib.\n If using ray tho, you need to decorate your objective function with @ray.remote decorator. Visit their\n documentations to see how to go for it. I guess ray works better on HPC clusters (if they support it tho!)\n and probably doesn't suffer from the sneaky \"memory leakage\" of joblib. But just by playing around,\n I realized joblib is faster for tasks that are small themselves. Remedies are here:\n https://docs.ray.io/en/latest/auto_examples/tips-for-first-time.html\n\n Note: Generally, multiprocessing isn't always faster as explained above. Use it when the function itself\n takes some like each game takes longer than 0.5 seconds or so. For example, a function that sleeps for a\n second on a set of 10 elements with 1000 permutations each (1024 games) performs as follows:\n\n - no parallel: 1020 sec\n - joblib: 63 sec\n - ray: 65 sec\n\n That makes sense since I have 16 cores and 1000/16 is around 62. \n Defaults to 'joblib'.\n\n rng (Optional[np.random.Generator], optional): Numpy random generator object used for reproducable results. Default is None. Defaults to None.\n\n random_seed (Optional[int], optional): \n sets the random seed of the sampling process. Only used when `rng` is None. Default is None. 
Defaults to None.\n\n n_parallel_games (int):\n Number of parallel jobs (number of to-be-occupied cores),\n -1 means all CPU cores and 1 means a serial process.\n I suggest using 1 for debugging since things get messy in parallel!\n\n Returns:\n tuple: \n (shapley value of element (i, j), \n shapley value of element i when j is lesioned, \n shapley value of element j when i is lesioned) \n \"\"\"\n\n interface_args = {\"elements\": elements,\n \"objective_function\": objective_function,\n \"n_permutations\": n_permutations,\n \"objective_function_params\": objective_function_params,\n \"multiprocessing_method\": multiprocessing_method,\n \"rng\": rng,\n \"random_seed\": random_seed,\n \"n_parallel_games\": n_parallel_games}\n\n shapley_A, _, _ = interface(**interface_args, lesioned=pair[1])\n gamma_A = _get_gamma(shapley_A, pair[0]).sum()\n\n shapley_B, _, _ = interface(**interface_args, lesioned=pair[0])\n gamma_B = _get_gamma(shapley_B, pair[1]).sum()\n\n shapley_AB, _, _ = interface(**interface_args, pair=pair)\n gamma_AB = _get_gamma(shapley_AB, pair).sum()\n\n return gamma_AB, gamma_A, gamma_B\n\n\n@typechecked\ndef network_interaction_2d(*,\n n_permutations: int,\n elements: list,\n pairs: Optional[list] = None,\n objective_function: Callable,\n objective_function_params: Dict = {},\n multiprocessing_method: str = 'joblib',\n rng: Optional[np.random.Generator] = None,\n random_seed: Optional[int] = None,\n n_parallel_games: int = -1,\n ) -> np.ndarray:\n \"\"\"Performs Two dimensional MSA as explain in section 2.3 of [1]\n for every possible pair of elements and returns a symmetric matrix of\n interactions between the elements.\n\n Args:\n Args:\n n_permutations (int): Number of permutations (samples) per element.\n\n elements (list): List of the players (elements). Can be strings (names), integers (indicies), and tuples.\n\n objective_function (Callable):\n The game (in-silico experiment). It should get the complement set and return one numeric value\n either int or float.\n This function is just calling it as: objective_function(complement, **objective_function_params)\n\n An example using networkx with some tips:\n (you sometimes need to specify what should happen during edge-cases like an all-lesioned network)\n\n def local_efficiency(complements, graph):\n if len(complements) < 0:\n # the network is intact so:\n return nx.local_efficiency(graph)\n\n elif len(complements) == len(graph):\n # the network is fully lesioned so:\n return 0.0\n\n else:\n # lesion the system, calculate things\n lesioned = graph.copy()\n lesioned.remove_nodes_from(complements)\n return nx.local_efficiency(lesioned)\n\n objective_function_params (Dict, optional): Kwargs for the objective_function. Defaults to {}.\n\n multiprocessing_method (str, optional): \n So far, two methods of parallelization is implemented, 'joblib' and 'ray' and the default method is joblib.\n If using ray tho, you need to decorate your objective function with @ray.remote decorator. Visit their\n documentations to see how to go for it. I guess ray works better on HPC clusters (if they support it tho!)\n and probably doesn't suffer from the sneaky \"memory leakage\" of joblib. But just by playing around,\n I realized joblib is faster for tasks that are small themselves. Remedies are here:\n https://docs.ray.io/en/latest/auto_examples/tips-for-first-time.html\n\n Note: Generally, multiprocessing isn't always faster as explained above. Use it when the function itself\n takes some like each game takes longer than 0.5 seconds or so. 
For example, a function that sleeps for a\n second on a set of 10 elements with 1000 permutations each (1024 games) performs as follows:\n\n - no parallel: 1020 sec\n - joblib: 63 sec\n - ray: 65 sec\n\n That makes sense since I have 16 cores and 1000/16 is around 62. \n Defaults to 'joblib'.\n\n rng (Optional[np.random.Generator], optional): Numpy random generator object used for reproducable results. Default is None. Defaults to None.\n\n random_seed (Optional[int], optional): \n sets the random seed of the sampling process. Only used when `rng` is None. Default is None. Defaults to None.\n\n n_parallel_games (int):\n Number of parallel jobs (number of to-be-occupied cores),\n -1 means all CPU cores and 1 means a serial process.\n I suggest using 1 for debugging since things get messy in parallel!\n\n\n Raises:\n NotImplementedError: Raises this error in case the contribution is a timeseries or there are\n multiple contributions\n\n Returns:\n np.ndarray: the interaction matrix\n \"\"\"\n elements_idx = list(range(len(elements)))\n all_pairs = [(elements.index(x), elements.index(y)) for x, y in pairs] if pairs else combinations(elements_idx, 2)\n\n interface_args = {\"elements\": elements,\n \"n_permutations\": n_permutations,\n \"objective_function_params\": objective_function_params,\n \"objective_function\": objective_function,\n \"multiprocessing_method\": multiprocessing_method,\n \"rng\": rng,\n \"random_seed\": random_seed,\n \"n_parallel_games\": n_parallel_games}\n\n interactions = np.zeros((len(elements), len(elements)))\n\n for x, y in tqdm(all_pairs, desc=\"Running interface 2d for all pair of nodes:\"):\n gammaAB, gammaA, gammaB = interaction_2d(pair=(elements[x], elements[y]),\n **interface_args)\n if not _is_number(gammaAB):\n raise NotImplementedError(\"`network_interaction_2d` does not work with\"\n \" timeseries or multiscore contributions yet.\")\n interactions[x, y] = interactions[y, x] = gammaAB - gammaA - gammaB\n\n return interactions\n\n\ndef _get_gamma(shapley_table, idx):\n if shapley_table.index.nlevels == 1:\n gamma = shapley_table[list(idx)].mean()\n elif \"timestamp\" in shapley_table.index.names:\n gamma = shapley_table[list(idx)].groupby(level=1).mean()\n else:\n gamma = shapley_table[list(idx)].groupby(level=0).mean()\n return gamma\n\n\n@typechecked\ndef estimate_causal_influences(elements: list,\n objective_function: Callable,\n objective_function_params: Optional[dict] = None,\n multiprocessing_method: str = 'joblib',\n n_cores: int = -1,\n n_permutations: int = 1000,\n permutation_seed: Optional[int] = None,\n ) -> pd.DataFrame:\n \"\"\"\n Estimates the causal contribution (Shapley values) of each node on the rest of the network. Basically, this function\n performs MSA iteratively on each node and tracks the changes in the objective_function of the target node.\n For example we have a chain A -> B -> C, and we want to know how much A and B are contributing to C. We first need to\n define a metric for C (objective_function) which here let's say is the average activity of C. MSA then performs a\n multi-site lesioning analysis of A and B so for each we will end up with a number indicating their contributions to\n the average activity of C.\n\n VERY IMPORTANT NOTES:\n\n 1. The resulting causal contribution matrix does not necessarily reflect the connectome. In the example above\n there's no actual connection A -> C but there might be one in the causal contribution matrix since A is causally\n influencing C via B.\n 2. 
Think twice (even three times) about your objective function. The same everything will result in different\n causal contribution matrices depending on what are you tracking and how accurate it's capturing the effect of\n lesions. Also don't forget the edge-cases. There will be weird behaviors in your system, for example, what it\n does if every node is perturbed?\n 3. The metric you track is preferred to be non-negative and bounded (at least practically!)\n 4. Obviously this will take N times longer than a normal MSA with N is the number of nodes. So make sure your\n process is as fast as it can be for example use Numba and stuff, but you don't need to implement any parallel\n processes since it's already implemented here. Going below 1000 permutations might be an option depending on\n your specific case but based on experience, it's not a good idea \n 5. Shapley values sum up (or will be close) to the value of the intact coalition. So for example if the\n mean activity of node C here is 50 then causal_contribution_matrix.sum(axis=0) = 50 or close to 50. If not it\n means:\n 1. the number of permutations are not enough\n 2. there is randomness somewhere in the process\n 3. your objective function is not suitable\n\n\n Args:\n elements (list):\n List of the players (elements). Can be strings (names), integers (indicies), and tuples.\n\n objective_function (Callable):\n The game (in-silico experiment). It should get the complement set and return one numeric value\n either int or float.\n This function is just calling it as: objective_function(complement, **objective_function_params)\n\n An example using networkx with some tips:\n\n def lesion_me_senpai(complements, network, index):\n # note \"index\", your function should be able to track the effects on the target and the keyword for\n that is \"index\"\n\n if len(complements) == len(A)-1: # -1 since the target node is active\n return 0\n\n lesioned_network = deepcopy(network)\n for target in complements:\n lesioned_network[target] = 0 # setting all connections of the targets to 0\n\n activity = network.run(lesioned_network) # or really, whatever you want!\n return float(activity[index].mean())\n\n (you sometimes need to specify what should happen during edge-cases like an all-lesioned network)\n\n\n objective_function_params (Optional[Dict]):\n Kwargs for the objective_function. A dictionary pair of {'index': index} will be added to this during\n the process so your function can track the lesion effect.\n\n multiprocessing_method (str = 'joblib'):\n So far, two methods of parallelization is implemented, 'joblib' and 'ray' and the default method is joblib.\n If using ray tho, you need to decorate your objective function with @ray.remote decorator. Visit their\n documentations to see how to go for it.\n\n n_cores (int = -1):\n Number of parallel games. Default is -1, which means all cores so it can make the system\n freeze for a short period, if that happened then maybe go for -2, which means one msapy is\n left out. Or really just specify the number of threads you want to use!\n\n n_permutations (int = 1000):\n Number of permutations per node.\n Didn't check it systematically yet but just based on random explorations\n I'd say something around 1000 is enough.\n\n permutation_seed (Optional[int] = None):\n Sets the random seed of the sampling process. 
Default is None so if nothing is given every call results in\n a different orderings.\n\n Returns:\n causal_influences (pd.DataFrame)\n\n \"\"\"\n objective_function_params = objective_function_params if objective_function_params else {}\n\n # Initialize the stuff\n shapley_values = []\n # Looping through the nodes\n for index, element in enumerate(elements):\n print(f\"working on node number {index} from {len(elements)} nodes.\")\n objective_function_params['index'] = index\n\n # Takes the target out of the to_be_lesioned list\n without_target = set(elements).difference({element})\n\n shapley_value, contributions, _ = interface(n_permutations=n_permutations,\n elements=list(without_target),\n objective_function=objective_function,\n objective_function_params=objective_function_params,\n n_parallel_games=n_cores,\n multiprocessing_method=multiprocessing_method,\n random_seed=permutation_seed)\n\n _, multi_scores, is_timeseries = _get_contribution_type(\n contributions)\n\n if multi_scores:\n shapley_value = shapley_value.groupby(level=0).mean()\n elif is_timeseries:\n shapley_value = shapley_value.groupby(level=1).mean()\n else:\n shapley_value = shapley_value.mean()\n\n shapley_values.append(shapley_value)\n\n causal_influences = pd.concat(shapley_values, keys=elements) if (\n multi_scores or is_timeseries) else pd.DataFrame(shapley_values, columns=elements)\n return causal_influences.swaplevel().sort_index(level=0) if multi_scores else causal_influences\n\n\n@typechecked\ndef _process_timeseries_shapley(shapley_values: pd.DataFrame) -> pd.DataFrame:\n num_permutation, num_nodes = shapley_values.shape\n data = np.stack(shapley_values.values.flatten())\n num_timestamps = data.shape[-1]\n data = data.reshape(num_permutation, num_nodes, -1)\n data = data.transpose((0, 2, 1)).reshape((-1, num_nodes))\n\n shapley_values = pd.DataFrame(data=data,\n index=pd.MultiIndex.from_product(\n [range(num_permutation), range(num_timestamps)], names=[None, \"timestamp\"]),\n columns=shapley_values.columns\n )\n\n return shapley_values\n"
]
| [
[
"pandas.DataFrame",
"numpy.array",
"numpy.random.default_rng",
"pandas.concat"
]
]
|
foodwaze0/webapp | [
"897043cbbfdbad8d6c54f0556f31e4127d518fc1"
]
| [
"Lib/site-packages/tensorflow/python/ops/gen_manip_ops.py"
]
| [
"\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\r\nfrom tensorflow.python.util import dispatch as _dispatch\r\nfrom tensorflow.python.util.tf_export import tf_export\r\nfrom tensorflow.python.util.tf_export import kwarg_only as _kwarg_only\r\nfrom tensorflow.tools.docs import doc_controls as _doc_controls\r\n\r\n\r\ndef roll(input, shift, axis, name=None):\r\n r\"\"\"Rolls the elements of a tensor along an axis.\r\n\r\n The elements are shifted positively (towards larger indices) by the offset of\r\r\n `shift` along the dimension of `axis`. Negative `shift` values will shift\r\r\n elements in the opposite direction. Elements that roll passed the last position\r\r\n will wrap around to the first and vice versa. Multiple shifts along multiple\r\r\n axes may be specified.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [0, 1, 2, 3, 4]\r\r\n roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]\r\r\n \r\r\n # shifting along multiple dimensions\r\r\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\r\r\n roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]\r\r\n \r\r\n # shifting along the same axis multiple times\r\r\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\r\r\n roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n shift: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which\r\r\n elements are shifted positively (towards larger indices) along the dimension\r\r\n specified by `axis[i]`. Negative shifts will roll the elements in the opposite\r\r\n direction.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift\r\r\n `shift[i]` should occur. If the same axis is referenced more than once, the\r\r\n total shift for that axis will be the sum of all the shifts that belong to that\r\r\n axis.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context or _context.context()\r\n if _ctx is not None and _ctx._thread_local_data.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"Roll\",\r\n name, _ctx._post_execution_callbacks, input, shift, axis)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return roll_eager_fallback(\r\n input, shift, axis, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Roll\", input=input, shift=shift, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tshift\", _op.get_attr(\"Tshift\"), \"Taxis\",\r\n _op.get_attr(\"Taxis\"))\r\n _execute.record_gradient(\r\n \"Roll\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\ndef Roll(input, shift, axis, name=None):\r\n return roll(input=input, shift=shift, axis=axis, name=name)\r\nRoll.__doc__ = roll.__doc__\r\nRoll = _doc_controls.do_not_generate_docs(_kwarg_only(Roll))\r\ntf_export(\"raw_ops.Roll\")(Roll)\r\n\r\n\r\ndef roll_eager_fallback(input, shift, axis, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function roll\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tshift, (shift,) = _execute.args_to_matching_eager([shift], _ctx)\r\n _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx)\r\n _inputs_flat = [input, shift, axis]\r\n _attrs = (\"T\", _attr_T, \"Tshift\", _attr_Tshift, \"Taxis\", _attr_Taxis)\r\n _result = _execute.execute(b\"Roll\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Roll\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"Roll\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"shift\"\r\n# type_attr: \"Tshift\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Taxis\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tshift\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Taxis\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n_op_def_lib = 
_InitOpDefLibrary(b\"\\ny\\n\\004Roll\\022\\n\\n\\005input\\\"\\001T\\022\\017\\n\\005shift\\\"\\006Tshift\\022\\r\\n\\004axis\\\"\\005Taxis\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\026\\n\\006Tshift\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\005Taxis\\022\\004type:\\006\\n\\0042\\002\\003\\t\")\r\n"
]
| [
[
"tensorflow.python.util.tf_export.kwarg_only",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.python.eager.execute.execute",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.util.tf_export.tf_export"
]
]
|
Crazy-Chick/fast-knn-nmt | [
"7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff"
]
| [
"fast_knn_nmt/data/utils.py"
]
| [
"# encoding: utf-8\n\"\"\"\n\n\n\n@desc: \n\n\"\"\"\n\nimport numpy as np\nimport math\nfrom fairseq.data import data_utils\nfrom fairseq.tasks.translation import TranslationTask\nfrom fairseq.data.language_pair_dataset import LanguagePairDataset\n\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nfrom typing import Tuple\n\nfrom fast_knn_nmt.utils.logger import get_logger\nfrom .path_utils import *\n\nLOGGING = get_logger(__name__)\n\n\ndef warmup_mmap_file(path, n=1000, verbose=True, use_log=True):\n megabytes = 1024 * 1024\n if (use_log):\n LOGGING.info(f\"Warming up file {path}\")\n total = math.floor(os.path.getsize(path)/megabytes)\n pbar = tqdm(total=total, desc=f\"Warm up\") if verbose else None\n with open(path, 'rb') as stream:\n while stream.read(n * megabytes):\n if pbar is not None:\n update = n\n if update + pbar.n > total:\n update = total - pbar.n\n pbar.update(update)\n\n\ndef count_chunk_freq(dataset, start, end, vocab_size) -> np.array:\n freq = np.zeros([vocab_size], dtype=np.int32)\n for sent_idx in range(start, end):\n src_ids = dataset[sent_idx]\n for token_idx in src_ids:\n freq[token_idx] += 1\n return freq\n\n\ndef get_token_freq(data_dir, mode, prefix, lang, dictionary=None, dataset=None, num_workers=1, max_sent=0) -> np.array:\n \"\"\"\n get token frequency\n Returns:\n token_freq: np.array of shape [num_tokens]\n \"\"\"\n cache_path = token_freq_path(data_dir, mode, lang, max_sent=max_sent)\n if os.path.exists(cache_path):\n LOGGING.info(f\"Use cached token freq from {cache_path}\")\n return np.load(cache_path, allow_pickle=True)\n\n dictionary = dictionary or TranslationTask.load_dictionary(dictionary_path(data_dir, lang))\n\n dataset = dataset or data_utils.load_indexed_dataset(\n fairseq_dataset_path(data_dir, mode, prefix, lang),\n dictionary\n )\n max_sent = min(max_sent, len(dataset)) if max_sent else len(dataset)\n freq = np.zeros([len(dictionary)], dtype=np.int32)\n if num_workers == 1:\n for sent_idx in tqdm(range(max_sent), desc=\"Counting token frequencies\"):\n src_ids = dataset[sent_idx]\n for token_idx in src_ids:\n freq[token_idx] += 1\n else:\n pool = Pool(processes=num_workers)\n results = []\n chunk_size = max_sent // num_workers\n offset = 0\n for worker_id in range(num_workers):\n results.append(\n pool.apply_async(\n count_chunk_freq,\n (dataset,\n offset,\n offset + chunk_size if worker_id < num_workers-1 else len(dataset),\n len(dictionary),\n ),\n )\n )\n offset += chunk_size\n pool.close()\n pool.join()\n for r in results:\n freq += r.get()\n\n np.save(cache_path, freq)\n\n return freq\n\n\ndef load_token_2d_offsets(data_dir, mode, prefix, lang, freq=None, dictionary=None, dataset=None, all=False, max_sent=0):\n \"\"\"\n build or load cached token 2d offsets\n Returns:\n token_2d_offsets:\n if all=False, it is a list of token offsets, grouped by token idx.\n token_2d_offsets[token_idx] is an array of shape [token_freq, 2],\n which contains the sentence indexes and intra-sentence offsets where token_idx appears in dataset\n if all = True, it is an array of shape [num_tokens, 2]\n \"\"\"\n cache_file = token_2d_offsets_path(data_dir, mode, lang, all_tokens=all, max_sent=max_sent)\n if os.path.exists(cache_file):\n LOGGING.info(f\"Loading token 2d-offsets from {cache_file}\")\n token_2d_offsets = np.load(cache_file, allow_pickle=True)\n return token_2d_offsets\n\n dictionary = dictionary or TranslationTask.load_dictionary(dictionary_path(data_dir, lang))\n dataset = dataset or data_utils.load_indexed_dataset(\n 
fairseq_dataset_path(data_dir, mode, prefix, lang),\n dictionary\n )\n max_sent = min(max_sent, len(dataset)) if max_sent else len(dataset)\n if not all:\n freq = freq if freq is not None else get_token_freq(data_dir, mode, prefix, lang, dictionary, dataset,\n num_workers=os.cpu_count(), max_sent=max_sent)\n token_2d_offsets = [np.zeros([freq[idx], 2], dtype=np.int32) for idx in range(len(dictionary))]\n fill_offsets = np.zeros([len(dictionary)], dtype=np.int32)\n offset = 0\n for sent_idx in tqdm(range(max_sent), desc=\"Gathering token offsets\"):\n src_ids = dataset[sent_idx]\n for intra_offset, token_idx in enumerate(src_ids):\n fill_offset = fill_offsets[token_idx]\n if fill_offset >= freq[token_idx]:\n LOGGING.warn(f\"token count of {token_idx} exceeds argument freq {freq[token_idx]}, ignore it\")\n continue\n token_2d_offsets[token_idx][fill_offset][0] = sent_idx\n token_2d_offsets[token_idx][fill_offset][1] = intra_offset\n fill_offsets[token_idx] += 1\n offset += len(src_ids)\n else:\n num_tokens = np.sum(dataset.sizes)\n token_2d_offsets = np.zeros([num_tokens, 2], dtype=np.int32)\n offset = 0\n for sent_idx in tqdm(range(max_sent), desc=\"Gathering token offsets\"):\n for token_idx in range(len(dataset[sent_idx])):\n token_2d_offsets[offset][0] = sent_idx\n token_2d_offsets[offset][1] = token_idx\n offset += 1\n\n np.save(cache_file, token_2d_offsets)\n LOGGING.info(f\"Saved token 2d-offsets to {cache_file}\")\n return token_2d_offsets\n\n\ndef compute_range_aligns(dataset: LanguagePairDataset, start: int, end: int, pid=0) -> Tuple[np.array, np.array]:\n start = max(0, start)\n end = min(end, len(dataset))\n\n align_dataset = dataset.align_dataset\n\n token_aligns_num = np.sum(align_dataset.sizes[start: end])\n assert token_aligns_num % 2 == 0\n token_aligns_num = token_aligns_num // 2\n token_aligns = np.zeros([token_aligns_num], dtype=np.int64)\n\n num_tokens = np.sum(dataset.src_sizes[start: end])\n token_align_offsets = np.zeros([num_tokens, 2], dtype=np.int64)\n\n offset_idx = 0\n align_idx = 0\n iterator = tqdm(range(start, end), desc=\"Computing align array\", ) if pid == 0 else range(start, end)\n for sent_idx in iterator:\n aligns = align_dataset[sent_idx].reshape(-1, 2)\n src_len = dataset.src_sizes[sent_idx]\n\n prev_src = -1\n prev_start = -1\n prev_end = -1\n for i in range(len(aligns)):\n s = aligns[i][0]\n t = aligns[i][1]\n if s != prev_src:\n if prev_src != -1:\n token_align_offsets[offset_idx] = [prev_start, prev_end]\n offset_idx += 1\n for j in range(prev_src + 1, s):\n token_align_offsets[offset_idx] = [prev_end, prev_end]\n offset_idx += 1\n prev_src = s\n prev_start = align_idx\n prev_end = align_idx + 1\n else:\n prev_end += 1\n\n token_aligns[align_idx] = t\n align_idx += 1\n\n token_align_offsets[offset_idx] = [prev_start, prev_end]\n offset_idx += 1\n for j in range(prev_src + 1, src_len):\n token_align_offsets[offset_idx] = [prev_end, prev_end]\n offset_idx += 1\n return token_aligns, token_align_offsets\n\n\ndef get_aligns(data_dir: str, subset: str = \"train\", dataset: LanguagePairDataset = None, workers: int = 1) -> Tuple[np.array, np.array]:\n \"\"\"\n Args:\n data_dir: path to indexed src/align data\n subset: train/valid/test\n dataset: LanguagePairDataset\n workers: cpu cores to build array\n\n Returns:\n token_aligns: [num_aligns]\n token_align_offsets: [num_tokens, 2], each token's start to end aligns in token_aligns\n\n \"\"\"\n cache_file = align_path(data_dir=data_dir, mode=subset)\n if os.path.exists(cache_file):\n LOGGING.info(f\"Loading 
aligns numpy array from {cache_file}\")\n file = np.load(cache_file)\n token_aligns, token_align_offsets = file[\"aligns\"], file[\"offsets\"]\n return token_aligns, token_align_offsets\n\n if workers <= 1:\n token_aligns, token_align_offsets = compute_range_aligns(dataset, start=0, end=len(dataset))\n else:\n results = []\n pool = Pool(workers)\n chunk_size = math.ceil(len(dataset) / workers)\n for worker_idx in range(workers):\n start = worker_idx * chunk_size\n end = start + chunk_size\n results.append(pool.apply_async(\n func=compute_range_aligns,\n args=(dataset, start, end, worker_idx)\n ))\n pool.close()\n pool.join()\n\n token_aligns_num = np.sum(dataset.align_dataset.sizes) // 2\n token_aligns = np.zeros([token_aligns_num], dtype=np.int64)\n num_tokens = np.sum(dataset.src_sizes)\n token_align_offsets = np.zeros([num_tokens, 2], dtype=np.int64)\n\n align_idx = 0\n offset_idx = 0\n for r in results:\n chunk_aligns, chunk_offsets = r.get()\n token_align_offsets[offset_idx: offset_idx + len(chunk_offsets)] = chunk_offsets + align_idx\n offset_idx += len(chunk_offsets)\n token_aligns[align_idx: align_idx + len(chunk_aligns)] = chunk_aligns\n align_idx += len(chunk_aligns)\n\n LOGGING.info(f\"Saving align numpy array to {cache_file}\")\n np.savez(cache_file, aligns=token_aligns, offsets=token_align_offsets)\n return token_aligns, token_align_offsets\n\n"
]
| [
[
"numpy.zeros",
"numpy.sum",
"numpy.load",
"numpy.save",
"numpy.savez"
]
]
|
cumttang/ray | [
"eb1e5fa2cf26233701ccbda3eb8a301ecd418d8c"
]
| [
"python/ray/tune/examples/tune_mnist_keras.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport argparse\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nimport ray\nfrom ray import tune\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\n\n\nclass TuneCallback(keras.callbacks.Callback):\n def __init__(self, reporter, logs={}):\n self.reporter = reporter\n self.iteration = 0\n\n def on_train_end(self, epoch, logs={}):\n self.reporter(\n timesteps_total=self.iteration, done=1, mean_accuracy=logs[\"acc\"])\n\n def on_batch_end(self, batch, logs={}):\n self.iteration += 1\n self.reporter(\n timesteps_total=self.iteration, mean_accuracy=logs[\"acc\"])\n\n\ndef train_mnist(args, cfg, reporter):\n # We set threads here to avoid contention, as Keras\n # is heavily parallelized across multiple cores.\n K.set_session(\n K.tf.Session(\n config=K.tf.ConfigProto(\n intra_op_parallelism_threads=args.threads,\n inter_op_parallelism_threads=args.threads)))\n vars(args).update(cfg)\n batch_size = 128\n num_classes = 10\n epochs = 12\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n model = Sequential()\n model.add(\n Conv2D(\n 32,\n kernel_size=(args.kernel1, args.kernel1),\n activation='relu',\n input_shape=input_shape))\n model.add(Conv2D(64, (args.kernel2, args.kernel2), activation='relu'))\n model.add(MaxPooling2D(pool_size=(args.poolsize, args.poolsize)))\n model.add(Dropout(args.dropout1))\n model.add(Flatten())\n model.add(Dense(args.hidden, activation='relu'))\n model.add(Dropout(args.dropout2))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=args.lr, momentum=args.momentum),\n metrics=['accuracy'])\n\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=0,\n validation_data=(x_test, y_test),\n callbacks=[TuneCallback(reporter)])\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(description='Keras MNIST Example')\n parser.add_argument(\n \"--smoke-test\", action=\"store_true\", help=\"Finish quickly for testing\")\n parser.add_argument(\n \"--use-gpu\", action=\"store_true\", help=\"Use GPU in training.\")\n parser.add_argument(\n '--jobs',\n type=int,\n default=1,\n help='number of jobs to run concurrently (default: 1)')\n parser.add_argument(\n '--threads',\n type=int,\n default=2,\n help='threads used in 
operations (default: 2)')\n parser.add_argument(\n '--steps',\n type=float,\n default=0.01,\n metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument(\n '--lr',\n type=float,\n default=0.01,\n metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument(\n '--momentum',\n type=float,\n default=0.5,\n metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument(\n '--kernel1',\n type=int,\n default=3,\n help='Size of first kernel (default: 3)')\n parser.add_argument(\n '--kernel2',\n type=int,\n default=3,\n help='Size of second kernel (default: 3)')\n parser.add_argument(\n '--poolsize', type=int, default=2, help='Size of Pooling (default: 2)')\n parser.add_argument(\n '--dropout1',\n type=float,\n default=0.25,\n help='Size of first kernel (default: 0.25)')\n parser.add_argument(\n '--hidden',\n type=int,\n default=128,\n help='Size of Hidden Layer (default: 128)')\n parser.add_argument(\n '--dropout2',\n type=float,\n default=0.5,\n help='Size of first kernel (default: 0.5)')\n return parser\n\n\nif __name__ == '__main__':\n parser = create_parser()\n args = parser.parse_args()\n mnist.load_data() # we do this because it's not threadsafe\n\n ray.init()\n sched = AsyncHyperBandScheduler(\n time_attr=\"timesteps_total\",\n reward_attr=\"mean_accuracy\",\n max_t=400,\n grace_period=20)\n tune.register_trainable(\"train_mnist\",\n lambda cfg, rprtr: train_mnist(args, cfg, rprtr))\n tune.run_experiments(\n {\n \"exp\": {\n \"stop\": {\n \"mean_accuracy\": 0.99,\n \"timesteps_total\": 10 if args.smoke_test else 300\n },\n \"run\": \"train_mnist\",\n \"num_samples\": 1 if args.smoke_test else 10,\n \"resources_per_trial\": {\n \"cpu\": args.threads,\n \"gpu\": 0.5 if args.use_gpu else 0\n },\n \"config\": {\n \"lr\": tune.sample_from(\n lambda spec: np.random.uniform(0.001, 0.1)),\n \"momentum\": tune.sample_from(\n lambda spec: np.random.uniform(0.1, 0.9)),\n \"hidden\": tune.sample_from(\n lambda spec: np.random.randint(32, 512)),\n \"dropout1\": tune.sample_from(\n lambda spec: np.random.uniform(0.2, 0.8)),\n }\n }\n },\n verbose=0,\n scheduler=sched)\n"
]
| [
[
"numpy.random.uniform",
"numpy.random.randint"
]
]
|
dodgejesse/bert_on_stilts | [
"63884f37f519fd1d6eafde43ba213a25a5575a82"
]
| [
"glue/runners.py"
]
| [
"import collections as col\nimport logging\nimport numpy as np\nfrom tqdm import tqdm, trange\nimport sys\n\nimport copy\n\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom .core import InputFeatures, Batch, InputExample, TokenizedExample\nfrom .evaluate import compute_metrics\nfrom pytorch_pretrained_bert.utils import truncate_seq_pair\nfrom shared.runners import warmup_linear\n\nfrom data_sampler import OrderedSampler\n\nlogger = logging.getLogger(__name__)\n\n\nclass LabelModes:\n CLASSIFICATION = \"CLASSIFICATION\"\n REGRESSION = \"REGRESSION\"\n\n\nclass TrainEpochState:\n def __init__(self):\n self.tr_loss = 0\n self.global_step = 0\n self.nb_tr_examples = 0\n self.nb_tr_steps = 0\n\n\ndef tokenize_example(example, tokenizer):\n tokens_a = tokenizer.tokenize(example.text_a)\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n else:\n tokens_b = example.text_b\n return TokenizedExample(\n guid=example.guid,\n tokens_a=tokens_a,\n tokens_b=tokens_b,\n label=example.label,\n )\n\n\ndef convert_example_to_feature(example, tokenizer, max_seq_length, label_map):\n if isinstance(example, InputExample):\n example = tokenize_example(example, tokenizer)\n\n tokens_a, tokens_b = example.tokens_a, example.tokens_b\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if is_null_label_map(label_map):\n label_id = example.label\n else:\n label_id = label_map[example.label]\n return InputFeatures(\n guid=example.guid,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n tokens=tokens,\n )\n\n\ndef convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, verbose=True):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n features = []\n for (ex_index, example) in enumerate(examples):\n feature_instance = convert_example_to_feature(\n example=example,\n tokenizer=tokenizer,\n max_seq_length=max_seq_length,\n label_map=label_map,\n )\n if verbose and ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % example.guid)\n logger.info(\"tokens: %s\" % \" \".join([str(x) for x in feature_instance.tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in feature_instance.input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in feature_instance.input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in feature_instance.segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, feature_instance.label_id))\n\n features.append(feature_instance)\n return features\n\n\ndef convert_to_dataset(features, label_mode):\n full_batch = features_to_data(features, label_mode=label_mode)\n if full_batch.label_ids is None:\n dataset = TensorDatasetWithIndex(full_batch.input_ids, full_batch.input_mask,\n full_batch.segment_ids)\n else:\n dataset = TensorDatasetWithIndex(full_batch.input_ids, full_batch.input_mask,\n full_batch.segment_ids, full_batch.label_ids)\n return dataset, full_batch.tokens\n\n\ndef features_to_data(features, label_mode):\n if label_mode == LabelModes.CLASSIFICATION:\n label_type = torch.long\n elif label_mode == LabelModes.REGRESSION:\n label_type = torch.float\n else:\n raise KeyError(label_mode)\n return Batch(\n input_ids=torch.tensor([f.input_ids for f in features], dtype=torch.long),\n input_mask=torch.tensor([f.input_mask for f in features], dtype=torch.long),\n segment_ids=torch.tensor([f.segment_ids for f in features], dtype=torch.long),\n label_ids=torch.tensor([f.label_id for f in features], dtype=label_type),\n tokens=[f.tokens for f in features],\n )\n\nclass TensorDatasetWithIndex(TensorDataset):\n \"\"\"\n Returns the index along with the example\n \"\"\"\n\n def __init__(self, *tensors):\n super().__init__(*tensors)\n\n def __getitem__(self, index):\n data_to_return = tuple(tensor[index] for tensor in self.tensors)\n return [data_to_return,index]\n\n\nclass HybridLoader:\n def __init__(self, dataloader, tokens):\n self.dataloader = dataloader\n self.tokens = tokens\n\n def __iter__(self):\n batch_size = self.dataloader.batch_size\n for i, batch_with_indices in enumerate(self.dataloader):\n batch, indices = batch_with_indices\n if len(batch) == 4:\n input_ids, input_mask, segment_ids, label_ids = batch\n elif len(batch) == 3:\n input_ids, input_mask, segment_ids = batch\n label_ids = None\n else:\n raise RuntimeError()\n batch_tokens = self.tokens[i * batch_size: (i+1) * batch_size]\n yield Batch(\n 
input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n tokens=batch_tokens,\n indices=indices,\n )\n\n def __len__(self):\n return len(self.dataloader)\n\n\nclass RunnerParameters:\n def __init__(self, max_seq_length, local_rank, n_gpu, fp16,\n learning_rate, gradient_accumulation_steps, t_total, warmup_proportion,\n num_train_epochs, train_batch_size, eval_batch_size):\n self.max_seq_length = max_seq_length\n self.local_rank = local_rank\n self.n_gpu = n_gpu\n self.fp16 = fp16\n self.learning_rate = learning_rate\n self.gradient_accumulation_steps = gradient_accumulation_steps\n self.t_total = t_total\n self.warmup_proportion = warmup_proportion\n self.num_train_epochs = num_train_epochs\n self.train_batch_size = train_batch_size\n self.eval_batch_size = eval_batch_size\n\n\nclass GlueTaskRunner:\n def __init__(self, model, optimizer, tokenizer, label_list, device, rparams):\n self.model = model\n self.optimizer = optimizer\n self.tokenizer = tokenizer\n self.label_list = label_list\n self.label_map = {v: i for i, v in enumerate(label_list)}\n self.device = device\n self.rparams = rparams\n\n def run_train(self, train_examples, verbose=True):\n if verbose:\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", self.rparams.train_batch_size)\n logger.info(\" Num steps = %d\", self.rparams.t_total)\n train_dataloader = self.get_train_dataloader(train_examples, verbose=verbose)\n\n for _ in trange(int(self.rparams.num_train_epochs), desc=\"Epoch\"):\n self.run_train_epoch(train_dataloader)\n\n def run_train_val(self, train_examples, val_examples, task_name, eval_during_train=False, data_order_seed=None):\n epoch_result_dict = col.OrderedDict()\n\n if data_order_seed is not None:\n data_iterators = self.get_data_order(train_examples, data_order_seed, int(self.rparams.num_train_epochs))\n \n for i in trange(int(self.rparams.num_train_epochs), desc=\"Epoch\"):\n if data_order_seed is not None:\n train_dataloader = self.get_train_dataloader(train_examples, verbose=False,\n data_order=data_iterators[i])\n else:\n train_dataloader = self.get_train_dataloader(train_examples, verbose=False)\n\n #self.run_train_epoch(train_dataloader)\n\n if eval_during_train:\n during_train_results = self.run_train_epoch_and_val(train_dataloader, val_examples, task_name, i)\n else:\n self.run_train_epoch(train_dataloader)\n during_train_results = None\n \n epoch_result = self.run_val(val_examples, task_name, verbose=False)\n print(\"validation performance after epoch {}: {}\".format(i,epoch_result[\"metrics\"]))\n del epoch_result[\"logits\"]\n del epoch_result[\"labels\"]\n epoch_result_dict[i] = [during_train_results, epoch_result]\n\n return epoch_result_dict\n\n def run_train_epoch(self, train_dataloader):\n for _ in self.run_train_epoch_context(train_dataloader):\n pass\n\n def run_train_epoch_and_val(self, train_dataloader, val_examples, task_name, epoch_num):\n train_progress = col.OrderedDict()\n for results in self.run_train_epoch_context(train_dataloader):\n\n step, batch, train_epoch_state = results\n \n # evaluate the first 20 steps, then every 10 steps (10 times), then every 100 steps\n \n tenth_of_epoch = min(int(len(train_dataloader)/10), 100)\n tenth_of_tenth_of_epoch = tenth_of_epoch / 10\n if ((step < 50 or\n (step < 300 and step % tenth_of_tenth_of_epoch == 0)) and\n epoch_num == 0) or step % tenth_of_epoch == 0:\n cur_val_result = self.run_val(val_examples, task_name, 
verbose=False)\n del cur_val_result[\"logits\"]\n del cur_val_result[\"labels\"]\n else:\n cur_val_result = None\n deepcopy_of_train_state = copy.deepcopy(train_epoch_state)\n train_progress[step] = [deepcopy_of_train_state, cur_val_result, batch.indices]\n\n return train_progress\n \n\n def run_train_epoch_context(self, train_dataloader):\n self.model.train()\n train_epoch_state = TrainEpochState()\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Training\",file=sys.stdout)):\n #import pdb; pdb.set_trace()\n self.run_train_step(\n step=step,\n batch=batch,\n train_epoch_state=train_epoch_state,\n )\n yield step, batch, train_epoch_state\n\n def run_train_step(self, step, batch, train_epoch_state):\n batch = batch.to(self.device)\n loss = self.model(batch.input_ids, batch.segment_ids, batch.input_mask, batch.label_ids)\n if self.rparams.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if self.rparams.gradient_accumulation_steps > 1:\n loss = loss / self.rparams.gradient_accumulation_steps\n if self.rparams.fp16:\n self.optimizer.backward(loss)\n else:\n loss.backward()\n\n train_epoch_state.tr_loss += loss.item()\n train_epoch_state.nb_tr_examples += batch.input_ids.size(0)\n train_epoch_state.nb_tr_steps += 1\n if (step + 1) % self.rparams.gradient_accumulation_steps == 0:\n # modify learning rate with special warm up BERT uses\n lr_this_step = self.rparams.learning_rate * warmup_linear(\n train_epoch_state.global_step / self.rparams.t_total, self.rparams.warmup_proportion)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr_this_step\n self.optimizer.step()\n self.optimizer.zero_grad()\n train_epoch_state.global_step += 1\n\n def run_val(self, val_examples, task_name, verbose=True):\n val_dataloader = self.get_eval_dataloader(val_examples, verbose=verbose)\n self.model.eval()\n total_eval_loss = 0\n nb_eval_steps, nb_eval_examples = 0, 0\n all_logits = []\n all_labels = []\n for step, batch in enumerate(tqdm(val_dataloader, desc=\"Evaluating (Val)\", file=sys.stdout)):\n batch = batch.to(self.device)\n\n with torch.no_grad():\n tmp_eval_loss = self.model(batch.input_ids, batch.segment_ids,\n batch.input_mask, batch.label_ids)\n logits = self.model(batch.input_ids, batch.segment_ids, batch.input_mask)\n label_ids = batch.label_ids.cpu().numpy()\n\n logits = logits.detach().cpu().numpy()\n total_eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_examples += batch.input_ids.size(0)\n nb_eval_steps += 1\n all_logits.append(logits)\n all_labels.append(label_ids)\n eval_loss = total_eval_loss / nb_eval_steps\n all_logits = np.concatenate(all_logits, axis=0)\n all_labels = np.concatenate(all_labels, axis=0)\n\n return {\n \"logits\": all_logits,\n \"loss\": eval_loss,\n \"metrics\": compute_task_metrics(task_name, all_logits, all_labels),\n \"labels\": all_labels\n }\n\n def run_test(self, test_examples, verbose=True):\n test_dataloader = self.get_eval_dataloader(test_examples, verbose=verbose)\n self.model.eval()\n all_logits = []\n for step, batch in enumerate(tqdm(test_dataloader, desc=\"Predictions (Test)\")):\n batch = batch.to(self.device)\n with torch.no_grad():\n logits = self.model(batch.input_ids, batch.segment_ids, batch.input_mask)\n logits = logits.detach().cpu().numpy()\n all_logits.append(logits)\n all_logits = np.concatenate(all_logits, axis=0)\n return all_logits\n\n def get_data_order(self, train_examples, data_order_seed, num_train_epochs):\n train_features = convert_examples_to_features(\n train_examples, 
self.label_map, self.rparams.max_seq_length, self.tokenizer,\n verbose=False,\n )\n train_data, train_tokens = convert_to_dataset(\n train_features, label_mode=get_label_mode(self.label_map),\n )\n \n iterators = []\n train_sampler = RandomSampler(train_data)\n print(\"Setting data order seed to {}\".format(data_order_seed))\n torch.manual_seed(data_order_seed)\n for epoch in range(num_train_epochs):\n iterators.append(train_sampler.__iter__())\n\n return iterators\n\n def debug_data_order_seed(self, train_sampler):\n # to debug setting the seed for the data order.\n # if the seed is set, and interators are created,\n # they will retain their order even if other samples are drawn\n if True:\n torch.manual_seed(1234)\n\n order = train_sampler.__iter__()\n for index in order:\n print(index, end=\", \")\n print(\"\")\n #torch.manual_seed(1234)\n\n order = train_sampler.__iter__()\n for index in order:\n print(index, end=\", \")\n print(\"\")\n torch.manual_seed(1234)\n\n order = train_sampler.__iter__()\n for index in order:\n print(index, end=\", \")\n print(\"\")\n\n data_iterators = self.get_data_order(train_examples, data_order_seed=1234, num_train_epochs = 3)\n counter = 0\n for index in data_iterators[0]:\n print(index, end=\", \")\n if counter == 5:\n break\n counter += 1\n print(\"\")\n counter = 0\n for index in data_iterators[1]:\n print(index, end=\", \")\n if counter == 5:\n break\n counter += 1 \n print(\"\")\n for index in data_iterators[0]:\n print(index, end=\", \")\n print(\"\")\n for index in data_iterators[1]:\n print(index, end=\", \")\n print(\"\")\n \n\n import pdb; pdb.set_trace()\n\n def get_train_dataloader(self, train_examples, verbose=True, data_order=None):\n train_features = convert_examples_to_features(\n train_examples, self.label_map, self.rparams.max_seq_length, self.tokenizer,\n verbose=verbose,\n )\n train_data, train_tokens = convert_to_dataset(\n train_features, label_mode=get_label_mode(self.label_map),\n )\n\n if self.rparams.local_rank == -1:\n if data_order is None:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = OrderedSampler(train_examples, data_order)\n else:\n train_sampler = DistributedSampler(train_data)\n assert False\n\n\n if False:\n debug_data_order_seed()\n \n train_dataloader = DataLoader(\n train_data, sampler=train_sampler, batch_size=self.rparams.train_batch_size,\n )\n\n # DEBUG\n if False:\n to_return = HybridLoader(train_dataloader, train_tokens)\n import pdb; pdb.set_trace()\n for step, batch in enumerate(to_return):\n print(batch.tokens)\n\n \n print(\"Problem! 
the batch.tokens (from HybridLoader) dont change when we change the seed.\")\n print(\"This is likely because of the mismatch between the RandomSampler or OrderedSampler and \", end=\"\")\n print(\"the way the convert_examples_to_features and convert_to_dataset functions work.\")\n print(\"It's possible this isn't actually a problem.\")\n\n\n\n \n \n return HybridLoader(train_dataloader, train_tokens)\n\n def get_eval_dataloader(self, eval_examples, verbose=True):\n eval_features = convert_examples_to_features(\n eval_examples, self.label_map, self.rparams.max_seq_length, self.tokenizer,\n verbose=verbose,\n )\n eval_data, eval_tokens = convert_to_dataset(\n eval_features, label_mode=get_label_mode(self.label_map),\n )\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(\n eval_data, sampler=eval_sampler, batch_size=self.rparams.eval_batch_size,\n )\n return HybridLoader(eval_dataloader, eval_tokens)\n\n\ndef compute_task_metrics(task_name, logits, labels):\n if logits.shape[1] == 1:\n pred_arr = logits.reshape(-1)\n else:\n pred_arr = np.argmax(logits, axis=1)\n return compute_metrics(\n task_name=task_name,\n pred_srs=pred_arr,\n label_srs=labels,\n )\n\n\ndef is_null_label_map(label_map):\n return len(label_map) == 1 and label_map[None] == 0\n\n\ndef get_label_mode(label_map):\n if is_null_label_map(label_map):\n return LabelModes.REGRESSION\n else:\n return LabelModes.CLASSIFICATION\n"
]
| [
[
"numpy.concatenate",
"torch.utils.data.RandomSampler",
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.manual_seed",
"numpy.argmax",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.utils.data.distributed.DistributedSampler"
]
]
|
ivankreso/LDN | [
"76740ef77fcec851f8abc2380251a9491dc0cdc3"
]
| [
"losses.py"
]
| [
"import torch\nimport torch.nn.functional as F\n\n\ndef segmentation_loss(logits, aux_logits, batch, aux_loss_weight, ignore_index=-1,\n equal_level_weights=False):\n if len(aux_logits) > 0:\n main_wgt = 1 - aux_loss_weight\n else:\n main_wgt = 1\n\n num_classes = logits.shape[1]\n labels = batch['labels']\n log_softmax = F.log_softmax(logits, dim=1)\n loss_val = F.nll_loss(log_softmax, labels, ignore_index=ignore_index)\n loss = main_wgt * loss_val\n separated_losses = [loss_val.detach()]\n # if self.args.class_balancing:\n # loss = main_wgt * F.nll_loss(log_softmax, labels, weight=self.dataset.class_weights,\n # ignore_index=self.dataset.ignore_id)\n # else:\n # loss = main_wgt * F.nll_loss(log_softmax, labels, ignore_index=self.dataset.ignore_id)\n\n if len(aux_logits) > 0:\n aux_targets = batch['aux_targets']\n aux_valid_masks = batch['aux_valid_mask']\n if equal_level_weights:\n aux_wgt = aux_loss_weight / len(aux_logits)\n else:\n aux_loss = []\n for i in range(len(aux_logits)):\n target_dist = aux_targets[i].reshape(-1, num_classes).cuda(non_blocking=True)\n valid_mask = aux_valid_masks[i].reshape(-1, 1).cuda(non_blocking=True)\n logits_1d = aux_logits[i].permute(0,2,3,1).contiguous().reshape(-1, num_classes)\n if equal_level_weights:\n loss_val = softmax_cross_entropy_with_ignore(logits_1d, target_dist, valid_mask)\n loss += aux_wgt * loss_val\n separated_losses.append(loss_val.detach())\n else:\n level_loss = softmax_cross_entropy_with_ignore(\n logits_1d, target_dist, valid_mask, average=False)\n aux_loss.append(level_loss)\n if not equal_level_weights:\n loss += aux_loss_weight * torch.mean(torch.cat(aux_loss, dim=0))\n return loss, separated_losses"
]
| [
[
"torch.nn.functional.nll_loss",
"torch.cat",
"torch.nn.functional.log_softmax"
]
]
|
3288103265/pytorch-lightning-template | [
"5c750db9b5a9c2e483cf620f0cd819e8117f6436"
]
| [
"model/resnet_generator_128.py"
]
| [
"import math\nfrom model.relation_transformer import Encoder, get_rel_encoder_layer\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .norm_module import *\nfrom .mask_regression import *\n# from .sync_batchnorm import SynchronizedBatchNorm2d\nfrom torch.nn import SyncBatchNorm as SynchronizedBatchNorm2d\n# from .transformer import attention\n\nBatchNorm = SynchronizedBatchNorm2d\n\n\nclass ResnetGenerator128(nn.Module):\n def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3, use_trans_enc=False):\n super(ResnetGenerator128, self).__init__()\n self.num_classes = num_classes\n self.use_trans_enc = use_trans_enc\n if use_trans_enc:\n self.label_dim = 180\n layer_norm_eps = 1e-5\n # self.encoder_layer = TransformerEncoderLayer(d_model=self.label_dim, nhead=4, dim_feedforward=512)\n self.encoder_layer = get_rel_encoder_layer(\n d_model=self.label_dim, nhead=4, d_ff=512)\n\n # self.encoder_norm = nn.LayerNorm(self.label_dim, eps=layer_norm_eps)\n self.transformer_encoder = Encoder(\n self.encoder_layer, N=1)\n self.label_embedding = nn.Embedding(num_classes, 180)\n\n num_w = 128+180\n self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))\n\n self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)\n self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)\n self.res3 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)\n self.res4 = ResBlock(ch*4, ch*2, upsample=True,\n num_w=num_w, psp_module=True)\n self.res5 = ResBlock(ch*2, ch*1, upsample=True,\n num_w=num_w, predict_mask=False)\n self.final = nn.Sequential(BatchNorm(ch),\n nn.ReLU(),\n conv2d(ch, output_dim, 3, 1, 1),\n nn.Tanh())\n\n # mapping function\n mapping = list()\n self.mapping = nn.Sequential(*mapping)\n\n self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))\n\n self.sigmoid = nn.Sigmoid()\n\n self.mask_regress = MaskRegressNetv2(num_w)\n self.init_parameter()\n\n# TODO: design mask.\n# TODO: mask and attention score.\n def forward(self, z, bbox, z_im=None, y=None, src_mask=None):\n # y is label: (bs, 8), embed->(bs, 8, 180)\n # src_mask is sequence mask for varied lengh sequence.\n b, o = z.size(0), z.size(1)\n label_embedding = self.label_embedding(y)\n if self.use_trans_enc:\n label_embedding = self.label_embedding(\n y) * math.sqrt(self.label_dim) # batch_size*8*180\n # add transformer and self-attention.\n label_embedding = self.transformer_encoder(\n label_embedding, bbox, mask=src_mask)\n\n z = z.view(b * o, -1)\n # label_embedding = label_embedding.view(b * o, -1)\n label_embedding = label_embedding.reshape(b*o, -1)\n\n latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)\n\n w = self.mapping(latent_vector.view(b * o, -1))\n # preprocess bbox\n bmask = self.mask_regress(w, bbox)\n\n if z_im is None:\n z_im = torch.randn((b, 128), device=z.device)\n\n bbox_mask_ = bbox_mask(z, bbox, 64, 64)\n\n # 4x4\n x = self.fc(z_im).view(b, -1, 4, 4)\n # 8x8\n x, stage_mask = self.res1(x, w, bmask)\n\n # 16x16\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, 
ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1\n x, stage_mask = self.res2(x, w, stage_bbox)\n\n # 32x32\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2\n x, stage_mask = self.res3(x, w, stage_bbox)\n\n # 64x64\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3\n x, stage_mask = self.res4(x, w, stage_bbox)\n\n # 128x128\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4\n x, _ = self.res5(x, w, stage_bbox)\n\n # to RGB\n x = self.final(x)\n return x\n\n def init_parameter(self):\n for k in self.named_parameters():\n if k[1].dim() > 1:\n torch.nn.init.orthogonal_(k[1])\n if k[0][-4:] == 'bias':\n torch.nn.init.constant_(k[1], 0)\n\n\nclass ResnetGenerator256(nn.Module):\n def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):\n super(ResnetGenerator256, self).__init__()\n self.num_classes = num_classes\n\n self.label_embedding = nn.Embedding(num_classes, 180)\n\n num_w = 128+180\n self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))\n\n self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)\n self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)\n self.res3 = ResBlock(ch*8, ch*8, upsample=True, num_w=num_w)\n self.res4 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)\n self.res5 = ResBlock(ch*4, ch*2, upsample=True, num_w=num_w)\n self.res6 = ResBlock(ch*2, ch*1, upsample=True,\n num_w=num_w, predict_mask=False)\n self.final = nn.Sequential(BatchNorm(ch),\n nn.ReLU(),\n conv2d(ch, output_dim, 3, 1, 1),\n nn.Tanh())\n\n # mapping function\n mapping = list()\n self.mapping = nn.Sequential(*mapping)\n\n self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))\n self.alpha5 = nn.Parameter(torch.zeros(1, 184, 1))\n self.sigmoid = nn.Sigmoid()\n\n self.mask_regress = MaskRegressNetv2(num_w)\n self.init_parameter()\n\n def forward(self, z, bbox, z_im=None, y=None, include_mask_loss=False):\n b, o = z.size(0), z.size(1)\n\n label_embedding = self.label_embedding(y)\n\n z = z.view(b * o, -1)\n label_embedding = label_embedding.view(b * o, -1)\n\n latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)\n\n w = self.mapping(latent_vector.view(b * o, -1))\n\n # 
preprocess bbox\n bmask = self.mask_regress(w, bbox)\n\n if z_im is None:\n z_im = torch.randn((b, 128), device=z.device)\n\n bbox_mask_ = bbox_mask(z, bbox, 128, 128)\n\n latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)\n w = self.mapping(latent_vector.view(b * o, -1))\n\n # 4x4\n x = self.fc(z_im).view(b, -1, 4, 4)\n # 8x8\n # label mask\n x, stage_mask = self.res1(x, w, bmask)\n\n # 16x16\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1\n x, stage_mask = self.res2(x, w, stage_bbox)\n\n # 32x32\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n\n alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2\n x, stage_mask = self.res3(x, w, stage_bbox)\n\n # 64x64\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n\n alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3\n x, stage_mask = self.res4(x, w, stage_bbox)\n\n # 128x128\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n\n alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4\n x, stage_mask = self.res5(x, w, stage_bbox)\n\n # 256x256\n hh, ww = x.size(2), x.size(3)\n seman_bbox = batched_index_select(\n stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)\n seman_bbox = torch.sigmoid(\n seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')\n\n alpha5 = torch.gather(self.sigmoid(self.alpha5).expand(\n b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)\n stage_bbox = F.interpolate(bmask, size=(\n hh, ww), mode='bilinear') * (1 - alpha5) + seman_bbox * alpha5\n x, _ = self.res6(x, w, stage_bbox)\n # to RGB\n x = self.final(x)\n return x\n\n def init_parameter(self):\n for k in self.named_parameters():\n if k[1].dim() > 1:\n torch.nn.init.orthogonal_(k[1])\n if k[0][-4:] == 'bias':\n torch.nn.init.constant_(k[1], 0)\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, upsample=False, num_w=128, predict_mask=True, psp_module=False):\n super(ResBlock, self).__init__()\n self.upsample = upsample\n self.h_ch = h_ch if h_ch else out_ch\n self.conv1 = conv2d(in_ch, 
self.h_ch, ksize, pad=pad)\n self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=pad)\n self.b1 = SpatialAdaptiveSynBatchNorm2d(\n in_ch, num_w=num_w, batchnorm_func=BatchNorm)\n self.b2 = SpatialAdaptiveSynBatchNorm2d(\n self.h_ch, num_w=num_w, batchnorm_func=BatchNorm)\n self.learnable_sc = in_ch != out_ch or upsample\n if self.learnable_sc:\n self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)\n self.activation = nn.ReLU()\n\n self.predict_mask = predict_mask\n if self.predict_mask:\n if psp_module:\n self.conv_mask = nn.Sequential(PSPModule(out_ch, 100),\n nn.Conv2d(100, 184, kernel_size=1))\n else:\n self.conv_mask = nn.Sequential(nn.Conv2d(out_ch, 100, 3, 1, 1),\n BatchNorm(100),\n nn.ReLU(),\n nn.Conv2d(100, 184, 1, 1, 0, bias=True))\n\n def residual(self, in_feat, w, bbox):\n x = in_feat\n x = self.b1(x, w, bbox)\n x = self.activation(x)\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.conv1(x)\n x = self.b2(x, w, bbox)\n x = self.activation(x)\n x = self.conv2(x)\n return x\n\n def shortcut(self, x):\n if self.learnable_sc:\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.c_sc(x)\n return x\n\n def forward(self, in_feat, w, bbox):\n out_feat = self.residual(in_feat, w, bbox) + self.shortcut(in_feat)\n if self.predict_mask:\n mask = self.conv_mask(out_feat)\n else:\n mask = None\n return out_feat, mask\n\n\ndef conv2d(in_feat, out_feat, kernel_size=3, stride=1, pad=1, spectral_norm=True):\n conv = nn.Conv2d(in_feat, out_feat, kernel_size, stride, pad)\n if spectral_norm:\n return nn.utils.spectral_norm(conv, eps=1e-4)\n else:\n return conv\n\n\ndef batched_index_select(input, dim, index):\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.expand(expanse)\n return torch.gather(input, dim, index)\n\n\ndef bbox_mask(x, bbox, H, W):\n b, o, _ = bbox.size()\n N = b * o\n\n bbox_1 = bbox.float().view(-1, 4)\n x0, y0 = bbox_1[:, 0], bbox_1[:, 1]\n ww, hh = bbox_1[:, 2], bbox_1[:, 3]\n\n x0 = x0.contiguous().view(N, 1).expand(N, H)\n ww = ww.contiguous().view(N, 1).expand(N, H)\n y0 = y0.contiguous().view(N, 1).expand(N, W)\n hh = hh.contiguous().view(N, 1).expand(N, W)\n\n X = torch.linspace(0, 1, steps=W).view(\n 1, W).expand(N, W).cuda(device=x.device)\n Y = torch.linspace(0, 1, steps=H).view(\n 1, H).expand(N, H).cuda(device=x.device)\n\n X = (X - x0) / ww\n Y = (Y - y0) / hh\n\n X_out_mask = ((X < 0) + (X > 1)).view(N, 1, W).expand(N, H, W)\n Y_out_mask = ((Y < 0) + (Y > 1)).view(N, H, 1).expand(N, H, W)\n\n out_mask = 1 - (X_out_mask + Y_out_mask).float().clamp(max=1)\n return out_mask.view(b, o, H, W)\n\n\nclass PSPModule(nn.Module):\n \"\"\"\n Reference:\n Zhao, Hengshuang, et al. 
*\"Pyramid scene parsing network.\"*\n \"\"\"\n\n def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):\n super(PSPModule, self).__init__()\n\n self.stages = []\n self.stages = nn.ModuleList(\n [self._make_stage(features, out_features, size) for size in sizes])\n self.bottleneck = nn.Sequential(\n nn.Conv2d(features+len(sizes)*out_features, out_features,\n kernel_size=3, padding=1, dilation=1, bias=False),\n BatchNorm(out_features),\n nn.ReLU(),\n nn.Dropout2d(0.1)\n )\n\n def _make_stage(self, features, out_features, size):\n prior = nn.AdaptiveAvgPool2d(output_size=(size, size))\n conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)\n bn = nn.BatchNorm2d(out_features)\n return nn.Sequential(prior, conv, bn, nn.ReLU())\n\n def forward(self, feats):\n h, w = feats.size(2), feats.size(3)\n priors = [F.interpolate(input=stage(feats), size=(\n h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]\n bottle = self.bottleneck(torch.cat(priors, 1))\n return bottle\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.sigmoid",
"torch.gather",
"torch.nn.init.constant_",
"torch.nn.init.orthogonal_",
"torch.nn.Embedding",
"torch.zeros",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.linspace",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm",
"torch.nn.Sigmoid",
"torch.nn.functional.interpolate",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Dropout2d",
"torch.randn"
]
]
|
Y1fanHE/pushgp-adaptive-replacement-mutation-with-knowledge | [
"d88c483ef6fe197b2188eadd5498d7bb41d4314f"
]
| [
"02/rand_goodv.py"
]
| [
"'''\nAuthor: He,Yifan\nDate: 2021-12-04 20:44:01\nLastEditors: He,Yifan\nLastEditTime: 2021-12-06 15:29:07\n'''\nimport random, time, sys, numpy\nfrom pyshgp.gp.estimators import PushEstimator\nfrom pyshgp.gp.genome import GeneSpawner, Genome, VirusSpawner\nfrom pyshgp.gp.variation import (\n AdditionMutation, DeletionMutation, ReplaceMutation, VariationPipeline\n)\nfrom pyshgp.push.atoms import VirusInput, VirusInstructionMeta\nfrom pyshgp.push.config import PushConfig\nfrom pyshgp.push.instruction_set import InstructionSet\nfrom pyshgp.push.type_library import PushTypeLibrary, infer_virus_literal\nfrom load_benchmark import create_data\nseed = int(sys.argv[1]) # random seed\n\n\n'''Train/Test data\n'''\nproblem = 'small-or-large' # benchmark name\nbenchmark_root = 'psgb' # benchmark root\ntrain_edge_case = 27 # edge in train\ntrain_rand_case = 73 # random in train\ntest_edge_case = 0 # edge in test\ntest_rand_case = 1000 # random in test\n\ntrain = create_data( # create train data\n problem,\n train_edge_case,\n train_rand_case,\n benchmark_root,\n seed=seed\n)\ntest = create_data( # create test data\n problem,\n test_edge_case,\n test_rand_case,\n benchmark_root,\n seed=seed\n)\n\n\n'''Primitives\n'''\nn_inputs = 1 # number of inputs\nlast_str_from_stdout = True # no print string\ntypes = { # primitive types\n 'exec', 'int', 'bool', 'str'\n}\n\nliterals = [ # literals\n 'small', 'large'\n]\n\ndef rand_int(): # integer ERC\n return random.randint(-10000, 10000)\n\nerc_generators = [ # erc generators\n rand_int,\n]\n\nspawner = GeneSpawner( # gene spawner\n n_inputs=n_inputs,\n instruction_set=InstructionSet().register_core_by_stack(types),\n literals=literals,\n erc_generators=erc_generators,\n)\n\n\n'''Virus archive\n'''\nin_sol = [\n Genome([VirusInstructionMeta(name='exec_if', code_blocks=2)]),\n Genome([VirusInput(input_index=0),\n infer_virus_literal(val=1000, type_library=PushTypeLibrary()),\n VirusInstructionMeta(name='int_lt', code_blocks=0)]),\n Genome([VirusInput(input_index=0),\n infer_virus_literal(val=2000, type_library=PushTypeLibrary()),\n VirusInstructionMeta(name='int_gte', code_blocks=0)]),\n Genome([infer_virus_literal(val='small', type_library=PushTypeLibrary()),\n VirusInstructionMeta(name='print_str', code_blocks=0)]),\n Genome([infer_virus_literal(val='large', type_library=PushTypeLibrary()),\n VirusInstructionMeta(name='print_str', code_blocks=0)]),\n]\nvirus_spawner = VirusSpawner(viruses=in_sol)\n\n\n'''GP algorithm\n'''\nsearch = 'VGA' # search algorithm\nvariation = VariationPipeline([ # variation method\n AdditionMutation(0.09),\n DeletionMutation(0.0826),\n ReplaceMutation(0.5)\n])\npopulation_size = 200 # population size\nmax_generations = 300 # max generations\nparallelism = 25 # cpu core number\ninit_genome_size = (20, 100) # init genome length\nmax_genome_size = 800 # max genome length\nstep_limit = 300 # max execute steps\n\nest = PushEstimator( # estimator\n search=search,\n population_size=population_size,\n max_generations=max_generations,\n initial_genome_size=init_genome_size,\n max_genome_size=max_genome_size,\n simplification_steps=500,\n spawner=spawner,\n virus_spawner=virus_spawner,\n variation_strategy=variation,\n last_str_from_stdout=last_str_from_stdout,\n parallelism=parallelism,\n verbose=2,\n push_config=PushConfig(step_limit=step_limit)\n)\n\n\n'''Main program\n'''\nif __name__ == '__main__':\n random.seed(seed); numpy.random.seed(seed)\n\n start_time = time.time()\n\n est.fit(X=train.X, y=train.Y)\n\n print('\\tTest error:\\n\\t', 
est.score(X=test.X, y=test.Y).sum())\n\n print('\\tTime:\\n\\t {}'.format(time.strftime(\n '%H:%M:%S', time.gmtime(time.time() - start_time\n ))))\n"
]
| [
[
"numpy.random.seed"
]
]
|
PhilippeMarcotte/muzero-general | [
"086f4815c475a479289b8b18818f93fc9485c115"
]
| [
"games/per_cartpole.py"
]
| [
"import datetime\nimport os\n\nimport gym\nimport numpy\nimport torch\n\nfrom games.abstract_game import AbstractGame\n\n\nclass MuZeroConfig:\n def __init__(self):\n self.seed = 0 # Seed for numpy, torch and the game\n\n ### Game\n self.observation_shape = (1, 1,\n 4) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)\n self.action_space = [i for i in range(2)] # Fixed list of all possible actions. You should only edit the length\n self.players = [i for i in range(1)] # List of players. You should only edit the length\n self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation\n\n ### Self-Play\n self.num_actors = 1 # Number of simultaneous threads self-playing to feed the replay buffer\n self.max_moves = 500 # Maximum number of moves if game is not finished before\n self.num_simulations = 50 # Number of future moves self-simulated\n self.discount = 0.997 # Chronological discount of the reward\n self.temperature_threshold = 500 # Number of moves before dropping temperature to 0 (ie playing according to the max)\n\n # Root prior exploration noise\n self.root_dirichlet_alpha = 0.25\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n ### Network\n self.network = \"fullyconnected\" # \"resnet\" / \"fullyconnected\"\n self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size\n\n # Residual Network\n self.downsample = False # Downsample observations before representation network (See paper appendix Network Architecture)\n self.blocks = 1 # Number of blocks in the ResNet\n self.channels = 2 # Number of channels in the ResNet\n self.reduced_channels = 2 # Number of channels before heads of dynamic and prediction networks\n self.resnet_fc_reward_layers = [] # Define the hidden layers in the reward head of the dynamic network\n self.resnet_fc_value_layers = [] # Define the hidden layers in the value head of the prediction network\n self.resnet_fc_policy_layers = [] # Define the hidden layers in the policy head of the prediction network\n\n # Fully Connected Network\n self.encoding_size = 8\n self.fc_reward_layers = [64] # Define the hidden layers in the reward network\n self.fc_value_layers = [] # Define the hidden layers in the value network\n self.fc_policy_layers = [] # Define the hidden layers in the policy network\n self.fc_representation_layers = [] # Define the hidden layers in the representation network\n self.fc_dynamics_layers = [64] # Define the hidden layers in the dynamics network\n\n ### Training\n self.results_path = os.path.join(os.path.dirname(__file__), \"../results\", os.path.basename(__file__)[:-3],\n datetime.datetime.now().strftime(\n \"%Y-%m-%d--%H-%M-%S\")) # Path to store the model weights and TensorBoard logs\n self.training_steps = 10000 # Total number of training steps (ie weights update according to a batch)\n self.batch_size = 128 # Number of parts of games to train on at each training step\n self.checkpoint_interval = 20 # Number of training steps before using the model for sef-playing\n self.value_loss_weight = 1 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)\n self.training_device = \"cuda\" if torch.cuda.is_available() else \"cpu\" # Train on GPU if available\n\n self.optimizer = \"Adam\" # \"Adam\" or 
\"SGD\". Paper uses SGD\n self.weight_decay = 1e-4 # L2 weights regularization\n self.momentum = 0.9 # Used only if optimizer is SGD\n\n # Exponential learning rate schedule\n self.lr_init = 0.05 # Initial learning rate\n self.lr_decay_rate = 0.9 # Set it to 1 to use a constant learning rate\n self.lr_decay_steps = 10000\n\n # Muzero Reanalyze\n self.reanalyze_mode = \"true\" # or \"fast\"\n self.num_reanalyze_cpus = 27\n self.policy_update_rate = 0\n\n ### Replay Buffer\n self.window_size = 500 # Number of self-play games to keep in the replay buffer\n self.num_unroll_steps = 5 # Number of game moves to keep for every batch element\n self.td_steps = 50 # Number of steps in the future to take into account for calculating the target value\n self.use_last_model_value = False # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)\n\n # Prioritized Replay (See paper appendix Training)\n self.PER = True # Select in priority the elements in the replay buffer which are unexpected for the network\n self.use_max_priority = False # Use the n-step TD error as initial priority. Better for large replay buffer\n self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1\n self.PER_beta = 1.0\n\n ### Adjust the self play / training ratio to avoid over/underfitting\n self.self_play_delay = 0 # Number of seconds to wait after each played game\n self.training_delay = 0 # Number of seconds to wait after each training step\n self.ratio = None # Desired self played games per training step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it\n\n def visit_softmax_temperature_fn(self, trained_steps):\n \"\"\"\n Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.\n The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.\n\n Returns:\n Positive float.\n \"\"\"\n if trained_steps < 0.5 * self.training_steps:\n return 1.0\n elif trained_steps < 0.75 * self.training_steps:\n return 0.5\n else:\n return 0.25\n\n\nclass Game(AbstractGame):\n \"\"\"\n Game wrapper.\n \"\"\"\n\n def __init__(self, seed=None):\n self.env = gym.make(\"CartPole-v1\")\n if seed is not None:\n self.env.seed(seed)\n\n def step(self, action):\n \"\"\"\n Apply action to the game.\n\n Args:\n action : action of the action_space to take.\n\n Returns:\n The new observation, the reward and a boolean if the game has ended.\n \"\"\"\n observation, reward, done, _ = self.env.step(action)\n return numpy.array([[observation]]), reward, done\n\n def to_play(self):\n \"\"\"\n Return the current player.\n\n Returns:\n The current player, it should be an element of the players list in the config.\n \"\"\"\n return 0\n\n def legal_actions(self):\n \"\"\"\n Should return the legal actions at each turn, if it is not available, it can return\n the whole action space. 
At each turn, the game have to be able to handle one of returned actions.\n\n For complex game where calculating legal moves is too long, the idea is to define the legal actions\n equal to the action space but to return a negative reward if the action is illegal.\n\n Returns:\n An array of integers, subset of the action space.\n \"\"\"\n return [i for i in range(2)]\n\n def reset(self):\n \"\"\"\n Reset the game for a new game.\n\n Returns:\n Initial observation of the game.\n \"\"\"\n return numpy.array([[self.env.reset()]])\n\n def close(self):\n \"\"\"\n Properly close the game.\n \"\"\"\n self.env.close()\n\n def render(self):\n \"\"\"\n Display the game observation.\n \"\"\"\n self.env.render()\n input(\"Press enter to take a step \")\n\n def human_to_action(self):\n \"\"\"\n For multiplayer games, ask the user for a legal action\n and return the corresponding action number.\n\n Returns:\n An integer from the action space.\n \"\"\"\n pass\n\n def action_to_string(self, action_number):\n \"\"\"\n Convert an action number to a string representing the action.\n\n Args:\n action_number: an integer from the action space.\n\n Returns:\n String representing the action.\n \"\"\"\n actions = {\n 0: \"Push cart to the left\",\n 1: \"Push cart to the right\",\n }\n return \"{}. {}\".format(action_number, actions[action_number])\n"
]
| [
[
"numpy.array",
"torch.cuda.is_available"
]
]
|
HW21/TeachSpice | [
"8cf0ba8603dd82eeb35b45e964df9ca69cd09747"
]
| [
"spice/solve.py"
]
| [
"\"\"\"\nSolver Class(es)\n\"\"\"\n\nimport numpy as np\nfrom typing import Dict, AnyStr, SupportsFloat\n\n\nclass MnaSystem(object):\n \"\"\"\n Represents the non-linear matrix-equation\n G*x + H*g(x) = s\n\n And the attendant break-downs which help us solve it.\n f(x) = G*x + H*g(x) - s # The quantity to be zero'ed\n Jf(x) = df(x)/dx = G + Jg(x) # Definition of the Jacobian matrix `Jf(x)`\n Jf(x) * dx + f(x) = 0 # Newton update equation, to be solved for `dx`\n \"\"\"\n\n def __init__(self, ckt, an):\n self.ckt = ckt\n self.an = an\n ckt.mx = self\n\n self.num_nodes = len(ckt.nodes)\n\n self.G = np.zeros((self.num_nodes, self.num_nodes))\n self.Gt = np.zeros((self.num_nodes, self.num_nodes))\n self.Jg = np.zeros((self.num_nodes, self.num_nodes))\n self.Hg = np.zeros(self.num_nodes)\n self.s = np.zeros(self.num_nodes)\n self.st = np.zeros(self.num_nodes)\n\n def update(self) -> None:\n \"\"\" Update non-linear component operating points \"\"\"\n self.Jg = np.zeros((self.num_nodes, self.num_nodes))\n self.Hg = np.zeros(self.num_nodes)\n for comp in self.ckt.comps:\n comp.mna_update(self.an)\n\n def res(self, x: np.ndarray) -> np.ndarray:\n \"\"\" Return the residual error, given solution `x`. \"\"\"\n return (self.G + self.Gt).dot(x) + self.Hg - self.s - self.st\n\n def solve(self, x: np.ndarray) -> np.ndarray:\n \"\"\" Solve our temporary-valued matrix for a change in x. \"\"\"\n # print(f'G: {self.G}')\n # print(f'G-dot-x: {self.G.dot(x)}')\n # print(f'Jg: {self.Jg}')\n # print(f'Hg: {self.Hg}')\n # print(f's: {self.s}')\n\n lhs = self.G + self.Gt + self.Jg\n rhs = -1 * self.res(x)\n # print(f'lhs: {lhs}')\n # print(f'rhs: {rhs}')\n dx = np.linalg.solve(lhs, rhs)\n return dx\n\n\nclass Solver:\n \"\"\" Newton-Raphson Matrix Solver \"\"\"\n\n def __init__(self, an, x0=None):\n self.an = an\n self.mx = an.mx\n self.x = np.array(x0, dtype='float64') if np.any(x0) else np.zeros(self.mx.num_nodes)\n self.history = [np.copy(self.x)]\n\n def update(self):\n \"\"\" Update non-linear component operating points \"\"\"\n return self.mx.update()\n\n def iterate(self) -> None:\n \"\"\" Update method for Newton iterations \"\"\"\n # Update non-linear component operating points\n self.update()\n # Solve Jf(x) * dx + f(x) = 0\n dx = self.mx.solve(self.x)\n # Step limiting\n MAX_STEP = 0.1\n if np.any(np.abs(dx) > MAX_STEP):\n # print(f'MAX STEPPING {np.max(np.abs(dx))}')\n dx *= MAX_STEP / np.max(np.abs(dx))\n # print(f'Updating by: {dx}')\n self.x += dx\n self.history.append(np.copy(self.x))\n\n def converged(self) -> bool:\n \"\"\" Convergence test, including Newton-iteration-similarity and KCL. \"\"\"\n\n # FIXME: WTF we doing this for?\n # self.update()\n\n # Newton iteration similarity\n v_tol = 1e-6\n v_diff = self.history[-1] - self.history[-2]\n if np.any(np.abs(v_diff) >= v_tol):\n return False\n\n # KCL Residual\n i_tol = 1e-9\n i_res = self.mx.res(self.x)\n if np.any(i_res >= i_tol):\n return False\n\n # If we get here, they all passed!\n return True\n\n def solve(self) -> np.ndarray:\n \"\"\" Compute the Newton-Raphson-based non-linear solution. 
\"\"\"\n max_iters = 500\n\n for i in range(max_iters):\n # print(f'Iter #{i} - Guessing {self.x}')\n self.iterate()\n if self.converged():\n break\n\n if i >= max_iters - 1:\n print(self.history)\n raise Exception(f'Could Not Converge to Solution ')\n\n # print(f'Successfully Converged to {self.x} in {i+1} iterations')\n return self.x\n\n\nclass ScipySolver:\n \"\"\" Solver based on scipy.optimize.minimize\n The general idea is *minimizing* KCL error, rather than solving for when it equals zero.\n To our knowledge, no other SPICE solver tries this. Maybe it's a bad idea. \"\"\"\n\n def __init__(self, an, *, x0=None):\n self.an = an\n self.x0 = np.array(x0, dtype='float64') if np.any(x0) else np.zeros(len(an.ckt.nodes))\n self.x = self.x0\n self.history = [self.x0]\n self.results = []\n\n def solve(self):\n import scipy.optimize\n\n options = dict(fatol=1e-31, disp=False)\n result = scipy.optimize.minimize(fun=self.guess, x0=self.x0, method='nelder-mead', options=options)\n\n if not result.success:\n raise TabError(str(result))\n # print(f'Solved: {result.x}')\n return result.x\n\n def get_v(self, comp) -> Dict[AnyStr, SupportsFloat]:\n \"\"\" Get a dictionary of port-voltages, of the form `port_name`:`voltage`. \"\"\"\n v = {}\n for name, node in comp.conns.items():\n if node.solve:\n v[name] = self.x[node.num]\n else:\n v[name] = self.an.ckt.forces[node]\n return v\n\n def guess(self, x):\n # print(f'Guessing {x}')\n self.x = x\n self.history.append(x)\n an = self.an\n kcl_results = np.zeros(len(an.ckt.nodes))\n for comp in an.ckt.comps:\n comp_v = self.get_v(comp)\n # print(f'{comp} has voltages {comp_v}')\n comp_i = comp.i(comp_v)\n # {d:1.3, s:=1.3, g:0, b:0}\n for name, i in comp_i.items():\n node = comp.conns[name]\n if node.solve:\n # print(f'{comp} updating {node} by {i}')\n kcl_results[node.num] += i\n # print(f'KCL: {kcl_results}')\n rv = np.sum(kcl_results ** 2)\n self.results.append(rv)\n return rv\n\n\n\"\"\" 'Configuration' of which Solver to use \"\"\"\nTheSolverCls = Solver\n# TheSolverCls = ScipySolver\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.copy",
"numpy.any",
"numpy.linalg.solve",
"numpy.abs"
]
]
|
gb-heimaf/lynx-x-ray-observatory | [
"a1e5a17cab3763975e0d3dc0840f0359de8c8087"
]
| [
"soxs/events.py"
]
| [
"import numpy as np\nfrom astropy.io import fits\nfrom astropy import wcs\nimport os\nfrom soxs.utils import mylog, parse_value, get_rot_mat, \\\n create_region\nfrom soxs.instrument_registry import instrument_registry\nfrom tqdm.auto import tqdm\n\n\ndef wcs_from_event_file(f):\n h = f[\"EVENTS\"].header\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [h[\"TCRVL2\"], h[\"TCRVL3\"]]\n w.wcs.crpix = [h[\"TCRPX2\"], h[\"TCRPX3\"]]\n w.wcs.cdelt = [h[\"TCDLT2\"], h[\"TCDLT3\"]]\n w.wcs.ctype = [h[\"TCTYP2\"], h[\"TCTYP3\"]]\n w.wcs.cunit = [h[\"TCUNI2\"], h[\"TCUNI3\"]]\n return w\n\n\ndef write_event_file(events, parameters, filename, overwrite=False):\n from astropy.time import Time, TimeDelta\n mylog.info(f\"Writing events to file {filename}.\")\n\n t_begin = Time.now()\n dt = TimeDelta(parameters[\"exposure_time\"], format='sec')\n t_end = t_begin + dt\n\n col_x = fits.Column(name='X', format='D', unit='pixel', array=events[\"xpix\"])\n col_y = fits.Column(name='Y', format='D', unit='pixel', array=events[\"ypix\"])\n col_e = fits.Column(name='ENERGY', format='E', unit='eV', array=events[\"energy\"]*1000.)\n col_dx = fits.Column(name='DETX', format='D', unit='pixel', array=events[\"detx\"])\n col_dy = fits.Column(name='DETY', format='D', unit='pixel', array=events[\"dety\"])\n col_id = fits.Column(name='CCD_ID', format='J', unit='pixel', array=events[\"ccd_id\"])\n\n chantype = parameters[\"channel_type\"].lower()\n if chantype == \"pha\":\n cunit = \"adu\"\n elif chantype == \"pi\":\n cunit = \"Chan\"\n col_ch = fits.Column(name=chantype.upper(), format='1J', unit=cunit, \n array=events[chantype])\n\n col_t = fits.Column(name=\"TIME\", format='1D', unit='s', \n array=events['time'])\n\n cols = [col_e, col_x, col_y, col_ch, col_t, col_dx, col_dy, col_id]\n\n coldefs = fits.ColDefs(cols)\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.name = \"EVENTS\"\n\n tbhdu.header[\"MTYPE1\"] = \"sky\"\n tbhdu.header[\"MFORM1\"] = \"x,y\"\n tbhdu.header[\"MTYPE2\"] = \"EQPOS\"\n tbhdu.header[\"MFORM2\"] = \"RA,DEC\"\n tbhdu.header[\"TCTYP2\"] = \"RA---TAN\"\n tbhdu.header[\"TCTYP3\"] = \"DEC--TAN\"\n tbhdu.header[\"TCRVL2\"] = parameters[\"sky_center\"][0]\n tbhdu.header[\"TCRVL3\"] = parameters[\"sky_center\"][1]\n tbhdu.header[\"TCDLT2\"] = -parameters[\"plate_scale\"]\n tbhdu.header[\"TCDLT3\"] = parameters[\"plate_scale\"]\n tbhdu.header[\"TCRPX2\"] = parameters[\"pix_center\"][0]\n tbhdu.header[\"TCRPX3\"] = parameters[\"pix_center\"][1]\n tbhdu.header[\"TCUNI2\"] = \"deg\"\n tbhdu.header[\"TCUNI3\"] = \"deg\"\n tbhdu.header[\"TLMIN2\"] = 0.5\n tbhdu.header[\"TLMIN3\"] = 0.5\n tbhdu.header[\"TLMAX2\"] = 2.0*parameters[\"num_pixels\"]+0.5\n tbhdu.header[\"TLMAX3\"] = 2.0*parameters[\"num_pixels\"]+0.5\n tbhdu.header[\"TLMIN4\"] = parameters[\"chan_lim\"][0]\n tbhdu.header[\"TLMAX4\"] = parameters[\"chan_lim\"][1]\n tbhdu.header[\"TLMIN6\"] = -0.5*parameters[\"num_pixels\"]\n tbhdu.header[\"TLMAX6\"] = 0.5*parameters[\"num_pixels\"]\n tbhdu.header[\"TLMIN7\"] = -0.5*parameters[\"num_pixels\"]\n tbhdu.header[\"TLMAX7\"] = 0.5*parameters[\"num_pixels\"]\n tbhdu.header[\"EXPOSURE\"] = parameters[\"exposure_time\"]\n tbhdu.header[\"TSTART\"] = 0.0\n tbhdu.header[\"TSTOP\"] = parameters[\"exposure_time\"]\n tbhdu.header[\"HDUVERS\"] = \"1.1.0\"\n tbhdu.header[\"RADECSYS\"] = \"FK5\"\n tbhdu.header[\"EQUINOX\"] = 2000.0\n tbhdu.header[\"HDUCLASS\"] = \"OGIP\"\n tbhdu.header[\"HDUCLAS1\"] = \"EVENTS\"\n tbhdu.header[\"HDUCLAS2\"] = \"ACCEPTED\"\n tbhdu.header[\"DATE\"] = t_begin.tt.isot\n 
tbhdu.header[\"DATE-OBS\"] = t_begin.tt.isot\n tbhdu.header[\"DATE-END\"] = t_end.tt.isot\n tbhdu.header[\"RESPFILE\"] = os.path.split(parameters[\"rmf\"])[-1]\n tbhdu.header[\"PHA_BINS\"] = parameters[\"nchan\"]\n tbhdu.header[\"ANCRFILE\"] = os.path.split(parameters[\"arf\"])[-1]\n tbhdu.header[\"CHANTYPE\"] = parameters[\"channel_type\"]\n tbhdu.header[\"MISSION\"] = parameters[\"mission\"]\n tbhdu.header[\"TELESCOP\"] = parameters[\"telescope\"]\n tbhdu.header[\"INSTRUME\"] = parameters[\"instrument\"]\n tbhdu.header[\"RA_PNT\"] = parameters[\"sky_center\"][0]\n tbhdu.header[\"DEC_PNT\"] = parameters[\"sky_center\"][1]\n tbhdu.header[\"ROLL_PNT\"] = parameters[\"roll_angle\"]\n tbhdu.header[\"AIMPT_X\"] = parameters[\"aimpt_coords\"][0]\n tbhdu.header[\"AIMPT_Y\"] = parameters[\"aimpt_coords\"][1]\n tbhdu.header[\"AIMPT_DX\"] = parameters[\"aimpt_shift\"][0]\n tbhdu.header[\"AIMPT_DY\"] = parameters[\"aimpt_shift\"][1]\n if parameters[\"dither_params\"][\"dither_on\"]:\n tbhdu.header[\"DITHXAMP\"] = parameters[\"dither_params\"][\"x_amp\"]\n tbhdu.header[\"DITHYAMP\"] = parameters[\"dither_params\"][\"y_amp\"]\n tbhdu.header[\"DITHXPER\"] = parameters[\"dither_params\"][\"x_period\"]\n tbhdu.header[\"DITHYPER\"] = parameters[\"dither_params\"][\"y_period\"]\n\n start = fits.Column(name='START', format='1D', unit='s',\n array=np.array([0.0]))\n stop = fits.Column(name='STOP', format='1D', unit='s',\n array=np.array([parameters[\"exposure_time\"]]))\n\n tbhdu_gti = fits.BinTableHDU.from_columns([start,stop])\n tbhdu_gti.name = \"STDGTI\"\n tbhdu_gti.header[\"TSTART\"] = 0.0\n tbhdu_gti.header[\"TSTOP\"] = parameters[\"exposure_time\"]\n tbhdu_gti.header[\"HDUCLASS\"] = \"OGIP\"\n tbhdu_gti.header[\"HDUCLAS1\"] = \"GTI\"\n tbhdu_gti.header[\"HDUCLAS2\"] = \"STANDARD\"\n tbhdu_gti.header[\"RADECSYS\"] = \"FK5\"\n tbhdu_gti.header[\"EQUINOX\"] = 2000.0\n tbhdu_gti.header[\"DATE\"] = t_begin.tt.isot\n tbhdu_gti.header[\"DATE-OBS\"] = t_begin.tt.isot\n tbhdu_gti.header[\"DATE-END\"] = t_end.tt.isot\n\n hdulist = [fits.PrimaryHDU(), tbhdu, tbhdu_gti]\n\n fits.HDUList(hdulist).writeto(filename, overwrite=overwrite)\n\n\ndef make_exposure_map(event_file, expmap_file, energy, weights=None,\n asol_file=None, normalize=True, overwrite=False,\n reblock=1, nhistx=16, nhisty=16):\n \"\"\"\n Make an exposure map for a SOXS event file, and optionally write\n an aspect solution file. The exposure map will be created by\n binning an aspect histogram over the range of the aspect solution.\n\n Parameters\n ----------\n event_file : string\n The path to the event file to use for making the exposure map.\n expmap_file : string\n The path to write the exposure map file to.\n energy : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, or NumPy array\n The energy in keV to use when computing the exposure map, or \n a set of energies to be used with the *weights* parameter. If\n providing a set, it must be in keV.\n weights : array-like, optional\n The weights to use with a set of energies given in the\n *energy* parameter. Used to create a more accurate exposure\n map weighted by a range of energies. Default: None\n asol_file : string, optional\n The path to write the aspect solution file to, if desired.\n Default: None\n normalize : boolean, optional\n If True, the exposure map will be divided by the exposure time\n so that the map's units are cm**2. Default: True\n overwrite : boolean, optional\n Whether or not to overwrite an existing file. 
Default: False\n reblock : integer, optional\n Supply an integer power of 2 here to make an exposure map\n with a different binning. Default: 1\n nhistx : integer, optional\n The number of bins in the aspect histogram in the DETX\n direction. Default: 16\n nhisty : integer, optional\n The number of bins in the aspect histogram in the DETY\n direction. Default: 16\n order : integer, optional\n The interpolation order to use when making the exposure map. \n Default: 1\n \"\"\"\n from scipy.ndimage.interpolation import rotate\n from soxs.instrument import perform_dither\n from soxs.response import AuxiliaryResponseFile\n if isinstance(energy, np.ndarray) and weights is None:\n raise RuntimeError(\"Must supply a single value for the energy if \"\n \"you do not supply weights!\")\n if not isinstance(energy, np.ndarray):\n energy = parse_value(energy, \"keV\")\n f_evt = fits.open(event_file)\n hdu = f_evt[\"EVENTS\"]\n arf = AuxiliaryResponseFile(hdu.header[\"ANCRFILE\"])\n exp_time = hdu.header[\"EXPOSURE\"]\n nx = int(hdu.header[\"TLMAX2\"]-0.5)//2\n ny = int(hdu.header[\"TLMAX3\"]-0.5)//2\n ra0 = hdu.header[\"TCRVL2\"]\n dec0 = hdu.header[\"TCRVL3\"]\n xdel = hdu.header[\"TCDLT2\"]\n ydel = hdu.header[\"TCDLT3\"]\n x0 = hdu.header[\"TCRPX2\"]\n y0 = hdu.header[\"TCRPX3\"]\n xaim = hdu.header.get(\"AIMPT_X\", 0.0)\n yaim = hdu.header.get(\"AIMPT_Y\", 0.0)\n xaim += hdu.header.get(\"AIMPT_DX\", 0.0)\n yaim += hdu.header.get(\"AIMPT_DY\", 0.0)\n roll = hdu.header[\"ROLL_PNT\"]\n instr = instrument_registry[hdu.header[\"INSTRUME\"].lower()]\n dither_params = {}\n if \"DITHXAMP\" in hdu.header:\n dither_params[\"x_amp\"] = hdu.header[\"DITHXAMP\"]\n dither_params[\"y_amp\"] = hdu.header[\"DITHYAMP\"]\n dither_params[\"x_period\"] = hdu.header[\"DITHXPER\"]\n dither_params[\"y_period\"] = hdu.header[\"DITHYPER\"]\n dither_params[\"plate_scale\"] = ydel*3600.0\n dither_params[\"dither_on\"] = True\n else:\n dither_params[\"dither_on\"] = False\n f_evt.close()\n\n # Create time array for aspect solution\n dt = 1.0 # Seconds\n t = np.arange(0.0, exp_time+dt, dt)\n\n # Construct WCS\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [ra0, dec0]\n w.wcs.crpix = [x0, y0]\n w.wcs.cdelt = [xdel, ydel]\n w.wcs.ctype = [\"RA---TAN\",\"DEC--TAN\"]\n w.wcs.cunit = [\"deg\"]*2\n\n # Create aspect solution if we had dithering.\n # otherwise just set the offsets to zero\n if dither_params[\"dither_on\"]:\n x_off, y_off = perform_dither(t, dither_params)\n # Make the aspect histogram\n x_amp = dither_params[\"x_amp\"]/dither_params[\"plate_scale\"]\n y_amp = dither_params[\"y_amp\"]/dither_params[\"plate_scale\"]\n x_edges = np.linspace(-x_amp, x_amp, nhistx+1, endpoint=True)\n y_edges = np.linspace(-y_amp, y_amp, nhisty+1, endpoint=True)\n asphist = np.histogram2d(x_off, y_off, (x_edges, y_edges))[0]\n asphist *= dt\n x_mid = 0.5*(x_edges[1:]+x_edges[:-1])/reblock\n y_mid = 0.5*(y_edges[1:]+y_edges[:-1])/reblock\n else:\n asphist = exp_time*np.ones((1,1))\n\n # Determine the effective area\n eff_area = arf.interpolate_area(energy).value\n if weights is not None:\n eff_area = np.average(eff_area, weights=weights)\n\n rtypes = []\n args = []\n for i, chip in enumerate(instr[\"chips\"]):\n rtypes.append(chip[0])\n args.append(np.array(chip[1:])/reblock)\n\n xdet0 = 0.5*(2*nx//reblock+1)\n ydet0 = 0.5*(2*ny//reblock+1)\n xaim //= reblock\n yaim //= reblock\n dx = xdet0-xaim-1.0\n dy = ydet0-yaim-1.0\n\n if dither_params[\"dither_on\"]:\n niterx = nhistx\n nitery = nhisty\n else:\n niterx = 1\n nitery = 1\n\n expmap = 
np.zeros((2*nx//reblock, 2*ny//reblock))\n niter = niterx*nitery\n pbar = tqdm(leave=True, total=niter, desc=\"Creating exposure map \")\n for i in range(niterx):\n for j in range(nitery):\n chips, _ = create_region(rtypes[0], args[0], dx+x_mid[i], dy+y_mid[j])\n for rtype, arg in zip(rtypes[1:], args[1:]):\n r, _ = create_region(rtype, arg, dx+x_mid[i], dy+y_mid[j])\n chips = chips | r\n dexp = chips.to_mask().to_image(expmap.shape).astype(\"float64\")\n expmap += dexp*asphist[i,j]\n pbar.update(nitery)\n pbar.close()\n\n expmap *= eff_area\n if normalize:\n expmap /= exp_time\n\n if roll != 0.0:\n rotate(expmap, roll, output=expmap, reshape=False)\n\n expmap[expmap < 0.0] = 0.0\n\n map_header = {\"EXPOSURE\": exp_time,\n \"MTYPE1\": \"EQPOS\",\n \"MFORM1\": \"RA,DEC\",\n \"CTYPE1\": \"RA---TAN\",\n \"CTYPE2\": \"DEC--TAN\",\n \"CRVAL1\": ra0,\n \"CRVAL2\": dec0,\n \"CUNIT1\": \"deg\",\n \"CUNIT2\": \"deg\",\n \"CDELT1\": xdel*reblock,\n \"CDELT2\": ydel*reblock,\n \"CRPIX1\": 0.5*(2.0*nx//reblock+1),\n \"CRPIX2\": 0.5*(2.0*ny//reblock+1)}\n\n map_hdu = fits.ImageHDU(expmap, header=fits.Header(map_header))\n map_hdu.name = \"EXPMAP\"\n map_hdu.writeto(expmap_file, overwrite=overwrite)\n\n if asol_file is not None:\n\n if dither_params[\"dither_on\"]:\n\n det = np.array([x_off, y_off])\n\n pix = np.dot(get_rot_mat(roll).T, det)\n\n ra, dec = w.wcs_pix2world(pix[0,:]+x0, pix[1,:]+y0, 1)\n\n col_t = fits.Column(name='time', format='D', unit='s', array=t)\n col_ra = fits.Column(name='ra', format='D', unit='deg', array=ra)\n col_dec = fits.Column(name='dec', format='D', unit='deg', array=dec)\n\n coldefs = fits.ColDefs([col_t, col_ra, col_dec])\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.name = \"ASPSOL\"\n tbhdu.header[\"EXPOSURE\"] = exp_time\n\n hdulist = [fits.PrimaryHDU(), tbhdu]\n\n fits.HDUList(hdulist).writeto(asol_file, overwrite=overwrite)\n\n else:\n\n mylog.warning(\"Refusing to write an aspect solution file because \"\n \"there was no dithering.\")\n\n\ndef _write_spectrum(bins, spec, exp_time, spectype, parameters,\n specfile, overwrite=False):\n\n col1 = fits.Column(name='CHANNEL', format='1J', array=bins)\n col2 = fits.Column(name=spectype.upper(), format='1D', array=bins.astype(\"float64\"))\n col3 = fits.Column(name='COUNTS', format='1J', array=spec.astype(\"int32\"))\n col4 = fits.Column(name='COUNT_RATE', format='1D', array=spec/exp_time)\n\n coldefs = fits.ColDefs([col1, col2, col3, col4])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.name = \"SPECTRUM\"\n\n tbhdu.header[\"DETCHANS\"] = spec.size\n tbhdu.header[\"TOTCTS\"] = spec.sum()\n tbhdu.header[\"EXPOSURE\"] = exp_time\n tbhdu.header[\"LIVETIME\"] = exp_time\n tbhdu.header[\"CONTENT\"] = spectype\n tbhdu.header[\"HDUCLASS\"] = \"OGIP\"\n tbhdu.header[\"HDUCLAS1\"] = \"SPECTRUM\"\n tbhdu.header[\"HDUCLAS2\"] = \"TOTAL\"\n tbhdu.header[\"HDUCLAS3\"] = \"TYPE:I\"\n tbhdu.header[\"HDUCLAS4\"] = \"COUNT\"\n tbhdu.header[\"HDUVERS\"] = \"1.1.0\"\n tbhdu.header[\"HDUVERS1\"] = \"1.1.0\"\n tbhdu.header[\"CHANTYPE\"] = spectype\n tbhdu.header[\"BACKFILE\"] = \"none\"\n tbhdu.header[\"CORRFILE\"] = \"none\"\n tbhdu.header[\"POISSERR\"] = True\n for key in [\"RESPFILE\", \"ANCRFILE\", \"MISSION\", \"TELESCOP\", \"INSTRUME\"]:\n tbhdu.header[key] = parameters[key]\n tbhdu.header[\"AREASCAL\"] = 1.0\n tbhdu.header[\"CORRSCAL\"] = 0.0\n tbhdu.header[\"BACKSCAL\"] = 1.0\n\n hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu])\n\n hdulist.writeto(specfile, overwrite=overwrite)\n\n\ndef write_spectrum(evtfile, 
specfile, overwrite=False):\n r\"\"\"\n Bin event energies into a spectrum and write it to \n a FITS binary table. Does not do any grouping of \n channels, and will automatically determine PI or PHA. \n\n Parameters\n ----------\n evtfile : string\n The name of the event file to read the events from. \n specfile : string\n The name of the spectrum file to be written.\n overwrite : boolean, optional\n Whether or not to overwrite an existing file with \n the same name. Default: False\n \"\"\"\n from soxs.response import RedistributionMatrixFile\n parameters = {}\n if isinstance(evtfile, str):\n f = fits.open(evtfile)\n spectype = f[\"EVENTS\"].header[\"CHANTYPE\"]\n rmf = f[\"EVENTS\"].header[\"RESPFILE\"]\n p = f[\"EVENTS\"].data[spectype]\n exp_time = f[\"EVENTS\"].header[\"EXPOSURE\"]\n for key in [\"RESPFILE\", \"ANCRFILE\", \"MISSION\", \"TELESCOP\", \"INSTRUME\"]:\n parameters[key] = f[\"EVENTS\"].header[key]\n f.close()\n else:\n rmf = evtfile[\"rmf\"]\n spectype = evtfile[\"channel_type\"]\n p = evtfile[spectype]\n parameters[\"RESPFILE\"] = os.path.split(rmf)[-1]\n parameters[\"ANCRFILE\"] = os.path.split(evtfile[\"arf\"])[-1]\n parameters[\"TELESCOP\"] = evtfile[\"telescope\"] \n parameters[\"INSTRUME\"] = evtfile[\"instrument\"]\n parameters[\"MISSION\"] = evtfile[\"mission\"] \n exp_time = evtfile[\"exposure_time\"]\n\n rmf = RedistributionMatrixFile(rmf)\n minlength = rmf.n_ch\n if rmf.cmin == 1:\n minlength += 1\n spec = np.bincount(p, minlength=minlength)\n if rmf.cmin == 1:\n spec = spec[1:]\n bins = (np.arange(rmf.n_ch)+rmf.cmin).astype(\"int32\")\n\n _write_spectrum(bins, spec, exp_time, spectype, parameters,\n specfile, overwrite=overwrite)\n\n\ndef write_radial_profile(evt_file, out_file, ctr, rmin,\n rmax, nbins, ctr_type=\"celestial\",\n emin=None, emax=None, expmap_file=None,\n overwrite=False):\n r\"\"\"\n Bin up events into a radial profile and write them to a FITS\n table. \n\n Parameters\n ----------\n evt_file : string\n Input event file.\n out_file : string\n The output file to write the profile to. \n ctr : array-like\n The central coordinate of the profile. Can either be in \n celestial coordinates (the default) or \"physical\" pixel \n coordinates. If the former, the ``ctr_type`` keyword \n argument must be explicity set to \"physical\".\n rmin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`\n The minimum radius of the profile, in arcseconds. \n rmax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`\n The maximum radius of the profile, in arcseconds.\n nbins : integer\n The number of bins in the profile.\n ctr_type : string, optional\n The type of center coordinate. Either \"celestial\" for \n (RA, Dec) coordinates (the default), or \"physical\" for \n pixel coordinates.\n emin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional\n The minimum energy of the events to be binned in keV. \n Default is the lowest energy available.\n emax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional\n The maximum energy of the events to be binned in keV. \n Default is the highest energy available.\n overwrite : boolean, optional\n Whether or not to overwrite an existing file with the \n same name. Default: False\n expmap_file : string, optional\n Supply an exposure map file to determine fluxes. 
\n Default: None\n \"\"\"\n rmin = parse_value(rmin, \"arcsec\")\n rmax = parse_value(rmax, \"arcsec\")\n f = fits.open(evt_file)\n hdu = f[\"EVENTS\"]\n orig_dx = hdu.header[\"TCDLT3\"]\n e = hdu.data[\"ENERGY\"]\n if emin is None:\n emin = e.min()\n else:\n emin = parse_value(emin, \"keV\")\n emin *= 1000.\n if emax is None:\n emax = e.max()\n else:\n emax = parse_value(emax, \"keV\")\n emax *= 1000.\n idxs = np.logical_and(e > emin, e < emax)\n x = hdu.data[\"X\"][idxs]\n y = hdu.data[\"Y\"][idxs]\n exp_time = hdu.header[\"EXPOSURE\"]\n w = wcs_from_event_file(f)\n dtheta = np.abs(w.wcs.cdelt[1])*3600.0\n f.close()\n\n if ctr_type == \"celestial\":\n ctr = w.all_world2pix(ctr[0], ctr[1], 1)\n\n r = np.sqrt((x-ctr[0])**2+(y-ctr[1])**2)\n rr = np.linspace(rmin/dtheta, rmax/dtheta, nbins+1)\n C = np.histogram(r, bins=rr)[0]\n rbin = rr*dtheta\n rmid = 0.5*(rbin[1:]+rbin[:-1])\n\n A = np.pi*(rbin[1:]**2-rbin[:-1]**2)\n\n Cerr = np.sqrt(C)\n\n R = C/exp_time\n Rerr = Cerr/exp_time\n\n S = R/A\n Serr = Rerr/A\n\n col1 = fits.Column(name='RLO', format='D', unit='arcsec', array=rbin[:-1])\n col2 = fits.Column(name='RHI', format='D', unit='arcsec', array=rbin[1:])\n col3 = fits.Column(name='RMID', format='D', unit='arcsec', array=rmid)\n col4 = fits.Column(name='AREA', format='D', unit='arcsec**2', array=A)\n col5 = fits.Column(name='NET_COUNTS', format='D', unit='count', array=C)\n col6 = fits.Column(name='NET_ERR', format='D', unit='count', array=Cerr)\n col7 = fits.Column(name='NET_RATE', format='D', unit='count/s', array=R)\n col8 = fits.Column(name='ERR_RATE', format='D', unit='count/s', array=Rerr)\n col9 = fits.Column(name='SUR_BRI', format='D', unit='count/s/arcsec**2', array=S)\n col10 = fits.Column(name='SUR_BRI_ERR', format='1D', unit='count/s/arcsec**2', array=Serr)\n\n coldefs = [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]\n\n if expmap_file is not None:\n f = fits.open(expmap_file)\n ehdu = f[\"EXPMAP\"]\n wexp = wcs.WCS(header=ehdu.header)\n cel = w.all_pix2world(ctr[0], ctr[1], 1)\n ectr = wexp.all_world2pix(cel[0], cel[1], 1)\n exp = ehdu.data[:,:]\n nx, ny = exp.shape\n reblock = ehdu.header[\"CDELT2\"]/orig_dx\n x, y = np.mgrid[1:nx+1,1:ny+1]\n r = np.sqrt((x-ectr[0])**2 + (y-ectr[1])**2)\n f.close()\n E = np.histogram(r, bins=rr/reblock, weights=exp)[0] / np.histogram(r, bins=rr/reblock)[0]\n with np.errstate(invalid='ignore', divide='ignore'):\n F = R/E\n Ferr = Rerr/E\n SF = F/A\n SFerr = Ferr/A\n col11 = fits.Column(name='MEAN_SRC_EXP', format='D', unit='cm**2', array=E)\n col12 = fits.Column(name='NET_FLUX', format='D', unit='count/s/cm**2', array=F)\n col13 = fits.Column(name='NET_FLUX_ERR', format='D', unit='count/s/cm**2', array=Ferr)\n col14 = fits.Column(name='SUR_FLUX', format='D', unit='count/s/cm**2/arcsec**2', array=SF)\n col15 = fits.Column(name='SUR_FLUX_ERR', format='D', unit='count/s/cm**2/arcsec**2', array=SFerr)\n coldefs += [col11, col12, col13, col14, col15]\n\n tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(coldefs))\n tbhdu.name = \"PROFILE\"\n\n hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu])\n\n hdulist.writeto(out_file, overwrite=overwrite)\n\n\ncoord_types = {\"sky\": (\"X\", \"Y\", 2, 3),\n \"det\": (\"DETX\", \"DETY\", 6, 7)}\n\n\ndef write_image(evt_file, out_file, coord_type='sky', emin=None, emax=None,\n overwrite=False, expmap_file=None, reblock=1):\n r\"\"\"\n Generate a image by binning X-ray counts and write \n it to a FITS file.\n\n Parameters\n ----------\n evt_file : string\n The name of the input event file to 
read.\n out_file : string\n The name of the image file to write.\n coord_type : string, optional\n The type of coordinate to bin into an image. \n Can be \"sky\" or \"det\". Default: \"sky\"\n emin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional\n The minimum energy of the photons to put in the image, in keV.\n emax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional\n The maximum energy of the photons to put in the image, in keV.\n overwrite : boolean, optional\n Whether or not to overwrite an existing file with \n the same name. Default: False\n expmap_file : string, optional\n Supply an exposure map file to divide this image by\n to get a flux map. Default: None\n reblock : integer, optional\n Change this value to reblock the image to larger \n pixel sizes (reblock >= 1). Only supported for\n sky coordinates. Default: 1\n \"\"\"\n if emin is None:\n emin = 0.0\n else:\n emin = parse_value(emin, \"keV\")\n emin *= 1000.\n if emax is None:\n emax = 100.0\n else:\n emax = parse_value(emax, \"keV\")\n emax *= 1000.\n if coord_type == \"det\" and reblock > 1:\n raise RuntimeError(\"Reblocking images is not supported \"\n \"for detector coordinates!\")\n f = fits.open(evt_file)\n e = f[\"EVENTS\"].data[\"ENERGY\"]\n idxs = np.logical_and(e > emin, e < emax)\n xcoord, ycoord, xcol, ycol = coord_types[coord_type]\n x = f[\"EVENTS\"].data[xcoord][idxs]\n y = f[\"EVENTS\"].data[ycoord][idxs]\n exp_time = f[\"EVENTS\"].header[\"EXPOSURE\"]\n xmin = f[\"EVENTS\"].header[f\"TLMIN{xcol}\"]\n ymin = f[\"EVENTS\"].header[f\"TLMIN{ycol}\"]\n xmax = f[\"EVENTS\"].header[f\"TLMAX{xcol}\"]\n ymax = f[\"EVENTS\"].header[f\"TLMAX{ycol}\"]\n if coord_type == 'sky':\n xctr = f[\"EVENTS\"].header[f\"TCRVL{xcol}\"]\n yctr = f[\"EVENTS\"].header[f\"TCRVL{ycol}\"]\n xdel = f[\"EVENTS\"].header[f\"TCDLT{xcol}\"]*reblock\n ydel = f[\"EVENTS\"].header[f\"TCDLT{ycol}\"]*reblock\n f.close()\n\n nx = int(xmax-xmin)//reblock\n ny = int(ymax-ymin)//reblock\n\n xbins = np.linspace(xmin, xmax, nx+1, endpoint=True)\n ybins = np.linspace(ymin, ymax, ny+1, endpoint=True)\n\n H, xedges, yedges = np.histogram2d(x, y, bins=[xbins, ybins])\n\n if expmap_file is not None:\n if coord_type == \"det\":\n raise RuntimeError(\"Cannot divide by an exposure map for images \"\n \"binned in detector coordinates!\")\n f = fits.open(expmap_file)\n if f[\"EXPMAP\"].shape != (nx, ny):\n raise RuntimeError(\"Exposure map and image do not have the same shape!!\")\n with np.errstate(invalid='ignore', divide='ignore'):\n H /= f[\"EXPMAP\"].data.T\n H[np.isinf(H)] = 0.0\n H = np.nan_to_num(H)\n H[H < 0.0] = 0.0\n f.close()\n\n hdu = fits.PrimaryHDU(H.T)\n\n if coord_type == 'sky':\n hdu.header[\"MTYPE1\"] = \"EQPOS\"\n hdu.header[\"MFORM1\"] = \"RA,DEC\"\n hdu.header[\"CTYPE1\"] = \"RA---TAN\"\n hdu.header[\"CTYPE2\"] = \"DEC--TAN\"\n hdu.header[\"CRVAL1\"] = xctr\n hdu.header[\"CRVAL2\"] = yctr\n hdu.header[\"CUNIT1\"] = \"deg\"\n hdu.header[\"CUNIT2\"] = \"deg\"\n hdu.header[\"CDELT1\"] = xdel\n hdu.header[\"CDELT2\"] = ydel\n hdu.header[\"CRPIX1\"] = 0.5*(nx+1)\n hdu.header[\"CRPIX2\"] = 0.5*(ny+1)\n else:\n hdu.header[\"CUNIT1\"] = \"pixel\"\n hdu.header[\"CUNIT2\"] = \"pixel\"\n\n hdu.header[\"EXPOSURE\"] = exp_time\n hdu.name = \"IMAGE\"\n\n hdu.writeto(out_file, overwrite=overwrite)\n\n\ndef plot_spectrum(specfile, plot_energy=True, ebins=None, lw=2, \n xmin=None, xmax=None, ymin=None, ymax=None, \n xscale=None, yscale=None, label=None, \n fontsize=18, fig=None, ax=None, plot_counts=False,\n 
noerr=False, plot_used=False, **kwargs):\n \"\"\"\n Make a quick Matplotlib plot of a convolved spectrum\n from a file. A Matplotlib figure and axis is returned.\n\n Parameters\n ----------\n specfile : string\n The file to be opened for plotting.\n plot_energy : boolean, optional\n Whether to plot in energy or channel space. Default is\n to plot in energy, unless the RMF for the spectrum\n cannot be found.\n ebins : NumPy array, optional\n If set, these are the energy bin edges in which the spectrum\n will be binned. If not set, the counts will be binned according\n to channel. Default: None\n lw : float, optional\n The width of the lines in the plots. Default: 2.0 px.\n xmin : float, optional\n The left-most energy (in keV) or channel to plot. Default is the \n minimum value in the spectrum. \n xmax : float, optional\n The right-most energy (in keV) or channel to plot. Default is the \n maximum value in the spectrum. \n ymin : float, optional\n The lower extent of the y-axis. By default it is set automatically.\n ymax : float, optional\n The upper extent of the y-axis. By default it is set automatically.\n xscale : string, optional\n The scaling of the x-axis of the plot. Default: \"log\"\n yscale : string, optional\n The scaling of the y-axis of the plot. Default: \"log\"\n label : string, optional\n The label of the spectrum. Default: None\n fontsize : int\n Font size for labels and axes. Default: 18\n fig : :class:`~matplotlib.figure.Figure`, optional\n A Figure instance to plot in. Default: None, one will be\n created if not provided.\n ax : :class:`~matplotlib.axes.Axes`, optional\n An Axes instance to plot in. Default: None, one will be\n created if not provided.\n plot_counts : boolean, optional\n If set to True, the counts instead of the count rate will\n be plotted. Default: False\n noerr : boolean, optional\n If True, the spectrum will be plotted without errorbars. \n Default: False\n plot_used : boolean, optional\n If set to True, only the bins which contain more than 0 \n counts will be plotted. 
Default: False\n\n Returns\n -------\n A tuple of the :class:`~matplotlib.figure.Figure` and the \n :class:`~matplotlib.axes.Axes` objects.\n \"\"\"\n import matplotlib.pyplot as plt\n from soxs.instrument import RedistributionMatrixFile\n f = fits.open(specfile)\n hdu = f[\"SPECTRUM\"]\n chantype = hdu.header[\"CHANTYPE\"]\n y = hdu.data[\"COUNTS\"].astype(\"float64\")\n if plot_energy:\n rmf = hdu.header.get(\"RESPFILE\", None)\n if rmf is not None:\n rmf = RedistributionMatrixFile(rmf)\n e = 0.5*(rmf.ebounds_data[\"E_MIN\"]+rmf.ebounds_data[\"E_MAX\"])\n if ebins is None:\n xmid = e\n xerr = 0.5*(rmf.ebounds_data[\"E_MAX\"]-rmf.ebounds_data[\"E_MIN\"])\n else:\n xmid = 0.5*(ebins[1:]+ebins[:-1])\n xerr = 0.5 * np.diff(ebins)\n y = np.histogram(e, ebins, weights=y)[0].astype(\"float64\")\n xlabel = \"Energy (keV)\"\n else:\n raise RuntimeError(\"Cannot find the RMF associated with this \"\n \"spectrum, so I cannot plot in energy!\")\n else:\n xmid = hdu.data[chantype]\n xerr = 0.5\n xlabel = f\"Channel ({chantype})\"\n dx = 2.0*xerr\n yerr = np.sqrt(y)\n if not plot_counts:\n y /= hdu.header[\"EXPOSURE\"]\n yerr /= hdu.header[\"EXPOSURE\"]\n if plot_energy:\n yunit = \"keV\"\n y /= dx\n yerr /= dx\n else:\n yunit = \"bin\"\n f.close()\n if fig is None:\n fig = plt.figure(figsize=(10, 10))\n if xscale is None:\n if ax is None:\n xscale = \"log\"\n else:\n xscale = ax.get_xscale()\n if yscale is None:\n if ax is None:\n yscale = \"log\"\n else:\n yscale = ax.get_yscale()\n if ax is None:\n ax = fig.add_subplot(111)\n if plot_used:\n used = y > 0\n xmid = xmid[used]\n y = y[used]\n xerr = xerr[used]\n yerr = yerr[used]\n if noerr:\n ax.plot(xmid, y, lw=lw, label=label, **kwargs)\n else:\n ax.errorbar(xmid, y, yerr=yerr, xerr=xerr, lw=lw, label=label, **kwargs)\n ax.set_xscale(xscale)\n ax.set_yscale(yscale)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n ax.set_xlabel(xlabel, fontsize=fontsize)\n if plot_counts:\n ylabel = \"Counts (counts/{0})\"\n else:\n ylabel = \"Count Rate (counts/s/{0})\"\n ax.set_ylabel(ylabel.format(yunit), fontsize=fontsize)\n ax.tick_params(axis='both', labelsize=fontsize)\n return fig, ax\n\n\ndef plot_image(img_file, hdu=\"IMAGE\", stretch='linear', vmin=None, vmax=None,\n facecolor='black', center=None, width=None, figsize=(10, 10),\n cmap=None):\n \"\"\"\n Plot a FITS image created by SOXS using Matplotlib.\n\n Parameters\n ----------\n img_file : str\n The on-disk FITS image to plot. \n hdu : str or int, optional\n The image extension to plot. Default is \"IMAGE\"\n stretch : str, optional\n The stretch to apply to the colorbar scale. Options are \"linear\",\n \"log\", and \"sqrt\". Default: \"linear\"\n vmin : float, optional\n The minimum value of the colorbar. If not set, it will be the minimum\n value in the image.\n vmax : float, optional\n The maximum value of the colorbar. If not set, it will be the maximum\n value in the image.\n facecolor : str, optional\n The color of zero-valued pixels. Default: \"black\"\n center : array-like\n A 2-element object giving an (RA, Dec) coordinate for the center\n in degrees. If not set, the reference pixel of the image (usually\n the center) is used.\n width : float, optional\n The width of the image in degrees. If not set, the width of the\n entire image will be used.\n figsize : tuple, optional\n A 2-tuple giving the size of the image in inches, e.g. (12, 15).\n Default: (10,10)\n cmap : str, optional\n The colormap to be used. 
If not set, the default Matplotlib\n colormap will be used.\n\n Returns\n -------\n A tuple of the :class:`~matplotlib.figure.Figure` and the \n :class:`~matplotlib.axes.Axes` objects.\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import PowerNorm, LogNorm, Normalize\n from astropy.wcs.utils import proj_plane_pixel_scales\n from astropy.visualization.wcsaxes import WCSAxes\n if stretch == \"linear\":\n norm = Normalize(vmin=vmin, vmax=vmax)\n elif stretch == \"log\":\n norm = LogNorm(vmin=vmin, vmax=vmax)\n elif stretch == \"sqrt\":\n norm = PowerNorm(0.5, vmin=vmin, vmax=vmax)\n else:\n raise RuntimeError(f\"'{stretch}' is not a valid stretch!\")\n with fits.open(img_file) as f:\n hdu = f[hdu]\n w = wcs.WCS(hdu.header)\n pix_scale = proj_plane_pixel_scales(w)\n if center is None:\n center = w.wcs.crpix\n else:\n center = w.wcs_world2pix(center[0], center[1], 0)\n if width is None:\n dx_pix = 0.5*hdu.shape[0]\n dy_pix = 0.5*hdu.shape[1]\n else:\n dx_pix = width / pix_scale[0]\n dy_pix = width / pix_scale[1]\n fig = plt.figure(figsize=figsize)\n ax = WCSAxes(fig, [0.15, 0.1, 0.8, 0.8], wcs=w)\n fig.add_axes(ax)\n im = ax.imshow(hdu.data, norm=norm, cmap=cmap)\n ax.set_xlim(center[0] - 0.5*dx_pix, center[0] + 0.5*dx_pix)\n ax.set_ylim(center[1] - 0.5*dy_pix, center[1] + 0.5*dy_pix)\n ax.set_facecolor(facecolor)\n cbar = plt.colorbar(im)\n return fig, ax"
]
| [
[
"numpy.bincount",
"numpy.histogram",
"matplotlib.pyplot.colorbar",
"numpy.nan_to_num",
"numpy.logical_and",
"numpy.arange",
"numpy.sqrt",
"matplotlib.colors.PowerNorm",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.diff",
"scipy.ndimage.interpolation.rotate",
"matplotlib.colors.LogNorm",
"numpy.isinf",
"numpy.histogram2d",
"numpy.errstate",
"numpy.ones",
"matplotlib.colors.Normalize",
"numpy.abs",
"numpy.average",
"numpy.linspace"
]
]
|
kristijanbartol/SfMLearner | [
"ec3007d82a7d2205ec5e5ffb5fc99729d31faf88"
]
| [
"train.py"
]
| [
"from __future__ import division\nimport tensorflow as tf\nimport pprint\nimport random\nimport numpy as np\nfrom SfMLearner import SfMLearner\nimport os\n\nflags = tf.app.flags\nflags.DEFINE_string(\"dataset_dir\", \"\", \"Dataset directory\")\nflags.DEFINE_string(\"checkpoint_dir\", \"./checkpoints/\", \"Directory name to save the checkpoints\")\nflags.DEFINE_string(\"init_checkpoint_file\", None, \"Specific checkpoint file to initialize from\")\nflags.DEFINE_float(\"learning_rate\", 0.0002, \"Learning rate of for adam\")\nflags.DEFINE_float(\"beta1\", 0.9, \"Momentum term of adam\")\nflags.DEFINE_float(\"smooth_weight\", 0.5, \"Weight for smoothness\")\nflags.DEFINE_float(\"explain_reg_weight\", 0.0, \"Weight for explanability regularization\")\nflags.DEFINE_integer(\"batch_size\", 4, \"The size of of a sample batch\")\nflags.DEFINE_integer(\"img_height\", 128, \"Image height\")\nflags.DEFINE_integer(\"img_width\", 416, \"Image width\")\nflags.DEFINE_integer(\"seq_length\", 3, \"Sequence length for each example\")\nflags.DEFINE_integer(\"max_steps\", 200000, \"Maximum number of training iterations\")\nflags.DEFINE_integer(\"summary_freq\", 100, \"Logging every log_freq iterations\")\nflags.DEFINE_integer(\"save_latest_freq\", 5000, \\\n \"Save the latest model every save_latest_freq iterations (overwrites the previous latest model)\")\nflags.DEFINE_integer(\"num_source\", 2, \"Number of source images\")\nflags.DEFINE_integer(\"num_scales\", 4, \"Number of scaling points\")\nflags.DEFINE_boolean(\"continue_train\", False, \"Continue training from previous checkpoint\")\nFLAGS = flags.FLAGS\n\ndef main(_):\n seed = 8964\n tf.set_random_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n pp = pprint.PrettyPrinter()\n pp.pprint(flags.FLAGS.__flags)\n \n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n \n sfm = SfMLearner()\n sfm.train(FLAGS)\n\nif __name__ == '__main__':\n tf.app.run()\n"
]
| [
[
"tensorflow.set_random_seed",
"tensorflow.app.run",
"numpy.random.seed"
]
]
|
InnovArul/DIGITS | [
"c69d709e2eba01f2cf1a9556d7838933103a3b31"
]
| [
"tools/create_generic_db.py"
]
| [
"#!/usr/bin/env python2\n# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\n\nimport argparse\n# Find the best implementation available\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\nimport lmdb\nimport logging\nimport numpy as np\nimport os\nimport PIL.Image\nimport Queue\nimport sys\nimport threading\n\n# Add path for DIGITS package\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport digits.config\ndigits.config.load_config()\nfrom digits import extensions, log\nfrom digits.job import Job\n\n# Run load_config() first to set the path to Caffe\nimport caffe.io\nimport caffe_pb2\n\nlogger = logging.getLogger('digits.tools.create_dataset')\n\n\nclass DbWriter(threading.Thread):\n \"\"\"\n Abstract class for writing to databases\n \"\"\"\n\n def __init__(self, output_dir, total_records=None):\n self._dir = output_dir\n self.write_queue = Queue.Queue(10)\n # sequence number\n self.seqn = 0\n self.total_records = total_records\n self.done = False\n threading.Thread.__init__(self)\n\n def write_batch_threadsafe(self, batch):\n \"\"\"\n This function writes a batch of data into the database\n This may be called from multiple threads\n \"\"\"\n self.write_queue.put(batch)\n\n def set_done(self):\n \"\"\"\n Instructs writer thread to complete after queue becomes empty\n \"\"\"\n self.done = True\n\n def run(self):\n \"\"\"\n DB Writer thread entry point\n \"\"\"\n while True:\n try:\n batch = self.write_queue.get(timeout=0.1)\n except Queue.Empty:\n if self.done:\n # break out of main loop and terminate\n break\n else:\n # just keep looping\n continue\n self.write_batch_threadunsafe(batch)\n\n\nclass LmdbWriter(DbWriter):\n\n def __init__(self,\n dataset_dir,\n stage,\n feature_encoding,\n label_encoding,\n **kwargs):\n self.stage = stage\n db_dir = os.path.join(dataset_dir, stage)\n if not os.path.exists(db_dir):\n os.makedirs(db_dir)\n super(LmdbWriter, self).__init__(dataset_dir, **kwargs)\n\n # create LMDB for features\n self.feature_db = self.create_lmdb(\"features\")\n # will create LMDB for labels later if necessary\n self.label_db = None\n # encoding\n self.feature_encoding = feature_encoding\n self.label_encoding = label_encoding\n\n def create_lmdb(self, db_type):\n sub_dir = os.path.join(self.stage, db_type)\n db_dir = os.path.join(self._dir, sub_dir)\n db = lmdb.open(\n db_dir,\n map_async=True,\n max_dbs=0)\n logger.info('Created %s db for stage %s in %s' % (db_type,\n self.stage,\n sub_dir))\n return db\n\n def array_to_datum(self, data, scalar_label, encoding):\n if data.ndim != 3:\n raise ValueError('Invalid number of dimensions: %d' % data.ndim)\n if encoding == 'none':\n if data.shape[0] == 3:\n # RGB to BGR\n # XXX see issue #59\n data = data[[2, 1, 0], ...]\n datum = caffe.io.array_to_datum(data, scalar_label)\n else:\n # Transpose to (height, width, channel)\n data = data.transpose((1, 2, 0))\n datum = caffe_pb2.Datum()\n datum.height = data.shape[0]\n datum.width = data.shape[1]\n datum.channels = data.shape[2]\n datum.label = scalar_label\n if data.shape[2] == 1:\n # grayscale\n data = data[:, :, 0]\n s = StringIO()\n if encoding == 'png':\n PIL.Image.fromarray(data).save(s, format='PNG')\n elif encoding == 'jpg':\n PIL.Image.fromarray(data).save(s, format='JPEG', quality=90)\n else:\n raise ValueError('Invalid encoding type')\n datum.data = s.getvalue()\n datum.encoded = True\n return datum\n\n def write_batch(self, batch):\n \"\"\"\n encode data into datum objects\n this may be 
called from multiple encoder threads\n \"\"\"\n datums = []\n for (feature, label) in batch:\n # restrict features to 3D data (Caffe Datum objects)\n if feature.ndim != 3:\n raise ValueError(\"LMDB/Caffe expect 3D data - ndim=%d\" % feature.ndim)\n # restrict labels to 3D data (Caffe Datum objects) or scalars\n if not (label.ndim == 3 or label.size == 1):\n raise ValueError(\"LMDB/Caffe expect 3D or scalar label - ndim=%d\" % label.ndim)\n if label.size > 1:\n label_datum = self.array_to_datum(\n label,\n 0,\n self.label_encoding)\n # setting label to 0 - it will be unused as there is\n # a dedicated label DB\n label = 0\n else:\n label = label[0]\n label_datum = None\n feature_datum = self.array_to_datum(\n feature,\n label,\n self.feature_encoding)\n datums.append(\n (feature_datum.SerializeToString(),\n label_datum.SerializeToString() if label_datum else None))\n self.write_batch_threadsafe(datums)\n\n def write_batch_threadunsafe(self, batch):\n \"\"\"\n Write batch do DB, this must only be called from the writer thread\n \"\"\"\n feature_datums = []\n label_datums = []\n for (feature, label) in batch:\n key = \"%09d\" % self.seqn\n if label is not None:\n if self.label_db is None:\n self.label_db = self.create_lmdb(\"labels\")\n label_datums.append((key, label))\n feature_datums.append((key, feature))\n self.seqn += 1\n self.write_datums(self.feature_db, feature_datums)\n if len(label_datums) > 0:\n self.write_datums(self.label_db, label_datums)\n logger.info('Processed %d/%d' % (self.seqn, self.total_records))\n\n def write_datums(self, db, batch):\n try:\n with db.begin(write=True) as lmdb_txn:\n for key, datum in batch:\n lmdb_txn.put(key, datum)\n except lmdb.MapFullError:\n # double the map_size\n curr_limit = db.info()['map_size']\n new_limit = curr_limit*2\n logger.info(\n 'Doubling LMDB map size to %sMB ...' 
% (new_limit >> 20,))\n try:\n db.set_mapsize(new_limit) # double it\n except AttributeError as e:\n version = tuple(int(x) for x in lmdb.__version__.split('.'))\n if version < (0, 87):\n raise ValueError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)\n else:\n raise e\n # try again\n self.write_datums(db, batch)\n\n\nclass Encoder(threading.Thread):\n def __init__(self, queue, writer, extension, error_queue):\n self.extension = extension\n self.queue = queue\n self.writer = writer\n self.label_shape = None\n self.feature_shape = None\n self.feature_sum = None\n self.processed_count = 0\n self.error_queue = error_queue\n threading.Thread.__init__(self)\n\n def run(self):\n data = []\n while True:\n # get entry ID\n # don't block- if the queue is empty then we're done\n try:\n batch = self.queue.get_nowait()\n except Queue.Empty:\n # break out of main loop and terminate\n break\n\n try:\n data = []\n for entry_id in batch:\n # call into extension to format entry into number arrays\n feature, label = self.extension.encode_entry(entry_id)\n\n # check feature and label shapes\n if self.feature_shape is None:\n self.feature_shape = feature.shape\n self.feature_sum = np.zeros(self.feature_shape, np.float64)\n elif self.feature_shape != feature.shape:\n raise ValueError(\"Feature shape mismatch (last:%s, previous:%s)\" % (repr(feature.shape), repr(self.feature_shape)))\n if self.label_shape is None:\n self.label_shape = label.shape\n elif self.label_shape != label.shape:\n raise ValueError(\"Label shape mismatch (last:%s, previous:%s)\" % (repr(label.shape), repr(self.label_shape)))\n\n # accumulate sum for mean file calculation\n self.feature_sum += feature\n\n # aggregate data\n data.append((feature, label))\n\n self.processed_count += 1\n\n if len(data) >= 0:\n # write data\n self.writer.write_batch(data)\n except Exception as e:\n self.error_queue.put('%s: %s' % (type(e).__name__, e.message))\n raise\n\nclass DbCreator(object):\n\n def create_db(self, extension, stage, dataset_dir, batch_size, num_threads, feature_encoding, label_encoding):\n # retrieve itemized list of entries\n entry_ids = extension.itemize_entries(stage)\n entry_count = len(entry_ids)\n\n if entry_count > 0:\n # create a queue to write errors to\n error_queue = Queue.Queue()\n\n # create db writer\n writer = LmdbWriter(\n dataset_dir,\n stage,\n total_records=entry_count,\n feature_encoding=feature_encoding,\n label_encoding=label_encoding)\n writer.daemon = True\n writer.start()\n\n # create and fill encoder queue\n encoder_queue = Queue.Queue()\n batch = []\n for entry_id in entry_ids:\n batch.append(entry_id)\n if len(batch) >= batch_size:\n # queue this batch\n encoder_queue.put(batch)\n batch = []\n if len(batch) > 0:\n # queue any remaining entries\n encoder_queue.put(batch)\n\n # create encoder threads\n encoders = []\n for _ in xrange(num_threads):\n encoder = Encoder(encoder_queue, writer, extension, error_queue)\n encoder.daemon = True\n encoder.start()\n encoders.append(encoder)\n\n # wait for all encoder threads to complete and aggregate data\n feature_sum = None\n processed_count = 0\n feature_shape = None\n label_shape = None\n for encoder in encoders:\n encoder.join()\n # catch errors that may have occurred in reader thread\n if not error_queue.empty():\n while not error_queue.empty():\n err = error_queue.get()\n logger.error(err)\n raise Exception(err)\n if feature_sum is None:\n feature_sum = encoder.feature_sum\n elif encoder.feature_sum is not None:\n feature_sum += encoder.feature_sum\n if 
feature_shape is None:\n feature_shape = encoder.feature_shape\n logger.info('Feature shape for stage %s: %s' % (stage, repr(feature_shape)))\n elif encoder.feature_shape is not None:\n if feature_shape != encoder.feature_shape:\n raise ValueError(\"Feature shape mismatch (last:%s, previous:%s)\" % (repr(feature_shape), repr(encoder.feature_shape)))\n if label_shape is None:\n label_shape = encoder.label_shape\n logger.info('Label shape for stage %s: %s' % (stage, repr(label_shape)))\n elif encoder.label_shape is not None:\n if label_shape != encoder.label_shape:\n raise ValueError(\"Label shape mismatch (last:%s, previous:%s)\" % (repr(label_shape), repr(encoder.label_shape)))\n processed_count += encoder.processed_count\n\n # write mean file\n if feature_sum is not None:\n self.save_mean(feature_sum, processed_count, dataset_dir, stage)\n\n # wait for writer thread to complete\n writer.set_done()\n writer.join()\n\n if processed_count != entry_count:\n # TODO: handle this more gracefully\n raise ValueError('Number of processed entries (%d) does not match entry count (%d)' % (processed_count, entry_count))\n\n logger.info('Found %d entries for stage %s' % (processed_count, stage))\n\n\n def save_mean(self, feature_sum, entry_count, dataset_dir, stage):\n \"\"\"\n Save mean to file\n \"\"\"\n data = np.around(feature_sum / entry_count).astype(np.uint8)\n mean_file = os.path.join(stage, 'mean.binaryproto')\n # Transform to caffe's format requirements\n if data.ndim == 3:\n if data.shape[0] == 3:\n # channel swap\n # XXX see issue #59\n data = data[[2, 1, 0], ...]\n elif data.ndim == 2:\n # Add a channels axis\n data = data[np.newaxis, :, :]\n\n blob = caffe_pb2.BlobProto()\n blob.num = 1\n blob.channels, blob.height, blob.width = data.shape\n blob.data.extend(data.astype(float).flat)\n\n with open(os.path.join(dataset_dir, mean_file), 'wb') as outfile:\n outfile.write(blob.SerializeToString())\n\n logger.info('Created mean file for stage %s in %s' % (stage, mean_file))\n\n\ndef create_generic_db(jobs_dir, dataset_id, stage):\n \"\"\"\n Create a generic DB\n \"\"\"\n\n # job directory defaults to that defined in DIGITS config\n if jobs_dir == 'none':\n jobs_dir = digits.config.config_value('jobs_dir')\n\n # load dataset job\n dataset_dir = os.path.join(jobs_dir, dataset_id)\n if not os.path.isdir(dataset_dir):\n raise IOError(\"Dataset dir %s does not exist\" % dataset_dir)\n dataset = Job.load(dataset_dir)\n\n # create instance of extension\n extension_id = dataset.extension_id\n extension_class = extensions.data.get_extension(extension_id)\n extension = extension_class(**dataset.extension_userdata)\n\n # encoding\n feature_encoding = dataset.feature_encoding\n label_encoding = dataset.label_encoding\n\n batch_size = dataset.batch_size\n num_threads = dataset.num_threads\n\n # create main DB creator object and execute main method\n db_creator = DbCreator()\n db_creator.create_db(\n extension,\n stage,\n dataset_dir,\n batch_size,\n num_threads,\n feature_encoding,\n label_encoding)\n\n logger.info('Generic DB creation Done')\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='DB creation tool - DIGITS')\n\n ### Positional arguments\n\n parser.add_argument(\n 'dataset',\n help='Dataset Job ID')\n\n ### Optional arguments\n parser.add_argument(\n '-j',\n '--jobs_dir',\n default='none',\n help='Jobs directory (default: from DIGITS config)',\n )\n\n parser.add_argument(\n '-s',\n '--stage',\n default='train',\n help='Stage (train, val, test)',\n )\n\n args = 
vars(parser.parse_args())\n\n try:\n create_generic_db(\n args['jobs_dir'],\n args['dataset'],\n args['stage']\n )\n except Exception as e:\n logger.error('%s: %s' % (type(e).__name__, e.message))\n raise\n"
]
| [
[
"numpy.around",
"numpy.zeros"
]
]
|
jesnyder/bioMaking | [
"b38d794de255db40634767d77cad8da26787ba34"
]
| [
"code/python/c0002_html_table.py"
]
| [
"\r\nimport os\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom prettytable import PrettyTable\r\n\r\ndef html_table():\r\n\r\n patent_path = os.path.join('searchResults', 'patents')\r\n patent_files = os.listdir(patent_path)\r\n\r\n for file in patent_files:\r\n\r\n if file == patent_files[-1]:\r\n\r\n patent_file = os.path.join(patent_path, file)\r\n print(patent_file)\r\n df = pd.read_csv(patent_file)\r\n del df['Unnamed: 0']\r\n del df['description']\r\n del df['claims']\r\n del df['abstract']\r\n del df['inventors']\r\n # del df['url']\r\n\r\n html_code = df.to_html(render_links=True)\r\n\r\n\r\n \"\"\"\r\n\r\n x = PrettyTable()\r\n\r\n read_file = open(patent_file, 'r')\r\n for line in read_file:\r\n line_split = line.split(',')\r\n # print(line)\r\n\r\n x.add_row([line_split[0], line_split[1], line_split[3]])\r\n\r\n html_code = x.get_html_string()\r\n \"\"\"\r\n\r\n html_path = os.path.join('code')\r\n if not os.path.isdir(html_path): os.mkdir(html_path)\r\n html_path = os.path.join('code', 'html')\r\n if not os.path.isdir(html_path): os.mkdir(html_path)\r\n html_file = os.path.join(html_path, 'table_' + 'patents' + '.html')\r\n\r\n\r\n\r\n with open(html_file, 'w') as myFile:\r\n myFile.write('<html>')\r\n myFile.write('<body>')\r\n myFile.write('<table>')\r\n\r\n for line in html_code:\r\n myFile.write(line)\r\n # myFile.write('/n')\r\n\r\n myFile.write('</tr>')\r\n myFile.write('</table>')\r\n myFile.write('</body>')\r\n myFile.write('</html>')\r\n\r\n myFile.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"\r\n df.to_csv(patent_file)\r\n print('patentsRetrieved saved: ' + patent_file)\r\n\r\n ref_path = os.path.join( 'metadata')\r\n ref_file = os.path.join(ref_path, 'ref.csv')\r\n df = pd.read_csv(ref_file)\r\n\r\n variableNames = list(df['name'])\r\n variableValues = list(df['value'])\r\n\r\n value = 0\r\n for i in range(len(variableNames)):\r\n if variableName == variableNames[i]:\r\n value = variableValues[i]\r\n break\r\n\r\n # print('value = ' + str(value))\r\n return value\r\n \"\"\"\r\n"
]
| [
[
"pandas.read_csv"
]
]
|
nlpyang/NoisySumm | [
"8d5f71531f871841b9a088f8ade1c8f80fb437fc"
]
| [
"src/decode_seq2seq.py"
]
| [
"\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport json\nimport logging\nimport argparse\nimport math\nfrom tqdm import tqdm, trange\nimport numpy as np\nimport torch\nimport random\nimport pickle\n\nfrom s2s_ft.modeling_decoding import BertForSeq2SeqDecoder, BertConfig\nfrom transformers.tokenization_bert import whitespace_tokenize\nimport s2s_ft.s2s_loader as seq2seq_loader\nfrom s2s_ft.utils import load_and_cache_examples\nfrom transformers import \\\n BertTokenizer, RobertaTokenizer\nfrom s2s_ft.tokenization_unilm import UnilmTokenizer\nfrom s2s_ft.tokenization_minilm import MinilmTokenizer\n\nTOKENIZER_CLASSES = {\n 'bert': BertTokenizer,\n 'minilm': MinilmTokenizer,\n 'roberta': RobertaTokenizer,\n 'unilm': UnilmTokenizer,\n}\n\nclass WhitespaceTokenizer(object):\n def tokenize(self, text):\n return whitespace_tokenize(text)\n\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef detokenize(tk_list):\n r_list = []\n for tk in tk_list:\n if tk.startswith('##') and len(r_list) > 0:\n r_list[-1] = r_list[-1] + tk[2:]\n else:\n r_list.append(tk)\n return r_list\n\n\ndef ascii_print(text):\n text = text.encode(\"ascii\", \"ignore\")\n print(text)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(TOKENIZER_CLASSES.keys()))\n parser.add_argument(\"--model_path\", default=None, type=str, required=True,\n help=\"Path to the model checkpoint.\")\n parser.add_argument(\"--config_path\", default=None, type=str,\n help=\"Path to config.json for the model.\")\n\n # tokenizer_name\n parser.add_argument(\"--tokenizer_name\", default=None, type=str, required=True, \n help=\"tokenizer name\")\n parser.add_argument(\"--max_seq_length\", default=512, type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n\n # decoding parameters\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--amp', action='store_true',\n help=\"Whether to use amp for fp16\")\n parser.add_argument(\"--input_file\", type=str, help=\"Input file\")\n parser.add_argument('--subset', type=int, default=0,\n help=\"Decode a subset of the input dataset.\")\n parser.add_argument(\"--output_file\", type=str, help=\"output file\")\n parser.add_argument(\"--split\", type=str, default=\"\",\n help=\"Data split (train/val/test).\")\n parser.add_argument('--tokenized_input', action='store_true',\n help=\"Whether the input is tokenized.\")\n parser.add_argument('--seed', type=int, default=123,\n help=\"random seed for initialization\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument('--batch_size', type=int, default=4,\n help=\"Batch size for decoding.\")\n parser.add_argument('--beam_size', type=int, default=1,\n help=\"Beam size for searching\")\n parser.add_argument('--length_penalty', type=float, default=0,\n help=\"Length penalty for beam search\")\n\n parser.add_argument('--forbid_duplicate_ngrams', action='store_true')\n parser.add_argument('--forbid_ignore_word', type=str, default=None,\n help=\"Forbid the word during forbid_duplicate_ngrams\")\n parser.add_argument(\"--min_len\", default=1, type=int)\n parser.add_argument('--need_score_traces', action='store_true')\n parser.add_argument('--ngram_size', type=int, default=3)\n parser.add_argument('--mode', default=\"s2s\",\n choices=[\"s2s\", \"l2r\", \"both\"])\n parser.add_argument('--max_tgt_length', type=int, default=128,\n help=\"maximum length of target sequence\")\n parser.add_argument('--s2s_special_token', action='store_true',\n help=\"New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.\")\n parser.add_argument('--s2s_add_segment', action='store_true',\n help=\"Additional segmental for the encoder of S2S.\")\n parser.add_argument('--s2s_share_segment', action='store_true',\n help=\"Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).\")\n parser.add_argument('--pos_shift', action='store_true',\n help=\"Using position shift for fine-tuning.\")\n parser.add_argument(\"--cache_dir\", default=None, type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n\n args = parser.parse_args()\n\n if args.need_score_traces and args.beam_size <= 1:\n raise ValueError(\n \"Score trace is only available for beam search with beam size > 1.\")\n if args.max_tgt_length >= args.max_seq_length - 2:\n raise ValueError(\"Maximum tgt length exceeds max seq length - 2.\")\n\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n\n if args.seed > 0:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n else:\n random_seed = random.randint(0, 10000)\n logger.info(\"Set random seed as: {}\".format(random_seed))\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n \n tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained(\n args.tokenizer_name, do_lower_case=args.do_lower_case, \n 
cache_dir=args.cache_dir if args.cache_dir else None, max_len=args.max_seq_length)\n\n if args.model_type == \"roberta\":\n vocab = tokenizer.encoder\n else:\n vocab = tokenizer.vocab\n\n\n config_file = args.config_path if args.config_path else os.path.join(args.model_path, \"config.json\")\n logger.info(\"Read decoding config from: %s\" % config_file)\n config = BertConfig.from_json_file(config_file)\n\n bi_uni_pipeline = []\n bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(\n list(vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length,\n max_tgt_length=args.max_tgt_length, pos_shift=args.pos_shift,\n source_type_id=config.source_type_id, target_type_id=config.target_type_id, \n cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token))\n\n mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids(\n [tokenizer.mask_token, tokenizer.sep_token, tokenizer.sep_token])\n forbid_ignore_set = None\n if args.forbid_ignore_word:\n w_list = []\n for w in args.forbid_ignore_word.split('|'):\n if w.startswith('[') and w.endswith(']'):\n w_list.append(w.upper())\n else:\n w_list.append(w)\n forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list))\n print(args.model_path)\n found_checkpoint_flag = False\n for model_recover_path in [args.model_path.strip()]:\n logger.info(\"***** Recover model: %s *****\", model_recover_path)\n found_checkpoint_flag = True\n model = BertForSeq2SeqDecoder.from_pretrained(\n model_recover_path, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size,\n length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id,\n forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set,\n ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode,\n max_position_embeddings=args.max_seq_length, pos_shift=args.pos_shift, \n )\n\n if args.fp16:\n model.half()\n model.to(device)\n if n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n torch.cuda.empty_cache()\n model.eval()\n next_i = 0\n max_src_length = args.max_seq_length - 2 - args.max_tgt_length\n\n to_pred = load_and_cache_examples(\n args.input_file, tokenizer, local_rank=-1, \n cached_features_file=None, shuffle=False)\n\n input_lines = []\n for line in to_pred:\n _line = tokenizer.convert_ids_to_tokens(line[\"source_ids\"])[:max_src_length]\n\n input_lines.append(_line)\n if args.subset > 0:\n logger.info(\"Decoding subset: %d\", args.subset)\n input_lines = input_lines[:args.subset]\n\n input_lines = sorted(list(enumerate(input_lines)),\n key=lambda x: -len(x[1]))\n output_lines = [\"\"] * len(input_lines)\n score_trace_list = [None] * len(input_lines)\n total_batch = math.ceil(len(input_lines) / args.batch_size)\n\n with tqdm(total=total_batch) as pbar:\n batch_count = 0\n first_batch = True\n while next_i < len(input_lines):\n _chunk = input_lines[next_i:next_i + args.batch_size]\n buf_id = [x[0] for x in _chunk]\n buf = [x[1] for x in _chunk]\n next_i += args.batch_size\n batch_count += 1\n max_a_len = max([len(x) for x in buf])\n instances = []\n for instance in [(x, max_a_len) for x in buf]:\n for proc in bi_uni_pipeline:\n instances.append(proc(instance))\n with torch.no_grad():\n batch = seq2seq_loader.batch_list_to_batch_tensors(\n instances)\n batch = [\n t.to(device) if t is not None else None for t in batch]\n input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch\n traces = model(input_ids, token_type_ids,\n position_ids, input_mask, 
task_idx=task_idx, mask_qkv=mask_qkv)\n if args.beam_size > 1:\n traces = {k: v.tolist() for k, v in traces.items()}\n output_ids = traces['pred_seq']\n else:\n output_ids = traces.tolist()\n for i in range(len(buf)):\n w_ids = output_ids[i]\n output_buf = tokenizer.convert_ids_to_tokens(w_ids)\n output_tokens = []\n for t in output_buf:\n if t in (tokenizer.sep_token, tokenizer.pad_token):\n break\n output_tokens.append(t)\n if args.model_type == \"roberta\":\n output_sequence = tokenizer.convert_tokens_to_string(output_tokens)\n else:\n output_sequence = ' '.join(detokenize(output_tokens))\n if '\\n' in output_sequence:\n output_sequence = \" [X_SEP] \".join(output_sequence.split('\\n'))\n output_lines[buf_id[i]] = output_sequence\n if first_batch or batch_count % 50 == 0:\n logger.info(\"{} = {}\".format(buf_id[i], output_sequence))\n if args.need_score_traces:\n score_trace_list[buf_id[i]] = {\n 'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}\n pbar.update(1)\n first_batch = False\n if args.output_file:\n fn_out = args.output_file\n else:\n fn_out = model_recover_path+'.'+args.split\n with open(fn_out, \"w\", encoding=\"utf-8\") as fout:\n for l in output_lines:\n fout.write(l)\n fout.write(\"\\n\")\n\n if args.need_score_traces:\n with open(fn_out + \".trace.pickle\", \"wb\") as fout_trace:\n pickle.dump(\n {\"version\": 0.0, \"num_samples\": len(input_lines)}, fout_trace)\n for x in score_trace_list:\n pickle.dump(x, fout_trace)\n\n if not found_checkpoint_flag:\n logger.info(\"Not found the model checkpoint file!\")\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.no_grad",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
]
|
caspervdw/Shapely | [
"a0f8a31a18d5384f0a04ee8e8b2d089b37c186cc"
]
| [
"shapely/io.py"
]
| [
"import numpy as np\n\nfrom . import Geometry # noqa\nfrom . import lib\nfrom .decorators import requires_geos\nfrom .enum import ParamEnum\n\n\n__all__ = [\n \"from_geojson\",\n \"from_wkb\",\n \"from_wkt\",\n \"to_geojson\",\n \"to_wkb\",\n \"to_wkt\",\n]\n\n\n# Allowed options for handling WKB/WKT decoding errors\n# Note: cannot use standard constructor since \"raise\" is a keyword\nDecodingErrorOptions = ParamEnum(\n \"DecodingErrorOptions\", {\"ignore\": 0, \"warn\": 1, \"raise\": 2}\n)\n\n\ndef to_wkt(\n geometry,\n rounding_precision=6,\n trim=True,\n output_dimension=3,\n old_3d=False,\n **kwargs,\n):\n \"\"\"\n Converts to the Well-Known Text (WKT) representation of a Geometry.\n\n The Well-known Text format is defined in the `OGC Simple Features\n Specification for SQL <https://www.opengeospatial.org/standards/sfs>`__.\n\n The following limitations apply to WKT serialization:\n\n - for GEOS <= 3.8 a multipoint with an empty sub-geometry will raise an exception\n - for GEOS <= 3.8 empty geometries are always serialized to 2D\n - for GEOS >= 3.9 only simple empty geometries can be 3D, collections are still\n always 2D\n\n Parameters\n ----------\n geometry : Geometry or array_like\n rounding_precision : int, default 6\n The rounding precision when writing the WKT string. Set to a value of\n -1 to indicate the full precision.\n trim : bool, default True\n If True, trim unnecessary decimals (trailing zeros).\n output_dimension : int, default 3\n The output dimension for the WKT string. Supported values are 2 and 3.\n Specifying 3 means that up to 3 dimensions will be written but 2D\n geometries will still be represented as 2D in the WKT string.\n old_3d : bool, default False\n Enable old style 3D/4D WKT generation. By default, new style 3D/4D WKT\n (ie. \"POINT Z (10 20 30)\") is returned, but with ``old_3d=True``\n the WKT will be formatted in the style \"POINT (10 20 30)\".\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n Examples\n --------\n >>> to_wkt(Geometry(\"POINT (0 0)\"))\n 'POINT (0 0)'\n >>> to_wkt(Geometry(\"POINT (0 0)\"), rounding_precision=3, trim=False)\n 'POINT (0.000 0.000)'\n >>> to_wkt(Geometry(\"POINT (0 0)\"), rounding_precision=-1, trim=False)\n 'POINT (0.0000000000000000 0.0000000000000000)'\n >>> to_wkt(Geometry(\"POINT (1 2 3)\"), trim=True)\n 'POINT Z (1 2 3)'\n >>> to_wkt(Geometry(\"POINT (1 2 3)\"), trim=True, output_dimension=2)\n 'POINT (1 2)'\n >>> to_wkt(Geometry(\"POINT (1 2 3)\"), trim=True, old_3d=True)\n 'POINT (1 2 3)'\n\n Notes\n -----\n The defaults differ from the default of the GEOS library. 
To mimic this,\n use::\n\n to_wkt(geometry, rounding_precision=-1, trim=False, output_dimension=2)\n\n \"\"\"\n if not np.isscalar(rounding_precision):\n raise TypeError(\"rounding_precision only accepts scalar values\")\n if not np.isscalar(trim):\n raise TypeError(\"trim only accepts scalar values\")\n if not np.isscalar(output_dimension):\n raise TypeError(\"output_dimension only accepts scalar values\")\n if not np.isscalar(old_3d):\n raise TypeError(\"old_3d only accepts scalar values\")\n\n return lib.to_wkt(\n geometry,\n np.intc(rounding_precision),\n np.bool_(trim),\n np.intc(output_dimension),\n np.bool_(old_3d),\n **kwargs,\n )\n\n\ndef to_wkb(\n geometry, hex=False, output_dimension=3, byte_order=-1, include_srid=False, **kwargs\n):\n r\"\"\"\n Converts to the Well-Known Binary (WKB) representation of a Geometry.\n\n The Well-Known Binary format is defined in the `OGC Simple Features\n Specification for SQL <https://www.opengeospatial.org/standards/sfs>`__.\n\n The following limitations apply to WKB serialization:\n\n - linearrings will be converted to linestrings\n - a point with only NaN coordinates is converted to an empty point\n - for GEOS <= 3.7, empty points are always serialized to 3D if\n output_dimension=3, and to 2D if output_dimension=2\n - for GEOS == 3.8, empty points are always serialized to 2D\n\n Parameters\n ----------\n geometry : Geometry or array_like\n hex : bool, default False\n If true, export the WKB as a hexidecimal string. The default is to\n return a binary bytes object.\n output_dimension : int, default 3\n The output dimension for the WKB. Supported values are 2 and 3.\n Specifying 3 means that up to 3 dimensions will be written but 2D\n geometries will still be represented as 2D in the WKB represenation.\n byte_order : int, default -1\n Defaults to native machine byte order (-1). 
Use 0 to force big endian\n and 1 for little endian.\n include_srid : bool, default False\n If True, the SRID is be included in WKB (this is an extension\n to the OGC WKB specification).\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n Examples\n --------\n >>> to_wkb(Geometry(\"POINT (1 1)\"), byte_order=1)\n b'\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf0?\\x00\\x00\\x00\\x00\\x00\\x00\\xf0?'\n >>> to_wkb(Geometry(\"POINT (1 1)\"), hex=True, byte_order=1)\n '0101000000000000000000F03F000000000000F03F'\n \"\"\"\n if not np.isscalar(hex):\n raise TypeError(\"hex only accepts scalar values\")\n if not np.isscalar(output_dimension):\n raise TypeError(\"output_dimension only accepts scalar values\")\n if not np.isscalar(byte_order):\n raise TypeError(\"byte_order only accepts scalar values\")\n if not np.isscalar(include_srid):\n raise TypeError(\"include_srid only accepts scalar values\")\n\n return lib.to_wkb(\n geometry,\n np.bool_(hex),\n np.intc(output_dimension),\n np.intc(byte_order),\n np.bool_(include_srid),\n **kwargs,\n )\n\n\n@requires_geos(\"3.10.0\")\ndef to_geojson(geometry, indent=None, **kwargs):\n \"\"\"Converts to the GeoJSON representation of a Geometry.\n\n The GeoJSON format is defined in the `RFC 7946 <https://geojson.org/>`__.\n NaN (not-a-number) coordinates will be written as 'null'.\n\n The following are currently unsupported:\n\n - Geometries of type LINEARRING: these are output as 'null'.\n - Three-dimensional geometries: the third dimension is ignored.\n\n Parameters\n ----------\n geometry : str, bytes or array_like\n indent : int, optional\n If indent is a non-negative integer, then GeoJSON will be formatted.\n An indent level of 0 will only insert newlines. 
None (the default)\n selects the most compact representation.\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n Examples\n --------\n >>> to_geojson(Geometry(\"POINT (1 1)\"))\n '{\"type\":\"Point\",\"coordinates\":[1.0,1.0]}'\n >>> print(to_geojson(Geometry(\"POINT (1 1)\"), indent=2))\n {\n \"type\": \"Point\",\n \"coordinates\": [\n 1.0,\n 1.0\n ]\n }\n \"\"\"\n # GEOS Tickets:\n # - handle linearrings: https://trac.osgeo.org/geos/ticket/1140\n # - support 3D: https://trac.osgeo.org/geos/ticket/1141\n if indent is None:\n indent = -1\n elif not np.isscalar(indent):\n raise TypeError(\"indent only accepts scalar values\")\n elif indent < 0:\n raise ValueError(\"indent cannot be negative\")\n\n return lib.to_geojson(geometry, np.intc(indent), **kwargs)\n\n\ndef from_wkt(geometry, on_invalid=\"raise\", **kwargs):\n \"\"\"\n Creates geometries from the Well-Known Text (WKT) representation.\n\n The Well-known Text format is defined in the `OGC Simple Features\n Specification for SQL <https://www.opengeospatial.org/standards/sfs>`__.\n\n Parameters\n ----------\n geometry : str or array_like\n The WKT string(s) to convert.\n on_invalid : {\"raise\", \"warn\", \"ignore\"}, default \"raise\"\n - raise: an exception will be raised if WKT input geometries are invalid.\n - warn: a warning will be raised and invalid WKT geometries will be\n returned as ``None``.\n - ignore: invalid WKT geometries will be returned as ``None`` without a warning.\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n Examples\n --------\n >>> from_wkt('POINT (0 0)')\n <pygeos.Geometry POINT (0 0)>\n \"\"\"\n if not np.isscalar(on_invalid):\n raise TypeError(\"on_invalid only accepts scalar values\")\n\n invalid_handler = np.uint8(DecodingErrorOptions.get_value(on_invalid))\n\n return lib.from_wkt(geometry, invalid_handler, **kwargs)\n\n\ndef from_wkb(geometry, on_invalid=\"raise\", **kwargs):\n r\"\"\"\n Creates geometries from the Well-Known Binary (WKB) representation.\n\n The Well-Known Binary format is defined in the `OGC Simple Features\n Specification for SQL <https://www.opengeospatial.org/standards/sfs>`__.\n\n\n Parameters\n ----------\n geometry : str or array_like\n The WKB byte object(s) to convert.\n on_invalid : {\"raise\", \"warn\", \"ignore\"}, default \"raise\"\n - raise: an exception will be raised if a WKB input geometry is invalid.\n - warn: a warning will be raised and invalid WKB geometries will be\n returned as ``None``.\n - ignore: invalid WKB geometries will be returned as ``None`` without a warning.\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n Examples\n --------\n >>> from_wkb(b'\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf0?\\x00\\x00\\x00\\x00\\x00\\x00\\xf0?')\n <pygeos.Geometry POINT (1 1)>\n \"\"\"\n\n if not np.isscalar(on_invalid):\n raise TypeError(\"on_invalid only accepts scalar values\")\n\n invalid_handler = np.uint8(DecodingErrorOptions.get_value(on_invalid))\n\n # ensure the input has object dtype, to avoid numpy inferring it as a\n # fixed-length string dtype (which removes trailing null bytes upon access\n # of array elements)\n geometry = np.asarray(geometry, dtype=object)\n return lib.from_wkb(geometry, invalid_handler, **kwargs)\n\n\n@requires_geos(\"3.10.1\")\ndef 
from_geojson(geometry, on_invalid=\"raise\", **kwargs):\n \"\"\"Creates geometries from GeoJSON representations (strings).\n\n If a GeoJSON is a FeatureCollection, it is read as a single geometry\n (with type GEOMETRYCOLLECTION). This may be unpacked using the ``pygeos.get_parts``.\n Properties are not read.\n\n The GeoJSON format is defined in `RFC 7946 <https://geojson.org/>`__.\n\n The following are currently unsupported:\n\n - Three-dimensional geometries: the third dimension is ignored.\n - Geometries having 'null' in the coordinates.\n\n Parameters\n ----------\n geometry : str, bytes or array_like\n The GeoJSON string or byte object(s) to convert.\n on_invalid : {\"raise\", \"warn\", \"ignore\"}, default \"raise\"\n - raise: an exception will be raised if an input GeoJSON is invalid.\n - warn: a warning will be raised and invalid input geometries will be\n returned as ``None``.\n - ignore: invalid input geometries will be returned as ``None`` without a warning.\n **kwargs\n For other keyword-only arguments, see the\n `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.\n\n See also\n --------\n get_parts\n\n Examples\n --------\n >>> from_geojson('{\"type\": \"Point\",\"coordinates\": [1, 2]}')\n <pygeos.Geometry POINT (1 2)>\n \"\"\"\n # GEOS Tickets:\n # - support 3D: https://trac.osgeo.org/geos/ticket/1141\n # - handle null coordinates: https://trac.osgeo.org/geos/ticket/1142\n if not np.isscalar(on_invalid):\n raise TypeError(\"on_invalid only accepts scalar values\")\n\n invalid_handler = np.uint8(DecodingErrorOptions.get_value(on_invalid))\n\n # ensure the input has object dtype, to avoid numpy inferring it as a\n # fixed-length string dtype (which removes trailing null bytes upon access\n # of array elements)\n geometry = np.asarray(geometry, dtype=object)\n\n return lib.from_geojson(geometry, invalid_handler, **kwargs)\n"
]
| [
[
"numpy.bool_",
"numpy.isscalar",
"numpy.asarray",
"numpy.intc"
]
]
|
nickrodd/GalDM | [
"6ea9ed3882ecf4235fb4a773f8370858ad9ebf1e"
]
| [
"GalDM/create_mask_forJD.py"
]
| [
"###############################################################################\n# create_mask_forJD.py\n###############################################################################\n#\n# Creates a Boolean mask where pixels labelled as true are masked and those\n# labelled false are unmasked.\n#\n# Note throughout we adjust from b to theta = 90-b, as this is what healpy\n# uses.\n#\n# Note also all inputs are in degrees.\n#\n# NB: this module fundamentally assumes that the analysis is being performed\n# on a spherical region pixelised using HEALPix. If this is not the case, the\n# mask must be computed differently.\n#\n# Example of how to use this can be found here:\n# https://github.com/bsafdi/NPTFit/blob/master/examples/Example2_Creating_Masks.ipynb\n#\n###############################################################################\n\nimport numpy as np\nimport healpy as hp\n\n\ndef make_plane_mask(band_mask_range, nside):\n \"\"\" Masks within |b| < band_mask_range\n \"\"\"\n mask_none = np.arange(hp.nside2npix(nside))\n return (np.radians(90-band_mask_range) < hp.pix2ang(nside, mask_none)[0]) * \\\n (hp.pix2ang(nside, mask_none)[0] < np.radians(90+band_mask_range))\n\n\ndef make_long_mask(l_deg_min, l_deg_max, nside):\n \"\"\" Masks outside l_deg_min < l < l_deg_max\n \"\"\"\n mask_none = np.arange(hp.nside2npix(nside))\n return (np.radians(l_deg_max) < hp.pix2ang(nside, mask_none)[1]) * \\\n (hp.pix2ang(nside, mask_none)[1] < np.radians(360 + l_deg_min))\n\n\ndef make_lat_mask(b_deg_min, b_deg_max, nside):\n \"\"\" Masks outside b_deg_min < b < b_deg_max\n \"\"\"\n mask_none = np.arange(hp.nside2npix(nside))\n return np.logical_not(\n (np.radians(90-b_deg_max) < hp.pix2ang(nside, mask_none)[0]) *\n (hp.pix2ang(nside, mask_none)[0] < np.radians(90-b_deg_min)))\n\n\ndef make_ring_mask(inner, outer, ring_b, ring_l, nside):\n \"\"\" Masks outside inner < r < outer, of a ring centred at (ring_b,ring_l)\n \"\"\"\n mask_none = np.arange(hp.nside2npix(nside))\n return np.logical_not(\n (np.cos(np.radians(inner)) >=\n np.dot(hp.ang2vec(np.radians(90-ring_b),\n np.radians(ring_l)), hp.pix2vec(nside, mask_none))) *\n (np.dot(hp.ang2vec(np.radians(90-ring_b),\n np.radians(ring_l)), hp.pix2vec(nside, mask_none)) >=\n np.cos(np.radians(outer))))\n\n\ndef make_mask_total(nside=128,\n band_mask=False, band_mask_range=30,\n l_mask=False, l_deg_min=-30, l_deg_max=30,\n b_mask=False, b_deg_min=-30, b_deg_max=30,\n mask_ring=False, inner=0, outer=30,\n ring_b=0, ring_l=0,\n custom_mask=None):\n \"\"\" Combines band, l, b, ring, and custom masks into a single mask\n \"\"\"\n\n # Initialise an array where no pixels are masked\n mask_array = np.zeros(nside**2*12, dtype=bool)\n\n # Add masks depending on input\n if band_mask:\n mask_array += make_plane_mask(band_mask_range, nside)\n\n if l_mask:\n mask_array += make_long_mask(l_deg_min, l_deg_max, nside)\n\n if b_mask:\n mask_array += make_lat_mask(b_deg_min, b_deg_max, nside)\n\n if mask_ring:\n mask_array += make_ring_mask(inner, outer, ring_b, ring_l, nside)\n\n if custom_mask is not None:\n mask_array += custom_mask\n\n return np.where(mask_array == False)[0]\n"
]
| [
[
"numpy.where",
"numpy.radians",
"numpy.zeros"
]
]
|
Patoaventura/Datos-COVID19 | [
"5fce35aaab8bbe84d5e55293373dfe6634cd96bb"
]
| [
"src/reporteDiario.py"
]
| [
"'''\nMIT License\n\nCopyright (c) 2020 Sebastian Cornejo\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\n\"\"\"\nLos productos que salen del reporte diario son:\n7\n8\n9\n10\n12\n17\n20\n23\n24\n\"\"\"\n\nimport pandas as pd\nimport utils\nfrom shutil import copyfile\n\n\ndef prod7_8(fte, producto):\n df = pd.read_csv(fte, dtype={'Codigo region': object})\n utils.regionName(df)\n df_t = df.T\n df.to_csv(producto + '.csv', index=False)\n df_t.to_csv(producto + '_T.csv', header=False)\n\n\ndef prod9_10_20_23_24(fte, producto):\n copyfile(fte, producto + '.csv')\n HospitalizadosUCIEtario_T = utils.transpone_csv(producto + '.csv')\n HospitalizadosUCIEtario_T.to_csv(producto + '_T.csv', header=False)\n\n\nif __name__ == '__main__':\n print('Generando producto 7')\n prod7_8('../input/ReporteDiario/PCR.csv', '../output/producto7/PCR')\n\n print('Generando producto 8')\n prod7_8('../input/ReporteDiario//UCI.csv', '../output/producto8/UCI')\n\n print('Generando producto 9')\n prod9_10_20_23_24('../input/ReporteDiario/HospitalizadosUCIEtario.csv', '../output/producto9/HospitalizadosUCIEtario')\n\n print('Generando producto 10')\n prod9_10_20_23_24('../input/ReporteDiario/FallecidosEtario.csv', '../output/producto10/FallecidosEtario')\n\n print('Generando producto 12')\n exec(open('bulk_producto7.py').read())\n\n print('Generando producto 17')\n copyfile('../input/ReporteDiario/PCREstablecimiento.csv', '../output/producto17/PCREstablecimiento.csv')\n\n print('Generando producto 20')\n prod9_10_20_23_24('../input/ReporteDiario/NumeroVentiladores.csv', '../output/producto20/NumeroVentiladores')\n\n print('Generando producto 23')\n prod9_10_20_23_24('../input/ReporteDiario/PacientesCriticos.csv', '../output/producto23/PacientesCriticos')\n\n print('Generando producto 24')\n prod9_10_20_23_24('../input/ReporteDiario/CamasHospital_Diario.csv', '../output/producto24/CamasHospital_Diario')\n"
]
| [
[
"pandas.read_csv"
]
]
|
PANDASANG1231/sagemaker_bestofU | [
"94e8a2fd904406866222393e74cabeea807802bd"
]
| [
"models/pytorch/train.py"
]
| [
"import os\nimport sys\nimport numpy as np\nimport pandas as pd\n\nimport torch \nimport torchvision\n\n\ndef init_weights(m):\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.xavier_uniform(m.weight)\n \ndef accuracy(y_hat, y):\n return (y_hat.argmax(axis=1) == y).sum()\n\ndef evaluate_acc(test_dataload):\n \n accs = []\n lengs = []\n\n for batch_X, batch_y in test_dataload:\n \n batch_y_hat = model(batch_X)\n acc = (batch_y_hat.argmax(axis=1) == batch_y).sum()\n leng = len(batch_y_hat)\n \n accs.append(acc)\n lengs.append(leng)\n \n return sum(accs)/sum(lengs)\n\ndef train_epoch(train_dataload):\n\n for batch_X, batch_y in train_dataload:\n \n batch_y_hat = model(batch_X)\n batch_loss = loss(batch_y_hat, batch_y)\n \n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n\ndef train(epoch_num):\n for i in range(epoch_num):\n train_epoch(train_dataload)\n accuracy = evaluate_acc(test_dataload)\n print(\"Epoch {}, Accuracy: {:.2f}%\".format(i, accuracy*100))\n \n \n\n\nif __name__ == \"__main__\":\n \n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument('--learning_rate', type=float, default=0.1)\n parser.add_argument('--model_dir', type=str, default='./pytorch_model.pkl')\n args = parser.parse_args()\n print(args.batch_size)\n\n\n train_data = torchvision.datasets.MNIST(root=\"./../0_sample_data/\", train=True, transform=torchvision.transforms.ToTensor(), download=False)\n test_data = torchvision.datasets.MNIST(root=\"./../0_sample_data/\", train=False, transform=torchvision.transforms.ToTensor(), download=False)\n train_dataload = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=args.batch_size)\n test_dataload = torch.utils.data.DataLoader(train_data, shuffle=False, batch_size=args.batch_size)\n\n model = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(784, 256),\n torch.nn.ReLU(),\n torch.nn.Linear(256, 10),\n )\n model.apply(init_weights)\n\n loss = torch.nn.CrossEntropyLoss()\n\n optimizer = torch.optim.Adagrad(params=model.parameters(), lr=args.learning_rate)\n\n train(epoch_num=args.epochs)\n \n torch.save(model.state_dict(), args.model_dir)\n \n sys.exit(0)\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.init.xavier_uniform",
"torch.nn.ReLU",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten"
]
]
|
lmatz/mars | [
"45f9166b54eb91b21e66cef8b590a41aa8ac9569"
]
| [
"mars/tensor/expressions/arithmetic/radians.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .... import operands\nfrom ..utils import infer_dtype\nfrom .core import TensorUnaryOp\n\n\nclass TensorRadians(operands.Radians, TensorUnaryOp):\n def __init__(self, casting='same_kind', err=None, dtype=None, sparse=False, **kw):\n err = err if err is not None else np.geterr()\n super(TensorRadians, self).__init__(_casting=casting, _err=err,\n _dtype=dtype, _sparse=sparse, **kw)\n\n @classmethod\n def _is_sparse(cls, x):\n return x.issparse()\n\n\n@infer_dtype(np.radians)\ndef radians(x, out=None, where=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : array_like\n Input tensor in degrees.\n out : Tensor, None, or tuple of Tensor and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated tensor is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n where : array_like, optional\n Values of True indicate to calculate the ufunc at that position, values\n of False indicate to leave the value in the output alone.\n **kwargs\n\n Returns\n -------\n y : Tensor\n The corresponding radian values.\n\n See Also\n --------\n deg2rad : equivalent function\n\n Examples\n --------\n Convert a degree array to radians\n\n >>> import mars.tensor as mt\n\n >>> deg = mt.arange(12.) * 30.\n >>> mt.radians(deg).execute()\n array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,\n 2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,\n 5.23598776, 5.75958653])\n\n >>> out = mt.zeros((deg.shape))\n >>> ret = mt.radians(deg, out)\n >>> ret is out\n True\n \"\"\"\n op = TensorRadians(**kwargs)\n return op(x, out=out, where=where)\n"
]
| [
[
"numpy.geterr"
]
]
|
jchen42703/detection-models | [
"1d672ce27621857faf230a195fb8fba6655802f5"
]
| [
"detectionmodels/yolov3/utils.py"
]
| [
"from tensorflow.keras.layers import Add, ZeroPadding2D, UpSampling2D, \\\n Concatenate, MaxPooling2D\nfrom tensorflow.keras.models import Model\n\nfrom detectionmodels.layers.yolo import compose, DarknetConv2D, \\\n DarknetConv2D_BN_Leaky, Spp_Conv2D_BN_Leaky\n\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1, 0), (1, 0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)\n x = Add()([x, y])\n return x\n\n\ndef darknet53_body(x):\n '''Darknet53 body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\n\ndef make_last_layers(\n x, num_filters, out_filters, predict_filters=None, predict_id='1'):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)\n\n if predict_filters is None:\n predict_filters = num_filters*2\n y = compose(DarknetConv2D_BN_Leaky(predict_filters, (3, 3)), DarknetConv2D(\n out_filters, (1, 1), name='predict_conv_' + predict_id))(x)\n return x, y\n\n\ndef make_spp_last_layers(\n x, num_filters, out_filters, predict_filters=None, predict_id='1'):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)\n\n x = Spp_Conv2D_BN_Leaky(x, num_filters)\n\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)\n\n if predict_filters is None:\n predict_filters = num_filters*2\n y = compose(DarknetConv2D_BN_Leaky(predict_filters, (3, 3)), DarknetConv2D(\n out_filters, (1, 1), name='predict_conv_' + predict_id))(x)\n return x, y\n\n\ndef yolo3_predictions(\n feature_maps, feature_channel_nums, num_anchors, num_classes,\n use_spp=False):\n f1, f2, f3 = feature_maps\n f1_channel_num, f2_channel_num, f3_channel_num = feature_channel_nums\n\n # feature map 1 head & output (13x13 for 416 input)\n if use_spp:\n x, y1 = make_spp_last_layers(\n f1, f1_channel_num // 2, num_anchors * (num_classes + 5),\n predict_id='1')\n else:\n x, y1 = make_last_layers(\n f1, f1_channel_num // 2, num_anchors * (num_classes + 5),\n predict_id='1')\n\n # upsample fpn merge for feature map 1 & 2\n x = compose(\n DarknetConv2D_BN_Leaky(f2_channel_num//2, (1, 1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x, f2])\n\n # feature map 2 head & output (26x26 for 416 input)\n x, y2 = make_last_layers(\n x, f2_channel_num // 2, num_anchors * (num_classes + 5),\n predict_id='2')\n\n # upsample fpn merge for feature map 2 & 3\n x = compose(\n DarknetConv2D_BN_Leaky(f3_channel_num//2, (1, 1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x, f3])\n\n # feature map 3 head & output (52x52 for 416 input)\n x, y3 = make_last_layers(\n x, f3_channel_num // 2, num_anchors * (num_classes + 5),\n predict_id='3')\n\n return y1, y2, 
y3\n\n\ndef yolo3_body(inputs, num_anchors, num_classes, weights_path=None):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet53_body(inputs))\n if weights_path is not None:\n darknet.load_weights(weights_path, by_name=True)\n print('Load weights {}.'.format(weights_path))\n\n # f1: 13 x 13 x 1024\n f1 = darknet.output\n # f2: 26 x 26 x 512\n f2 = darknet.layers[152].output\n # f3: 52 x 52 x 256\n f3 = darknet.layers[92].output\n\n f1_channel_num = 1024\n f2_channel_num = 512\n f3_channel_num = 256\n\n y1, y2, y3 = yolo3_predictions(\n (f1, f2, f3),\n (f1_channel_num, f2_channel_num, f3_channel_num),\n num_anchors, num_classes)\n\n return Model(inputs, [y1, y2, y3])\n\n\ndef tiny_yolo3_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n # feature map 2 (26x26x256 for 416 input)\n f2 = compose(\n DarknetConv2D_BN_Leaky(16, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),\n DarknetConv2D_BN_Leaky(128, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),\n DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)\n\n # feature map 1 (13x13x1024 for 416 input)\n f1 = compose(\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3, 3)))(f2)\n\n # feature map 1 transform\n x1 = DarknetConv2D_BN_Leaky(256, (1, 1))(f1)\n\n # feature map 1 output (13x13 for 416 input)\n y1 = compose(DarknetConv2D_BN_Leaky(512, (3, 3)), DarknetConv2D(\n num_anchors*(num_classes+5), (1, 1), name='predict_conv_1'))(x1)\n\n # upsample fpn merge for feature map 1 & 2\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1, 1)),\n UpSampling2D(2))(x1)\n\n # feature map 2 output (26x26 for 416 input)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3, 3)),\n DarknetConv2D(\n num_anchors * (num_classes + 5),\n (1, 1),\n name='predict_conv_2'))(\n [x2, f2])\n\n return Model(inputs, [y1, y2])\n\n\ndef custom_tiny_yolo3_body(inputs, num_anchors, num_classes, weights_path):\n '''Create a custom Tiny YOLO_v3 model, use\n pre-trained weights from darknet and fit\n for our target classes.'''\n # TODO: get darknet class number from class file\n num_classes_coco = 80\n base_model = tiny_yolo3_body(inputs, num_anchors, num_classes_coco)\n base_model.load_weights(weights_path, by_name=False)\n print('Load weights {}.'.format(weights_path))\n\n # get conv output in original network\n y1 = base_model.layers[40].output\n y2 = base_model.layers[41].output\n y1 = DarknetConv2D(\n num_anchors * (num_classes + 5),\n (1, 1),\n name='predict_conv_1')(y1)\n y2 = DarknetConv2D(\n num_anchors * (num_classes + 5),\n (1, 1),\n name='predict_conv_2')(y2)\n return Model(inputs, [y1, y2])\n"
]
| [
[
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Concatenate"
]
]
|
huxia001/autokeras | [
"f4503bb3a3be014b452f54d8e2d187bb6419f627"
]
| [
"autokeras/utils.py"
]
| [
"import os\nimport pickle\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom autokeras import constant\n\n\ndef lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n return lr\n\n\nclass NoImprovementError(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass EarlyStop:\n def __init__(self, max_no_improvement_num=constant.MAX_NO_IMPROVEMENT_NUM, min_loss_dec=constant.MIN_LOSS_DEC):\n super().__init__()\n self.training_losses = []\n self.minimum_loss = None\n self._no_improvement_count = 0\n self._max_no_improvement_num = max_no_improvement_num\n self._done = False\n self._min_loss_dec = min_loss_dec\n self.max_accuracy = 0\n\n def on_train_begin(self, logs=None):\n self.training_losses = []\n self._no_improvement_count = 0\n self._done = False\n self.minimum_loss = float('inf')\n\n def on_epoch_end(self, loss):\n self.training_losses.append(loss)\n if self._done and loss > (self.minimum_loss - self._min_loss_dec):\n return False\n\n if loss > (self.minimum_loss - self._min_loss_dec):\n self._no_improvement_count += 1\n else:\n self._no_improvement_count = 0\n self.minimum_loss = loss\n\n if self._no_improvement_count > self._max_no_improvement_num:\n self._done = True\n\n return True\n\n\nclass ModelTrainer:\n \"\"\"A class that is used to train model\n\n This class can train a model with dataset and will not stop until getting minimum loss\n\n Attributes:\n model: the model that will be trained\n x_train: the input train data\n y_train: the input train data labels\n x_test: the input test data\n y_test: the input test data labels\n verbose: verbosity mode\n \"\"\"\n\n def __init__(self, model, train_data, test_data, verbose):\n \"\"\"Init ModelTrainer with model, x_train, y_train, x_test, y_test, verbose\"\"\"\n self.model = model\n self.verbose = verbose\n self.train_data = train_data\n self.test_data = test_data\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.criterion = torch.nn.CrossEntropyLoss()\n self.optimizer = torch.optim.SGD(model.parameters(), lr=lr_schedule(0), momentum=0.9, weight_decay=5e-4)\n self.early_stop = None\n\n def train_model(self,\n max_iter_num=constant.MAX_ITER_NUM,\n max_no_improvement_num=constant.MAX_NO_IMPROVEMENT_NUM,\n batch_size=constant.MAX_BATCH_SIZE):\n \"\"\"Train the model.\n\n Args:\n max_iter_num: An integer. The maximum number of epochs to train the model.\n The training will stop when this number is reached.\n max_no_improvement_num: An integer. The maximum number of epochs when the loss value doesn't decrease.\n The training will stop when this number is reached.\n batch_size: An integer. 
The batch size during the training.\n optimizer: An optimizer class.\n \"\"\"\n batch_size = min(len(self.train_data), batch_size)\n\n train_loader = DataLoader(self.train_data, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(self.test_data, batch_size=batch_size, shuffle=True)\n\n self.early_stop = EarlyStop(max_no_improvement_num)\n self.early_stop.on_train_begin()\n\n for epoch in range(max_iter_num):\n self._train(train_loader)\n test_loss = self._test(test_loader)\n terminate = self.early_stop.on_epoch_end(test_loss)\n if terminate:\n break\n\n def _train(self, loader):\n self.model.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(loader):\n targets = targets.argmax(1)\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n loss = self.criterion(outputs, targets)\n loss.backward()\n self.optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n def _test(self, test_loader):\n self.model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(test_loader):\n targets = targets.argmax(1)\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n outputs = self.model(inputs)\n loss = self.criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n return test_loss\n\n\ndef ensure_dir(directory):\n \"\"\"Create directory if it does not exist\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef ensure_file_dir(path):\n \"\"\"Create path if it does not exist\"\"\"\n ensure_dir(os.path.dirname(path))\n\n\ndef has_file(path):\n return os.path.exists(path)\n\n\ndef pickle_from_file(path):\n return pickle.load(open(path, 'rb'))\n\n\ndef pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))\n"
]
| [
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader"
]
]
|
CallumRai/Hansard | [
"4d2a5b30d44e3d5af88ae6c4e01f6f229fcf12b8"
]
| [
"hansard/scripts/save_model.py"
]
| [
"import torch\nimport os\nfrom transformers import GPT2Tokenizer, TFGPT2LMHeadModel\n\nif __name__ == \"__main__\":\n dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n # Save tokenizer\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2', bos_token='<|startoftext|>',\n eos_token='<|endoftext|>',\n pad_token='<|pad|>')\n tokenizer.save_pretrained(dir_path + \"\\\\data\\\\model\\\\\")\n\n # Save PyTorch model\n model = torch.load(dir_path + \"\\\\data\\\\model\\\\pretrained.pth\")\n model.save_pretrained(dir_path + \"\\\\data\\\\model\\\\\")\n\n"
]
| [
[
"torch.load"
]
]
|
pythoro/npsolve | [
"c6993dd7a10a60a09655f0252c3ad36fe7dc3af1"
]
| [
"test/test_partial.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 5 20:43:48 2019\n\n@author: Reuben\n\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom npsolve.core import sb, EMIT_VECTORS, EMIT_STATE, GET_VARS, \\\n GET_STEP_METHODS, GET_PARTIALS, SET_CACHING, GET_CACHE_CLEARS, Partial\nfrom npsolve.cache import multi_cached, mono_cached\n\ndef make_signals():\n sb.get_active().clear()\n s_names = [EMIT_VECTORS, EMIT_STATE, GET_VARS, GET_STEP_METHODS,\n GET_PARTIALS, SET_CACHING, GET_CACHE_CLEARS]\n signals = {name: sb.get(name) for name in s_names}\n return signals\n\nclass P(Partial):\n def __init__(self):\n super().__init__()\n self.add_var('a', init=0.7)\n self.add_var('b', init=5.0)\n \n def step(self, state_dct, *args):\n a = state_dct['a']\n return {'a': a*2}\n \n\nclass Cached(P):\n @mono_cached()\n def mono(self, a):\n return np.array([a])\n\n @mono_cached()\n def mono_b(self, a):\n return np.array([a])\n\n @multi_cached() \n def multi(self, a):\n return np.array([a])\n\n @multi_cached() \n def multi_b(self, a):\n return np.array([a])\n\n\ndef make_partial(cls=P):\n signals = make_signals()\n p = cls()\n p.connect()\n state = np.array([1.3, 5.6])\n ret = np.zeros(2)\n a_arr = state[0:1]\n a_arr.flags['WRITEABLE'] = False\n b_arr = state[1:2]\n b_arr.flags['WRITEABLE'] = False\n state_dct = {'a': a_arr, 'b': b_arr}\n ret_dct = {'a': ret[0:1], 'b': ret[1:2]}\n signals[EMIT_VECTORS].emit(state_dct=state_dct, ret_dct=ret_dct)\n p.a = state_dct['a']\n p.b = state_dct['b']\n return p, state, ret, state_dct, ret_dct\n\n\nclass Test_Partial(unittest.TestCase):\n\n def test_connect_before_solver(self):\n def test_fun():\n sb.get_active().clear()\n p = Partial() \n p.connect()\n self.assertRaises(KeyError, test_fun)\n \n def test_create(self):\n signals = make_signals()\n p = P()\n\n def test_set_init(self):\n signals = make_signals()\n p = P()\n p.set_init('a', 0.8)\n p.set_init('b', 55.1)\n dct = {'a': {'init': np.array([0.8])},\n 'b': {'init': np.array([55.1])}}\n self.assertEqual(p.npsolve_vars, dct)\n \n def test_get_init(self):\n signals = make_signals()\n p = P()\n p.connect()\n dicts = signals[GET_VARS].fetch_all()\n dct = {'a': {'init': np.array([0.7])},\n 'b': {'init': np.array([5.0])}}\n self.assertEqual(dicts[0], dct)\n \n def test_set_vectors(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n self.assertEqual(p.a, np.array([1.3]))\n self.assertEqual(p.a.flags['OWNDATA'], False)\n self.assertEqual(p.b, np.array([5.6]))\n self.assertEqual(p.b.flags['OWNDATA'], False)\n\n def test_update_state(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n state[:] = [33.0, 66.0]\n self.assertEqual(p.a, 33.0)\n self.assertEqual(p.b, 66.0)\n\n def test_get_state(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n a = state_dct['a']\n b = state_dct['b']\n self.assertEqual(a, 1.3)\n self.assertEqual(b, 5.6)\n\n def test_fetch_step(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n lst = sb.get(GET_STEP_METHODS).fetch_all()\n self.assertEqual(lst[0], p.step)\n\n def test_fetch_partials(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n lst = sb.get(GET_PARTIALS).fetch_all()\n self.assertEqual(lst[0], p)\n \n def test_step(self):\n p, state, ret, state_dct, ret_dct = make_partial()\n a = state_dct['a']\n a_ret = p.step(state_dct)\n self.assertEqual(a_ret, {'a': a*2})\n \n \n \nclass Test_Partial_Mono_Caching(unittest.TestCase):\n\n def test_mono_cache_init(self):\n signals = make_signals()\n p = Cached()\n \n def test_mono_cache_after_call(self):\n 
signals = make_signals()\n p = Cached()\n p.mono.cache_enable()\n ret_1 = p.mono(65.1)\n self.assertEqual(ret_1, 65.1)\n\n def test_mono_cache_not_enabled(self):\n signals = make_signals()\n p = Cached()\n ret_1 = p.mono(65.1)\n ret_2 = p.mono(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 31.2)\n \n def test_mono_cache_disabled(self):\n signals = make_signals()\n p = Cached()\n p.mono.cache_enable()\n p.mono.cache_disable()\n ret_1 = p.mono(65.1)\n ret_2 = p.mono(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 31.2)\n \n def test_mono_cache_after_second_call(self):\n signals = make_signals()\n p = Cached()\n p.mono.cache_enable()\n ret_1 = p.mono(65.1)\n ret_2 = p.mono(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 65.1)\n \n def test_mono_cache_clear(self):\n signals = make_signals()\n p = Cached()\n p.mono.cache_enable()\n ret_1 = p.mono(65.1)\n p.mono.cache_clear()\n ret_2 = p.mono(31.2)\n self.assertEqual(ret_2, np.array(31.2))\n\n def test_mono_cache_separate_caches(self):\n signals = make_signals()\n p = Cached()\n p.mono.cache_enable()\n p.mono.cache_clear()\n ret_1 = p.mono(65.1)\n ret_2 = p.mono(15)\n self.assertEqual(ret_2, 65.1)\n ret_3 = p.mono_b(100)\n self.assertEqual(ret_3, 100)\n \n def test_get_cache_clear_functions(self):\n signals = make_signals()\n p = Cached()\n lst = p._get_cache_clear_functions()\n self.assertEqual(len(lst), 4)\n for f in lst:\n self.assertEqual(callable(f), True)\n \n def test_get_cached_methods(self):\n signals = make_signals()\n p = Cached()\n lst = p._get_cached_methods()\n self.assertEqual(len(lst), 4)\n for f in lst:\n self.assertEqual(callable(f), True)\n\n def test_set_caching(self):\n signals = make_signals()\n p = Cached()\n signals[SET_CACHING].emit(enable=True)\n ret = p.mono(5)\n ret2 = p.mono(5)\n self.assertEqual(ret, ret2)\n \n\nclass Test_Partial_Multi_Caching(unittest.TestCase):\n\n def test_multi_cache_init(self):\n signals = make_signals()\n p = Cached()\n \n def test_multi_cache_after_call(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n ret_1 = p.multi(65.1)\n self.assertEqual(ret_1, 65.1)\n\n def test_multi_cache_not_enabled(self):\n signals = make_signals()\n p = Cached()\n ret_1 = p.multi(65.1)\n ret_2 = p.multi(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 31.2)\n \n def test_multi_cache_disabled(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n p.multi.cache_disable()\n ret_1 = p.multi(65.1)\n ret_2 = p.multi(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 31.2)\n \n def test_multi_cache_after_second_call(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n ret_1 = p.multi(65.1)\n ret_2 = p.multi(31.2)\n self.assertEqual(ret_1, 65.1)\n self.assertEqual(ret_2, 31.2)\n \n def test_multi_cache_clear(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n p.multi.cache_clear()\n ret_1 = p.multi(65.1)\n self.assertEqual(len(p.multi.__closure__[0].cell_contents), 1)\n p.multi.cache_clear()\n self.assertEqual(len(p.multi.__closure__[0].cell_contents), 0)\n\n def test_multi_cache_separate_caches(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n p.multi.cache_clear()\n p.multi_b.cache_enable()\n p.multi_b.cache_clear()\n ret_1 = p.multi(65.1)\n self.assertEqual(len(p.multi.__closure__[0].cell_contents), 1)\n self.assertEqual(len(p.multi_b.__closure__[0].cell_contents), 0)\n ret_2 = p.multi_b(5.5)\n 
self.assertEqual(len(p.multi.__closure__[0].cell_contents), 1)\n self.assertEqual(len(p.multi_b.__closure__[0].cell_contents), 1)\n \n def test_cache_clear(self):\n signals = make_signals()\n p = Cached()\n p.multi.cache_enable()\n p.multi.cache_clear()\n ret_1 = p.multi(65.1)\n self.assertEqual(len(p.multi.__closure__[0].cell_contents), 1)\n p.cache_clear()\n self.assertEqual(len(p.multi.__closure__[0].cell_contents), 0)"
]
| [
[
"numpy.array",
"numpy.zeros"
]
]
|
pymoc/pymoc_uf | [
"7f162a20ed1aa65894adb73c4bb444470914c879"
]
| [
"tests/modules/test_SO_ML.py"
]
| [
"import pytest\nimport sys\nimport funcsigs\nimport numpy as np\nfrom scipy import integrate\nsys.path.append('/pymoc/src/pymoc/modules')\nfrom SO_ML import SO_ML\nfrom pymoc.utils import make_array\n\n\[email protected](\n scope=\"module\",\n params=[{\n 'y': np.asarray(np.linspace(0, 2.0e6, 51)),\n 'Ks': 100,\n 'h': 50,\n 'L': 4e6,\n 'surflux': 5.9e3,\n 'rest_mask': 0.0,\n 'b_rest': 0.0,\n 'v_pist': 2.0 / 86400.0,\n 'bs': 0.02\n }, {\n 'y': np.asarray(np.linspace(0, 2.0e6, 51)),\n }, {\n 'y': None,\n }, {\n 'y': 1e6\n }]\n)\ndef so_ml_config(request):\n return request.param\n\n\[email protected](scope=\"module\")\ndef so_ml(request):\n return SO_ML(y=np.asarray(np.linspace(0, 2.0e6, 51)))\n\n\nclass TestSO_ML(object):\n def test_so_ml_init(self, so_ml_config):\n if not isinstance(so_ml_config['y'],\n np.ndarray) or not len(so_ml_config['y']):\n with pytest.raises(TypeError) as yinfo:\n SO_ML(**so_ml_config)\n assert (\n str(yinfo.value\n ) == \"y needs to be numpy array providing (regular) grid\"\n )\n return\n\n so_ml = SO_ML(**so_ml_config)\n for k in [\n 'y', 'Ks', 'h', 'L', 'surflux', 'rest_mask', 'b_rest', 'v_pist',\n 'Psi_s', 'bs'\n ]:\n assert hasattr(so_ml, k)\n\n so_ml_signature = funcsigs.signature(SO_ML)\n\n for k in ['Ks', 'h', 'L', 'v_pist', 'Psi_s']:\n assert getattr(so_ml, k) == (\n so_ml_config[k] if k in so_ml_config and so_ml_config[k] else\n so_ml_signature.parameters[k].default\n )\n\n for k in ['surflux', 'rest_mask', 'b_rest', 'bs']:\n assert all(\n getattr(so_ml, k) == make_array((\n so_ml_config[k] if k in so_ml_config and so_ml_config[k] else\n so_ml_signature.parameters[k].default\n ), so_ml.y, k)\n )\n\n # def test_make_array(self, so_ml):\n # myst = np.arange(0.0, 8.0, 0.1)\n # assert all(so_ml.make_array(myst, 'myst') == myst)\n # myst = lambda n: 42 + n\n # assert all(so_ml.make_array(myst, 'myst') == myst(so_ml.y))\n # myst = 5.0\n # assert all(so_ml.make_array(myst, 'myst') == 5.0 * np.ones((len(so_ml.y))))\n # myst = 1\n # with pytest.raises(TypeError) as mystinfo:\n # so_ml.make_array(myst, 'myst')\n # assert(str(mystinfo.value) == \"('myst', 'needs to be either function, numpy array, or float')\")\n\n # def test_solve_equi(self, so_ml):\n # with pytest.raises(TypeError) as info:\n # so_ml.solve_equi()\n # assert (str(info.value) == \"This functionality is not yet implemented\")\n\n def test_timestep(self, so_ml_config):\n dt = 60 * 86400\n conf = {\n 'y': np.asarray(np.linspace(0, 2.0e6, 51)),\n 'Ks': 100,\n 'h': 50,\n 'L': 4e6,\n 'surflux': 5.9e3,\n 'rest_mask': 0.0,\n 'b_rest': 0.0,\n 'v_pist': 2.0 / 86400.0,\n 'bs': 0.02\n }\n b_basin = np.linspace(0.03, -0.002, 80)\n Psi_b = np.linspace(4.0e6, 0, 80)\n so_ml1 = SO_ML(**conf)\n so_ml2 = SO_ML(**conf)\n\n with pytest.raises(TypeError) as info:\n so_ml1.timestep(dt=dt, Psi_b=Psi_b)\n assert (\n str(info.value) ==\n 'b_basin needs to be numpy array providing buoyancy levels in basin'\n )\n\n with pytest.raises(TypeError) as info:\n so_ml1.timestep(dt=dt, b_basin=b_basin)\n assert (\n str(info.value) ==\n 'Psi_b needs to be numpy array providing overturning at buoyancy levels given by b_basin'\n )\n\n so_ml1.timestep(dt=dt, b_basin=b_basin, Psi_b=Psi_b)\n so_ml2.advdiff(b_basin=b_basin, Psi_b=Psi_b, dt=dt)\n assert (all(so_ml1.bs == so_ml2.bs))\n assert (all(so_ml1.Psi_s == so_ml2.Psi_s))\n\n def test_advdiff(self):\n dt = 60 * 86400\n # dt=60\n y = np.asarray(np.linspace(0, 2.0e6, 51))\n z = np.asarray(np.linspace(-4000, 0, 80))\n tau = 0.12\n Ks = 100\n L = 4e6\n h = 50\n surflux = 5.9e3\n dtheta_dy = 
2.0 * np.pi / 2.0e6\n b_basin = np.asarray([0.02 * (n / 2.0e6)**2 for n in y])\n bs = np.asarray([b_basin[-1] * np.cos(n * dtheta_dy) for n in y])\n conf = {\n 'y': y,\n 'Ks': Ks,\n 'h': h,\n 'L': L,\n 'surflux': surflux,\n 'rest_mask': 0.0,\n 'b_rest': 0.0,\n 'v_pist': 2.0 / 86400.0,\n 'bs': bs\n }\n\n Psi_b = np.asarray(np.linspace(1e4, 2.0e4, 51))\n so_ml = SO_ML(**conf)\n\n # Explicity calculate the analytical solution fo the above setup\n dbs_dy = np.asarray([\n -dtheta_dy * b_basin[-1] * np.sin(n * dtheta_dy) for n in y\n ])\n d2bs_dy2 = np.asarray([\n -dtheta_dy**2 * b_basin[-1] * np.cos(n * dtheta_dy) for n in y\n ])\n db = -((Psi_b / (h*L)) * dbs_dy + Ks*d2bs_dy2 + surflux/h) * dt\n\n b = -(so_ml.bs.copy() + db)\n so_ml.advdiff(b_basin, Psi_b, dt)\n assert (\n all([np.abs(b[i] - so_ml.bs[i]) / b[i] < 0.05 for i in range(len(b))])\n )\n"
]
| [
[
"numpy.sin",
"numpy.asarray",
"numpy.abs",
"numpy.cos",
"numpy.linspace"
]
]
|
fixstars/tvm | [
"d0646e973b5c142e1aa361d9706e558ed4f8d4a3"
]
| [
"tests/python/unittest/test_micro_model_library_format.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport datetime\nimport json\nimport os\nimport sys\nimport tarfile\n\nimport numpy\nimport pytest\n\nimport tvm\nimport tvm.relay\nfrom tvm.relay.backend import executor_factory\nfrom tvm.relay.testing import byoc\nimport tvm.runtime.module\nimport tvm.testing\nfrom tvm.contrib import utils\n\n\[email protected]_micro\ndef test_export_operator_model_library_format():\n import tvm.micro as micro\n\n target = tvm.target.target.micro(\"host\")\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n A = tvm.te.placeholder((2,), dtype=\"int8\")\n B = tvm.te.placeholder((1,), dtype=\"int8\")\n C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name=\"C\")\n sched = tvm.te.create_schedule(C.op)\n mod = tvm.build(sched, [A, B, C], tvm.target.Target(target, target), name=\"add\")\n\n temp_dir = utils.tempdir()\n mlf_tar_path = temp_dir.relpath(\"lib.tar\")\n micro.export_model_library_format(mod, mlf_tar_path)\n\n tf = tarfile.open(mlf_tar_path)\n\n extract_dir = temp_dir.relpath(\"extract\")\n os.mkdir(extract_dir)\n tf.extractall(extract_dir)\n\n with open(os.path.join(extract_dir, \"metadata.json\")) as json_f:\n metadata = json.load(json_f)\n assert metadata[\"version\"] == 5\n assert metadata[\"model_name\"] == \"add\"\n export_datetime = datetime.datetime.strptime(\n metadata[\"export_datetime\"], \"%Y-%m-%d %H:%M:%SZ\"\n )\n assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)\n assert metadata[\"target\"] == {\"1\": str(target)}\n\n assert metadata[\"memory\"][\"add\"][0][\"dtype\"] == \"int8\"\n assert metadata[\"memory\"][\"add\"][0][\"shape\"] == [2]\n assert metadata[\"memory\"][\"add\"][0][\"size_bytes\"] == 2\n\n assert metadata[\"memory\"][\"add\"][1][\"dtype\"] == \"int8\"\n assert metadata[\"memory\"][\"add\"][1][\"shape\"] == [1]\n assert metadata[\"memory\"][\"add\"][1][\"size_bytes\"] == 1\n\n assert metadata[\"memory\"][\"add\"][2][\"dtype\"] == \"int8\"\n assert metadata[\"memory\"][\"add\"][2][\"shape\"] == [2]\n assert metadata[\"memory\"][\"add\"][2][\"size_bytes\"] == 2\n\n assert os.path.exists(os.path.join(extract_dir, \"codegen\", \"host\", \"src\", \"lib0.c\"))\n assert os.path.exists(os.path.join(extract_dir, \"codegen\", \"host\", \"src\", \"lib1.c\"))\n\n assert (\n len(mod.ir_module_by_target) == 1\n ), f\"expect 1 ir_model_by_target: {ir_module_by_target!r}\"\n for target, ir_mod in mod.ir_module_by_target.items():\n assert int(tvm.runtime.ndarray.device(str(target)).device_type) == 1\n with open(os.path.join(extract_dir, \"src\", \"tir-1.txt\")) as tir_f:\n assert tir_f.read() == str(ir_mod)\n\n\ndef validate_graph_json(extract_dir, factory):\n with open(os.path.join(extract_dir, \"executor-config\", 
\"graph\", \"graph.json\")) as graph_f:\n graph_json = graph_f.read()\n assert graph_json == factory.graph_json\n\n # Just check it parses and looks roughly right.\n graph = json.loads(graph_json)\n assert \"nodes\" in graph\n assert len(graph[\"nodes\"]) == 4\n assert \"attrs\" in graph\n\n\[email protected]_micro\[email protected](\n \"executor,target,should_generate_interface\",\n [\n (\"graph\", tvm.target.target.micro(\"host\"), False),\n (\"aot\", tvm.target.target.micro(\"host\", options=\"-executor=aot\"), False),\n (\n \"aot\",\n tvm.target.target.micro(\n \"host\", options=\"-executor=aot --unpacked-api=1 --interface-api=c\"\n ),\n True,\n ),\n ],\n)\ndef test_export_model_library_format_c(executor, target, should_generate_interface):\n with utils.TempDirectory.set_keep_for_debug(True):\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n relay_mod = tvm.parser.fromtext(\n \"\"\"\n #[version = \"0.0.5\"]\n def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {\n %0 = cast(%a, dtype=\"float32\") + %b * %c;\n %0\n }\"\"\"\n )\n factory = tvm.relay.build(\n relay_mod,\n target,\n mod_name=\"add\",\n params={\"c\": numpy.array([[2.0, 4.0]], dtype=\"float32\")},\n )\n\n temp_dir = utils.tempdir()\n mlf_tar_path = temp_dir.relpath(\"lib.tar\")\n import tvm.micro as micro\n\n micro.export_model_library_format(factory, mlf_tar_path)\n tf = tarfile.open(mlf_tar_path)\n\n extract_dir = temp_dir.relpath(\"extract\")\n os.mkdir(extract_dir)\n tf.extractall(extract_dir)\n\n with open(os.path.join(extract_dir, \"metadata.json\")) as json_f:\n metadata = json.load(json_f)\n assert metadata[\"version\"] == 5\n assert metadata[\"model_name\"] == \"add\"\n export_datetime = datetime.datetime.strptime(\n metadata[\"export_datetime\"], \"%Y-%m-%d %H:%M:%SZ\"\n )\n assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)\n assert metadata[\"target\"] == {\"1\": str(target)}\n if executor == \"graph\":\n assert metadata[\"memory\"][\"sids\"] == [\n {\"storage_id\": 0, \"size_bytes\": 2, \"input_binding\": \"a\"},\n {\"storage_id\": 1, \"size_bytes\": 8, \"input_binding\": \"b\"},\n {\"storage_id\": 2, \"size_bytes\": 8, \"input_binding\": \"p0\"},\n {\"storage_id\": 3, \"size_bytes\": 8},\n ]\n assert metadata[\"memory\"][\"functions\"][\"main\"] == [\n {\n \"constants_size_bytes\": 8,\n \"device\": 1,\n \"io_size_bytes\": 18,\n \"workspace_size_bytes\": 0,\n }\n ]\n assert metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"workspace\"] == [\n {\"device\": 1, \"workspace_size_bytes\": 0}\n ]\n assert (\n \"fused_cast_multiply_add\"\n in metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"function_name\"]\n )\n\n assert os.path.exists(os.path.join(extract_dir, \"codegen\", \"host\", \"src\", \"add_lib0.c\"))\n assert os.path.exists(os.path.join(extract_dir, \"codegen\", \"host\", \"src\", \"add_lib1.c\"))\n assert should_generate_interface == os.path.exists(\n os.path.join(extract_dir, \"codegen\", \"host\", \"include\", \"tvmgen_add.h\")\n )\n\n if executor == \"graph\":\n validate_graph_json(extract_dir, factory)\n\n with open(os.path.join(extract_dir, \"src\", \"relay.txt\")) as relay_f:\n assert relay_f.read() == str(relay_mod)\n\n with open(os.path.join(extract_dir, \"parameters\", \"add.params\"), \"rb\") as params_f:\n params = tvm.relay.load_param_dict(params_f.read())\n assert \"p0\" in params\n\n\[email protected]_micro\ndef 
test_export_model_library_format_llvm():\n with utils.TempDirectory.set_keep_for_debug(True):\n target = tvm.target.target.micro(\"host\")\n assert str(target)[:2] == \"c \"\n target = tvm.target.Target(\"llvm \" + str(target)[2:])\n with tvm.transform.PassContext(opt_level=3):\n relay_mod = tvm.parser.fromtext(\n \"\"\"\n #[version = \"0.0.5\"]\n def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {\n %0 = cast(%a, dtype=\"float32\") + %b * %c;\n %0\n }\"\"\"\n )\n factory = tvm.relay.build(\n relay_mod,\n target,\n mod_name=\"add\",\n params={\"c\": numpy.array([[2.0, 4.0]], dtype=\"float32\")},\n )\n\n temp_dir = utils.tempdir()\n mlf_tar_path = temp_dir.relpath(\"lib.tar\")\n import tvm.micro as micro\n\n micro.export_model_library_format(factory, mlf_tar_path)\n tf = tarfile.open(mlf_tar_path)\n\n extract_dir = temp_dir.relpath(\"extract\")\n os.mkdir(extract_dir)\n tf.extractall(extract_dir)\n\n with open(os.path.join(extract_dir, \"metadata.json\")) as json_f:\n metadata = json.load(json_f)\n assert metadata[\"version\"] == 5\n assert metadata[\"model_name\"] == \"add\"\n export_datetime = datetime.datetime.strptime(\n metadata[\"export_datetime\"], \"%Y-%m-%d %H:%M:%SZ\"\n )\n assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)\n assert metadata[\"target\"] == {\"1\": str(target)}\n assert metadata[\"memory\"][\"sids\"] == [\n {\"storage_id\": 0, \"size_bytes\": 2, \"input_binding\": \"a\"},\n {\"storage_id\": 1, \"size_bytes\": 8, \"input_binding\": \"b\"},\n {\"storage_id\": 2, \"size_bytes\": 8, \"input_binding\": \"p0\"},\n {\"storage_id\": 3, \"size_bytes\": 8},\n ]\n assert metadata[\"memory\"][\"functions\"][\"main\"] == [\n {\n \"constants_size_bytes\": 8,\n \"device\": 1,\n \"io_size_bytes\": 18,\n \"workspace_size_bytes\": 0,\n }\n ]\n assert metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"workspace\"] == [\n {\"device\": 1, \"workspace_size_bytes\": 0}\n ]\n assert (\n \"fused_cast_multiply_add\"\n in metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"function_name\"]\n )\n\n assert os.path.exists(os.path.join(extract_dir, \"codegen\", \"host\", \"lib\", \"add_lib0.o\"))\n\n validate_graph_json(extract_dir, factory)\n\n with open(os.path.join(extract_dir, \"src\", \"relay.txt\")) as relay_f:\n assert relay_f.read() == str(relay_mod)\n\n with open(os.path.join(extract_dir, \"parameters\", \"add.params\"), \"rb\") as params_f:\n params = tvm.relay.load_param_dict(params_f.read())\n assert \"p0\" in params\n\n\[email protected]_micro\[email protected](\n \"target\",\n [tvm.target.target.micro(\"host\"), tvm.target.target.micro(\"host\", options=\"-executor=aot\")],\n)\ndef test_export_model_library_format_workspace(target):\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n relay_mod = tvm.parser.fromtext(\n \"\"\"\n #[version = \"0.0.5\"]\n def @main(%p0: Tensor[(1, 56, 56, 128), int16], %p1: Tensor[(3, 3, 128, 1), int16], %p2: Tensor[(1, 1, 1, 128), int32]){\n %0 = nn.conv2d(%p0, %p1, padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3], data_layout=\"NHWC\", kernel_layout=\"HWOI\", out_dtype=\"int32\") /* ty=Tensor[(1, 56, 56, 128), int32] */;\n %1 = add(%0, %p2) /* ty=Tensor[(1, 56, 56, 128), int32] */;\n %2 = fixed_point_multiply(%1, multiplier=2080045879, shift=-4) /* ty=Tensor[(1, 56, 56, 128), int32] */;\n %3 = clip(%2, a_min=0f, a_max=255f) /* ty=Tensor[(1, 56, 56, 128), int32] */;\n cast(%3, 
dtype=\"uint8\") /* ty=Tensor[(1, 56, 56, 128), uint8] */\n }\n \"\"\"\n )\n factory = tvm.relay.build(relay_mod, target, mod_name=\"qnn_conv2d\")\n\n temp_dir = utils.tempdir()\n mlf_tar_path = temp_dir.relpath(\"lib.tar\")\n import tvm.micro as micro\n\n micro.export_model_library_format(factory, mlf_tar_path)\n tf = tarfile.open(mlf_tar_path)\n\n extract_dir = temp_dir.relpath(\"extract\")\n os.mkdir(extract_dir)\n tf.extractall(extract_dir)\n\n with open(os.path.join(extract_dir, \"metadata.json\")) as json_f:\n metadata = json.load(json_f)\n assert metadata[\"version\"] == 5\n assert metadata[\"model_name\"] == \"qnn_conv2d\"\n export_datetime = datetime.datetime.strptime(\n metadata[\"export_datetime\"], \"%Y-%m-%d %H:%M:%SZ\"\n )\n assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)\n assert metadata[\"target\"] == {\"1\": str(target)}\n assert metadata[\"memory\"][\"functions\"][\"main\"] == [\n {\n \"constants_size_bytes\": 0,\n \"device\": 1,\n \"io_size_bytes\": 1207040,\n \"workspace_size_bytes\": 2466816,\n }\n ]\n assert metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"workspace\"] == [\n {\"device\": 1, \"workspace_size_bytes\": 2466816}\n ]\n assert (\n \"fused_nn_conv2d_add_fixed_point_multiply_clip_cast\"\n in metadata[\"memory\"][\"functions\"][\"operator_functions\"][0][\"function_name\"]\n )\n\n\[email protected]_micro\ndef test_export_non_dso_exportable():\n module = tvm.support.FrontendTestModule()\n\n temp_dir = utils.tempdir()\n import tvm.micro as micro\n import tvm.micro.model_library_format as model_library_format\n\n with pytest.raises(micro.UnsupportedInModelLibraryFormatError) as exc:\n model_library_format._populate_codegen_dir(module, temp_dir.relpath(\"codegen\"))\n\n assert str(exc.exception) == (\n \"Don't know how to export non-c or non-llvm modules; found: ffi_testing\"\n )\n\n\[email protected]_micro\ndef test_export_byoc_c_module():\n \"\"\"Test BYOC flow when it produces DSO-exportable modules.\n\n NOTE the general BYOC flow is not fully supported by Model Library Format right now.\n \"\"\"\n x = tvm.relay.var(\"x\", shape=(10, 10))\n w0 = tvm.relay.var(\"w0\", shape=(10, 10))\n w1 = tvm.relay.var(\"w1\", shape=(10, 10))\n w2 = tvm.relay.var(\"w2\", shape=(10, 10))\n w3 = tvm.relay.var(\"w3\", shape=(10, 10))\n w4 = tvm.relay.var(\"w4\", shape=(10, 10))\n w5 = tvm.relay.var(\"w5\", shape=(10, 10))\n w6 = tvm.relay.var(\"w6\", shape=(10, 10))\n w7 = tvm.relay.var(\"w7\", shape=(10, 10))\n\n # C compiler\n z0 = tvm.relay.add(x, w0)\n p0 = tvm.relay.subtract(z0, w1)\n q0 = tvm.relay.multiply(p0, w2)\n\n z1 = tvm.relay.add(x, w3)\n p1 = tvm.relay.subtract(z1, w4)\n q1 = tvm.relay.multiply(p1, w5)\n\n # Other parts on TVM\n z2 = tvm.relay.add(x, w6)\n q2 = tvm.relay.subtract(z2, w7)\n\n r = tvm.relay.concatenate((q0, q1, q2), axis=0)\n f = tvm.relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n mod = tvm.IRModule()\n ann = byoc.CcompilerAnnotator()\n mod[\"main\"] = ann.visit(f)\n mod = tvm.relay.transform.PartitionGraph(\"mod_name\")(mod)\n mod = tvm.relay.transform.InferType()(mod)\n\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n factory = tvm.relay.build(mod, tvm.target.target.micro(\"host\"))\n\n temp_dir = utils.tempdir()\n mlf_tar_path = temp_dir.relpath(\"lib.tar\")\n\n from tvm import micro\n\n micro.export_model_library_format(factory, mlf_tar_path)\n\n with tarfile.open(mlf_tar_path, \"r:*\") as tf:\n tar_members = [ti.name for ti in 
tf.getmembers()]\n print(\"tar members\", tar_members)\n assert \"./metadata.json\" in tar_members\n with tf.extractfile(\"./metadata.json\") as f:\n metadata = json.load(f)\n main_md = metadata[\"memory\"][\"functions\"][\"main\"]\n assert main_md == [\n {\n \"constants_size_bytes\": 0,\n \"device\": 1,\n \"io_size_bytes\": 4800,\n \"workspace_size_bytes\": 800,\n }\n ]\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n"
]
| [
[
"numpy.array"
]
]
|
rpp0/emma | [
"fab81e1c66b8a88d14e68b8878ddbb5ee6528de2"
]
| [
"tools/paper_tools.py"
]
| [
"#!/usr/bin/python\n\n\"\"\"\nScript used to generate several plots from the paper \"Improving CEMA using Correlation Optimization\".\n\"\"\"\n\nimport argparse\nimport os\nimport pickle\nfrom emma.ai import models\nimport subprocess\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom emma.attacks.leakagemodels import LeakageModelType\nfrom emma.ai.inputs import AIInputType\nfrom emma.processing.action import Action\n\n\ndef download_files(remote_file_paths, destination):\n dest_path = os.path.abspath(destination)\n print(\"Creating directory %s\" % dest_path)\n os.makedirs(dest_path, exist_ok=True)\n for source_path in remote_file_paths:\n print(\"Downloading %s...\" % source_path)\n command = [\"/usr/bin/scp\", source_path, dest_path]\n scp_process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=False)\n stdout, stderr = scp_process.communicate()\n scp_process.wait()\n\ndef get_hash(file_path, is_remote=False):\n if is_remote:\n host, _, path = file_path.rpartition(':')\n command = [\"/usr/bin/ssh\", host, \"/usr/bin/md5sum \" + path]\n else:\n command = [\"/usr/bin/md5sum\", file_path]\n\n stat_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n stdout, stderr = stat_process.communicate()\n stat_process.wait()\n hash = stdout[0:32]\n\n if b'No such file' in stderr:\n print(\"Skipping %s (no such file)\" % file_path)\n\n return hash\n\ndef normalize(values):\n return (values - np.min(values)) / np.ptp(values)\n\ndef is_remote(path):\n return ':' in path\n\nclass RankConfidencePlot():\n def __init__(self, x_label='number of traces', y1_label='mean rank', y2_label='mean confidence', includey2=True):\n self.fig, self.ax1 = plt.subplots()\n self.ax1.set_xlabel(x_label)\n self.ax1.set_ylabel(y1_label)\n self.ax1.set_ylim([0,256])\n if includey2:\n self.ax2 = self.ax1.twinx()\n self.ax2.set_ylabel(y2_label)\n #self.ax1.spines['top'].set_visible(False)\n #self.ax2.spines['top'].set_visible(False)\n self.handles = []\n\n def add_series(self, x, ranks_y, confidences_y, rank_color='tab:blue', confidence_color='tab:blue', rank_style='-', confidence_style=':', rank_label=\"mean rank\", confidence_label=\"mean confidence\"):\n rank_series, = self.ax1.plot(x, ranks_y, color=rank_color, linestyle=rank_style, label=rank_label)\n confidence_series, = self.ax2.plot(x, confidences_y, color=confidence_color, linestyle=confidence_style, label=confidence_label, alpha=0.5)\n self.handles.extend([rank_series, confidence_series])\n\n def add_rank_series(self, x, ranks_y, rank_color='tab:blue', rank_style='-', rank_label=\"mean rank\"):\n rank_series, = self.ax1.plot(x, ranks_y, color=rank_color, linestyle=rank_style, label=rank_label)\n self.handles.append(rank_series)\n\n def set_title(self, title):\n plt.title(title)\n\n def save(self, path):\n legend = plt.legend(handles=self.handles, loc=9, fontsize=8)\n plt.gca().add_artist(legend)\n self.fig.savefig(path, bbox_inches='tight')\n\ndef get_series_from_tfold_blob(tfold_blob):\n ranks = tfold_blob['ranks']\n confidences = tfold_blob['confidences']\n step = tfold_blob['rank_trace_step']\n num_validation_traces = tfold_blob['num_validation_traces']\n print(\"Number of validation traces: %d\" % num_validation_traces)\n\n x = range(0, num_validation_traces + step, step)\n ranks_y = np.array([256] + list(np.mean(ranks, axis=0)), dtype=np.float32)\n confidences_y = np.array([0] + list(np.mean(confidences, axis=0)), dtype=np.float32)\n\n return x[0:len(ranks_y)], ranks_y, 
confidences_y\n\n\ndef insert_attribute_if_absent(instance, attr_name, default):\n dummy = getattr(instance, attr_name, None)\n if dummy is None:\n setattr(instance, attr_name, default)\n\n\nclass FigureGenerator():\n def __init__(self, input_path, model_id, model_suffix=\"last\"):\n if not 'models' in input_path:\n raise Exception\n\n self.is_remote = is_remote(input_path)\n self.model_id = model_id\n self.model_suffix = model_suffix\n\n if self.is_remote:\n self.remote_path = input_path\n self.input_path = os.path.join(\"./models/\", self.remote_path.rpartition('models')[2][1:])\n else:\n self.input_path = input_path\n\n self.input_path = os.path.abspath(self.input_path)\n self.output_path = os.path.abspath(os.path.join(\"./paper_data\", self.input_path.rpartition('models')[2][1:]))\n\n def get_remote_model(self, input_path, remote_path, model_id, model_suffix):\n remote_model_path = os.path.join(remote_path, model_id + \"-\" + model_suffix + \".h5\")\n remote_model_history_path = os.path.join(remote_path, model_id + \"-history.p\")\n remote_model_ranks_path = os.path.join(remote_path, model_id + \"-t-ranks.p\")\n remote_model_testrank_path = os.path.join(remote_path, model_id + \"-bestrank-testrank.p\")\n local_model_path = os.path.join(input_path, \"%s-%s.h5\" % (model_id, model_suffix))\n local_model_history_path = os.path.join(input_path, \"%s-history.p\" % model_id)\n local_model_ranks_path = os.path.join(input_path, \"%s-t-ranks.p\" % model_id)\n local_model_testrank_path = os.path.join(input_path, \"%s-bestrank-testrank.p\" % model_id)\n\n # Check if model already exists\n if os.path.exists(input_path):\n # Is there a newer model?\n local_model_hash = get_hash(local_model_path, is_remote=False)\n remote_model_hash = get_hash(remote_model_path, is_remote=True)\n if local_model_hash != remote_model_hash:\n download_files([remote_model_path], input_path)\n\n # Is there a newer history?\n local_model_history_hash = get_hash(local_model_history_path, is_remote=False)\n remote_model_history_hash = get_hash(remote_model_history_path, is_remote=True)\n if local_model_history_hash != remote_model_history_hash:\n download_files([remote_model_history_path], input_path)\n\n # Is there a newer ranks file?\n local_model_ranks_hash = get_hash(local_model_ranks_path, is_remote=False)\n remote_model_ranks_hash = get_hash(remote_model_ranks_path, is_remote=True)\n if local_model_ranks_hash != remote_model_ranks_hash:\n download_files([remote_model_ranks_path], input_path)\n\n # Newer testrank file?\n local_model_testrank_hash = get_hash(local_model_testrank_path, is_remote=False)\n remote_model_testrank_hash = get_hash(remote_model_testrank_path, is_remote=True)\n if local_model_testrank_hash != remote_model_testrank_hash:\n download_files([remote_model_testrank_path], input_path)\n else:\n # Download model\n download_files([remote_model_path,remote_model_history_path,remote_model_ranks_path,remote_model_testrank_path], input_path)\n\n def generate_stats(self):\n tfold_blob = None\n\n if self.is_remote:\n self.get_remote_model(self.input_path, self.remote_path, self.model_id, self.model_suffix)\n\n # Make directory for resulting data and graphs\n os.makedirs(self.output_path, exist_ok=True)\n\n # History graphs\n try:\n history = pickle.load(open(os.path.join(self.input_path, self.model_id + \"-history.p\"), \"rb\"))\n self.generate_history_graphs(history)\n except FileNotFoundError:\n print(\"File not found; skipping history graphs\")\n\n # Rank graphs\n try:\n tfold_blob = 
pickle.load(open(os.path.join(self.input_path, self.model_id + \"-t-ranks.p\"), \"rb\"))\n self.generate_ranks_graphs(tfold_blob)\n except FileNotFoundError:\n print(\"File not found; skipping rank graphs\")\n\n # Testrank graphs\n try:\n tfold_blob = pickle.load(open(os.path.join(self.input_path, self.model_id + \"-bestrank-testrank.p\"), \"rb\"))\n self.generate_testrank_graphs(tfold_blob)\n except FileNotFoundError:\n print(\"File not found; skipping testrank graphs\")\n\n # Model graphs\n try:\n if tfold_blob is not None and \"conf\" in tfold_blob:\n # TODO hack because some old blobs don't have use_bias\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"use_bias\", True)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"batch_norm\", True)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"cnn\", False)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"metric_freq\", 10)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"regularizer\", None)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"reglambda\", 0.001)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"key_low\", 2)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"key_high\", 3)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"loss_type\", \"correlation\")\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"leakage_model\", LeakageModelType.HAMMING_WEIGHT_SBOX)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"input_type\", AIInputType.SIGNAL)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"n_hidden_nodes\", 256)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"n_hidden_layers\", 1)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"lr\", 0.0001)\n insert_attribute_if_absent(tfold_blob[\"conf\"], \"activation\", \"leakyrelu\")\n\n actions = []\n for action in tfold_blob[\"conf\"].actions:\n if isinstance(action, str):\n actions.append(Action(action))\n else:\n actions.append(action)\n tfold_blob[\"conf\"].actions = actions\n\n model = models.AI(model_type=self.model_id, conf=tfold_blob[\"conf\"])\n model.load()\n self.generate_model_graphs(model.model)\n else:\n print(\"No tfold blob containing conf. 
Skipping model graphs.\")\n except OSError:\n print(\"File not found; skipping model graphs\")\n\n def generate_history_graphs(self, history):\n for key, values in history.items():\n print(\"Generating %s graph\" % key)\n fig = plt.figure()\n plt.plot(np.arange(len(values)), values)\n fig.savefig(os.path.join(self.output_path, \"%s-%s-%s.pdf\" % (self.model_id, self.model_suffix, key)), bbox_inches='tight')\n\n def generate_ranks_graphs(self, tfold_blob):\n conf = tfold_blob['conf']\n t = tfold_blob['folds']\n\n x, ranks_y, confidences_y = get_series_from_tfold_blob(tfold_blob)\n\n plot = RankConfidencePlot()\n #plot.set_title(\"%d-fold cross-validation of dataset '%s'\" % (t, conf.dataset_id))\n plot.add_series(x, ranks_y, confidences_y, rank_label=\"rank\", confidence_label=\"confidence\")\n plot.save(os.path.join(self.output_path, \"%s-%s-tfold.pdf\" % (self.model_id, self.model_suffix)))\n\n def generate_testrank_graphs(self, tfold_blob):\n conf = tfold_blob['conf']\n t = tfold_blob['folds']\n\n tfold_blob['ranks'] = np.expand_dims(tfold_blob['ranks'], axis=0)\n tfold_blob['confidences'] = np.expand_dims(tfold_blob['confidences'], axis=0)\n x, ranks_y, confidences_y = get_series_from_tfold_blob(tfold_blob)\n\n plot = RankConfidencePlot(y1_label=\"rank\", y2_label=\"confidence\")\n #plot.set_title(\"Rank test of dataset '%s'\" % conf.dataset_id)\n plot.add_series(x, ranks_y, confidences_y, rank_label=\"rank\", confidence_label=\"confidence\")\n plot.save(os.path.join(self.output_path, \"%s-%s-testrank.pdf\" % (self.model_id, self.model_suffix)))\n\n def generate_model_graphs(self, model):\n print(model.get_weights())\n print(model.summary())\n\ndef ascad_sort_name(name):\n if 'desync100' in name:\n return 2\n elif 'desync50' in name:\n return 1\n else:\n return 0\n\nclass CombinedFigureGenerator(FigureGenerator):\n def __init__(self, input_tuples, name=\"unknown\", model_suffix=\"last\"):\n self.input_tuples = input_tuples\n self.model_suffix = model_suffix\n self.output_path = os.path.abspath(os.path.join(\"./paper_data\", \"combined-\" + name))\n\n def dump_text(self, name, x, ranks_y, confidences_y):\n '''\n Dump data to text. Useful for getting raw data for the paper.\n '''\n\n with open(os.path.join(self.output_path, \"data-%s.txt\" % name), \"w\") as f:\n min_rank = np.amin(ranks_y)\n min_rank_x = x[np.argmin(ranks_y)]\n max_rank = np.amax(ranks_y)\n max_rank_x = x[np.argmax(ranks_y)]\n min_conf = np.amin(confidences_y)\n min_conf_x = x[np.argmin(confidences_y)]\n max_conf = np.amax(confidences_y)\n max_conf_x = x[np.argmax(confidences_y)]\n f.write(\"Ranks:\\n\")\n f.write(str(list(zip(x, ranks_y))))\n f.write(\"\\n\\n\")\n f.write(\"Confidences:\\n\")\n f.write(str(list(zip(x, confidences_y))))\n f.write(\"\\n\\n\")\n f.write(\"Min rank: (%d, %d)\\n\" % (min_rank_x, min_rank))\n f.write(\"Max rank: (%d, %d)\\n\" % (max_rank_x, max_rank))\n f.write(\"Last rank: (%d, %d)\\n\" % (x[-1], ranks_y[-1]))\n f.write(\"Min confidence: (%d, %f)\\n\" % (min_conf_x, min_conf))\n f.write(\"Max confidence: (%d, %f)\\n\" % (max_conf_x, max_conf))\n f.write(\"Last confidence: (%d, %f)\\n\" % (x[-1], confidences_y[-1]))\n\n print(\"Dumped %s data to text\" % name)\n\n\n def generate_stats(self, title=\"\", dump_text=True):\n plot = RankConfidencePlot(includey2=True) # TODO need to manually change this includey2... 
Fix\n\n linestyles = ['-', '--', ':', '-.']\n #colors = ['xkcd:aqua', 'xkcd:azure', 'xkcd:green']\n colors = ['tab:blue', 'tab:orange', 'tab:green']\n if len(self.input_tuples) < 1:\n print(\"Nothing to do\")\n return\n\n for input_tuple in sorted(self.input_tuples, key=lambda f: ascad_sort_name(f[0])):\n input_path, model_id = input_tuple\n\n if is_remote(input_path):\n remote_path = input_path\n input_path = os.path.join(\"./models/\", remote_path.rpartition('models')[2][1:])\n self.get_remote_model(input_path, remote_path, model_id, self.model_suffix)\n\n tfold_blob = pickle.load(open(os.path.join(input_path, model_id + \"-t-ranks.p\"), \"rb\"))\n x, ranks_y, confidences_y = get_series_from_tfold_blob(tfold_blob)\n dataset_name = input_path[input_path.find('ASCAD'):]\n rank_label = \"mean rank (%s)\" % dataset_name\n confidence_label = \"mean confidence (%s)\" % dataset_name\n linestyle = linestyles.pop(0)\n color = colors.pop(0)\n if 'aiascad' in model_id: # Only plot ranks for ASCAD\n plot.add_rank_series(x, ranks_y, rank_label=rank_label, rank_color=color)\n else:\n plot.add_series(x, ranks_y, confidences_y, rank_label=rank_label, confidence_label=confidence_label, rank_color=color, confidence_color=color)\n if dump_text:\n self.dump_text(dataset_name + '-' + model_id, x, ranks_y, confidences_y)\n\n os.makedirs(self.output_path, exist_ok=True)\n plot.save(os.path.join(self.output_path, \"combined-%s-tfold.pdf\" % model_id))\n print(\"Combined:\")\n print(self.input_tuples)\n\n def generate_stats_testrank(self, title=\"\", dump_text=True):\n plot = RankConfidencePlot()\n\n linestyles = ['-', '--', ':', '-.']\n #colors = ['xkcd:aqua', 'xkcd:azure', 'xkcd:green']\n colors = ['tab:blue', 'tab:orange', 'tab:green']\n if len(self.input_tuples) < 1:\n print(\"Nothing to do\")\n return\n\n for input_tuple in sorted(self.input_tuples, key=lambda f: ascad_sort_name(f[0])):\n input_path, model_id = input_tuple\n\n if is_remote(input_path):\n remote_path = input_path\n input_path = os.path.join(\"./models/\", remote_path.rpartition('models')[2][1:])\n self.get_remote_model(input_path, remote_path, model_id, self.model_suffix)\n\n tfold_blob = pickle.load(open(os.path.join(input_path, model_id + \"-bestrank-testrank.p\"), \"rb\"))\n tfold_blob['ranks'] = np.expand_dims(tfold_blob['ranks'], axis=0)\n tfold_blob['confidences'] = np.expand_dims(tfold_blob['confidences'], axis=0)\n x, ranks_y, confidences_y = get_series_from_tfold_blob(tfold_blob)\n dataset_name = input_path[input_path.find('ASCAD'):]\n rank_label = \"rank (%s)\" % dataset_name\n confidence_label = \"confidence (%s)\" % dataset_name\n linestyle = linestyles.pop(0)\n color = colors.pop(0)\n plot.add_series(x, ranks_y, confidences_y, rank_label=rank_label, confidence_label=confidence_label, rank_color=color, confidence_color=color)\n\n if dump_text:\n self.dump_text(dataset_name + '-' + model_id, x, ranks_y, confidences_y)\n\n os.makedirs(self.output_path, exist_ok=True)\n plot.save(os.path.join(self.output_path, \"combined-%s-testrank.pdf\" % model_id))\n print(\"Combined:\")\n print(self.input_tuples)\n\n\nclass ModelFinder():\n def __init__(self, models_dir):\n self.models_dir = models_dir\n self.is_remote = is_remote(models_dir)\n self.keywords = ('-t-ranks', '-last', '-history')\n\n def find_models(self, dir_filter=None, model_filter=None):\n if self.is_remote:\n host, _, path = self.models_dir.rpartition(':')\n python_command = \"python -c \\\"import os; import json; print(json.dumps(list(os.walk('%s'))))\\\"\" % path\n\n command 
= [\"/usr/bin/ssh\", host, python_command]\n walk_process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=False)\n stdout, stderr = walk_process.communicate()\n walk_process.wait()\n\n subwalks = json.loads(stdout.decode(\"utf-8\"))\n else:\n walk_generator = os.walk(self.models_dir)\n model_directories = next(walk_generator)[1]\n subwalks = list(walk_generator)\n\n model_locations = []\n for subwalk in subwalks:\n subdirectory = subwalk[0]\n if not dir_filter is None and not dir_filter in subdirectory:\n continue\n if self.is_remote:\n subdirectory = host + ':' + subdirectory\n files = subwalk[2]\n model_names = set()\n for file in files:\n for keyword in self.keywords:\n if keyword in file:\n model_names.add(file.rpartition(keyword)[0])\n for model_name in model_names:\n if not model_filter is None and not model_filter in model_name:\n continue\n model_locations.append((subdirectory, model_name))\n\n return model_locations\n\nif __name__ == \"__main__\":\n \"\"\"\n Tools for creating the paper. Possible commands:\n * run <suite_name>: Run a suite of experiments\n * stats <model_id>: Generate statistics and graphs for model_id\n \"\"\"\n parser = argparse.ArgumentParser(description='Tools for CEMA correlation optimization paper.', formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('command', type=str, help='Command to execute')\n parser.add_argument('parameters', type=str, help='Parameters for the command', nargs='+')\n args, unknown = parser.parse_known_args()\n\n if args.command == 'stats':\n if len(args.parameters) >= 2:\n f = FigureGenerator(args.parameters[0], args.parameters[1])\n f.generate_stats()\n else:\n print(\"Not enough parameters. Expected <model_subdirectory> <model_id>\")\n elif args.command == 'autostats':\n if len(args.parameters) >= 1:\n mf = ModelFinder(args.parameters[0])\n for model_location in mf.find_models():\n f = FigureGenerator(model_location[0], model_location[1])\n f.generate_stats()\n else:\n print(\"Not enough parameters. Expected <models_root_directory>\")\n elif args.command == 'combinedtfold':\n if len(args.parameters) >= 3:\n mf = ModelFinder(args.parameters[0])\n model_locations = mf.find_models(dir_filter=args.parameters[1], model_filter=args.parameters[2])\n f = CombinedFigureGenerator(model_locations, name=args.parameters[1])\n try: # Deadline approaching, fix later\n f.generate_stats()\n except FileNotFoundError:\n pass\n try: # Deadline approaching, fix later\n f.generate_stats_testrank()\n except FileNotFoundError:\n pass\n else:\n print(\"Not enough parameters. Expected <models_root_directory> <dir_filter> <model_filter>\")\n else:\n print(\"Unknown command %s\" % args.command)\n"
]
| [
[
"numpy.argmin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.amax",
"numpy.argmax",
"numpy.ptp",
"numpy.amin",
"matplotlib.pyplot.gca",
"numpy.expand_dims"
]
]
|
jillnogold/mlrun | [
"beff7da359b697156890e4eb45cb9a1bc9f16631"
]
| [
"mlrun/utils/helpers.py"
]
| [
"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\nimport hashlib\nimport inspect\nimport json\nimport re\nimport sys\nimport time\nimport typing\nfrom datetime import datetime, timezone\nfrom importlib import import_module\nfrom os import environ, path\nfrom types import ModuleType\nfrom typing import Any, List, Optional, Tuple\n\nimport numpy as np\nimport requests\nimport yaml\nfrom dateutil import parser\nfrom pandas._libs.tslibs.timestamps import Timedelta, Timestamp\nfrom tabulate import tabulate\nfrom yaml.representer import RepresenterError\n\nimport mlrun\nimport mlrun.errors\nimport mlrun.utils.version.version\n\nfrom ..config import config\nfrom .logger import create_logger\n\nyaml.Dumper.ignore_aliases = lambda *args: True\n_missing = object()\n\nhub_prefix = \"hub://\"\nDB_SCHEMA = \"store\"\n\n\nclass StorePrefix:\n \"\"\"map mlrun store objects to prefixes\"\"\"\n\n FeatureSet = \"feature-sets\"\n FeatureVector = \"feature-vectors\"\n Artifact = \"artifacts\"\n Model = \"models\"\n Dataset = \"datasets\"\n\n @classmethod\n def is_artifact(cls, prefix):\n return prefix in [cls.Artifact, cls.Model, cls.Dataset]\n\n @classmethod\n def kind_to_prefix(cls, kind):\n kind_map = {\"model\": cls.Model, \"dataset\": cls.Dataset}\n return kind_map.get(kind, cls.Artifact)\n\n @classmethod\n def is_prefix(cls, prefix):\n return prefix in [\n cls.Artifact,\n cls.Model,\n cls.Dataset,\n cls.FeatureSet,\n cls.FeatureVector,\n ]\n\n\ndef get_artifact_target(item: dict, project=None):\n if is_legacy_artifact(item):\n db_key = item.get(\"db_key\")\n project_str = project or item.get(\"project\")\n tree = item.get(\"tree\")\n else:\n db_key = item[\"spec\"].get(\"db_key\")\n project_str = project or item[\"metadata\"].get(\"project\")\n tree = item[\"metadata\"].get(\"tree\")\n\n kind = item.get(\"kind\")\n if kind in [\"dataset\", \"model\"] and db_key:\n return f\"{DB_SCHEMA}://{StorePrefix.Artifact}/{project_str}/{db_key}:{tree}\"\n\n return (\n item.get(\"target_path\")\n if is_legacy_artifact(item)\n else item[\"spec\"].get(\"target_path\")\n )\n\n\nlogger = create_logger(config.log_level, config.log_formatter, \"mlrun\", sys.stdout)\nmissing = object()\n\nis_ipython = False\ntry:\n import IPython\n\n ipy = IPython.get_ipython()\n # if its IPython terminal ignore (cant show html)\n if ipy and \"Terminal\" not in str(type(ipy)):\n is_ipython = True\nexcept ImportError:\n pass\n\nif is_ipython and config.nest_asyncio_enabled in [\"1\", \"True\"]:\n # bypass Jupyter asyncio bug\n import nest_asyncio\n\n nest_asyncio.apply()\n\n\nclass run_keys:\n input_path = \"input_path\"\n output_path = \"output_path\"\n inputs = \"inputs\"\n artifacts = \"artifacts\"\n outputs = \"outputs\"\n data_stores = \"data_stores\"\n secrets = \"secret_sources\"\n\n\ndef verify_field_regex(\n field_name, field_value, patterns, raise_on_failure: bool = True\n) -> bool:\n logger.debug(\n \"Validating field against patterns\",\n field_name=field_name,\n field_value=field_value,\n 
pattern=patterns,\n )\n\n for pattern in patterns:\n if not re.match(pattern, str(field_value)):\n log_func = logger.warn if raise_on_failure else logger.debug\n log_func(\n \"Field is malformed. Does not match required pattern\",\n field_name=field_name,\n field_value=field_value,\n pattern=pattern,\n )\n if raise_on_failure:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"Field '{field_name}' is malformed. Does not match required pattern: {pattern}\"\n )\n else:\n return False\n return True\n\n\n# Verifying that a field input is of the expected type. If not the method raises a detailed MLRunInvalidArgumentError\ndef verify_field_of_type(field_name: str, field_value, expected_type: type):\n if not isinstance(field_value, expected_type):\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"Field '{field_name}' should be of type {expected_type.__name__} \"\n f\"(got: {type(field_value).__name__} with value: {field_value}).\"\n )\n\n\n# Verifying that a field input is of type list and all elements inside are of the expected element type.\n# If not the method raises a detailed MLRunInvalidArgumentError\ndef verify_field_list_of_type(\n field_name: str, field_value, expected_element_type: type\n):\n verify_field_of_type(field_name, field_value, list)\n for element in field_value:\n verify_field_of_type(field_name, element, expected_element_type)\n\n\ndef verify_dict_items_type(\n name: str,\n dictionary: dict,\n expected_keys_types: list = None,\n expected_values_types: list = None,\n):\n if dictionary:\n if type(dictionary) != dict:\n raise mlrun.errors.MLRunInvalidArgumentTypeError(\n f\"{name} expected to be of type dict, got type : {type(dictionary)}\"\n )\n try:\n verify_list_items_type(dictionary.keys(), expected_keys_types)\n verify_list_items_type(dictionary.values(), expected_values_types)\n except mlrun.errors.MLRunInvalidArgumentTypeError as exc:\n raise mlrun.errors.MLRunInvalidArgumentTypeError(\n f\"{name} should be of type Dict[{get_pretty_types_names(expected_keys_types)},\"\n f\"{get_pretty_types_names(expected_values_types)}].\"\n ) from exc\n\n\ndef verify_list_items_type(list_, expected_types: list = None):\n if list_ and expected_types:\n list_items_types = set(map(type, list_))\n expected_types = set(expected_types)\n\n if not list_items_types.issubset(expected_types):\n raise mlrun.errors.MLRunInvalidArgumentTypeError(\n f\"Found unexpected types in list items. 
expected: {expected_types},\"\n f\" found: {list_items_types} in : {list_}\"\n )\n\n\ndef get_pretty_types_names(types):\n if len(types) == 0:\n return \"\"\n if len(types) > 1:\n return \"Union[\" + \",\".join([ty.__name__ for ty in types]) + \"]\"\n return types[0].__name__\n\n\ndef now_date():\n return datetime.now(timezone.utc)\n\n\ndef to_date_str(d):\n if d:\n return d.isoformat()\n return \"\"\n\n\ndef normalize_name(name):\n # TODO: Must match\n # [a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?\n name = re.sub(r\"\\s+\", \"-\", name)\n name = name.replace(\"_\", \"-\")\n return name.lower()\n\n\nclass LogBatchWriter:\n def __init__(self, func, batch=16, maxtime=5):\n self.batch = batch\n self.maxtime = maxtime\n self.start_time = datetime.now()\n self.buffer = \"\"\n self.func = func\n\n def write(self, data):\n self.buffer += data\n self.batch -= 1\n elapsed_time = (datetime.now() - self.start_time).seconds\n if elapsed_time > self.maxtime or self.batch <= 0:\n self.flush()\n\n def flush(self):\n self.func(self.buffer)\n self.buffer = \"\"\n self.start_time = datetime.now()\n\n\ndef get_in(obj, keys, default=None):\n \"\"\"\n >>> get_in({'a': {'b': 1}}, 'a.b')\n 1\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split(\".\")\n\n for key in keys:\n if not obj or key not in obj:\n return default\n obj = obj[key]\n return obj\n\n\ndef verify_and_update_in(\n obj, key, value, expected_type: type, append=False, replace=True\n):\n verify_field_of_type(key, value, expected_type)\n update_in(obj, key, value, append, replace)\n\n\ndef verify_list_and_update_in(\n obj, key, value, expected_element_type: type, append=False, replace=True\n):\n verify_field_list_of_type(key, value, expected_element_type)\n update_in(obj, key, value, append, replace)\n\n\ndef update_in(obj, key, value, append=False, replace=True):\n parts = key.split(\".\") if isinstance(key, str) else key\n for part in parts[:-1]:\n sub = obj.get(part, missing)\n if sub is missing:\n sub = obj[part] = {}\n obj = sub\n\n last_key = parts[-1]\n if last_key not in obj:\n if append:\n obj[last_key] = []\n else:\n obj[last_key] = {}\n\n if append:\n if isinstance(value, list):\n obj[last_key] += value\n else:\n obj[last_key].append(value)\n else:\n if replace or not obj.get(last_key):\n obj[last_key] = value\n\n\ndef match_labels(labels, conditions):\n match = True\n\n def splitter(verb, text):\n items = text.split(verb)\n if len(items) != 2:\n raise ValueError(f\"illegal condition - {text}\")\n return labels.get(items[0].strip(), \"\"), items[1].strip()\n\n for condition in conditions:\n if \"~=\" in condition:\n l, val = splitter(\"~=\", condition)\n match = match and val in l\n elif \"!=\" in condition:\n l, val = splitter(\"!=\", condition)\n match = match and val != l\n elif \"=\" in condition:\n l, val = splitter(\"=\", condition)\n match = match and val == l\n else:\n match = match and (condition.strip() in labels)\n return match\n\n\ndef match_times(time_from, time_to, obj, key):\n obj_time = get_in(obj, key)\n if not obj_time:\n # if obj doesn't have the required time, return false if either time_from or time_to were given\n return not time_from and not time_to\n obj_time = parser.isoparse(obj_time)\n\n if (time_from and time_from > obj_time) or (time_to and time_to < obj_time):\n return False\n\n return True\n\n\ndef match_value(value, obj, key):\n if not value:\n return True\n return get_in(obj, key, _missing) == value\n\n\ndef match_value_options(value_options, obj, key):\n if not value_options:\n 
return True\n\n return get_in(obj, key, _missing) in as_list(value_options)\n\n\ndef flatten(df, col, prefix=\"\"):\n params = []\n for r in df[col]:\n if r:\n for k in r.keys():\n if k not in params:\n params += [k]\n for p in params:\n df[prefix + p] = df[col].apply(lambda x: x.get(p, \"\") if x else \"\")\n df.drop(col, axis=1, inplace=True)\n return df\n\n\ndef list2dict(lines: list):\n out = {}\n for line in lines:\n i = line.find(\"=\")\n if i == -1:\n continue\n key, value = line[:i].strip(), line[i + 1 :].strip()\n if key is None:\n raise ValueError(\"cannot find key in line (key=value)\")\n value = path.expandvars(value)\n out[key] = value\n return out\n\n\ndef dict_to_list(struct: dict):\n if not struct:\n return []\n return [f\"{k}={v}\" for k, v in struct.items()]\n\n\ndef dict_to_str(struct: dict, sep=\",\"):\n return sep.join(dict_to_list(struct))\n\n\ndef numpy_representer_seq(dumper, data):\n return dumper.represent_list(data.tolist())\n\n\ndef float_representer(dumper, data):\n return dumper.represent_float(data)\n\n\ndef int_representer(dumper, data):\n return dumper.represent_int(data)\n\n\ndef date_representer(dumper, data):\n if isinstance(data, np.datetime64):\n value = str(data)\n else:\n value = data.isoformat()\n return dumper.represent_scalar(\"tag:yaml.org,2002:timestamp\", value)\n\n\ndef enum_representer(dumper, data):\n return dumper.represent_str(str(data.value))\n\n\nyaml.add_representer(np.int64, int_representer, Dumper=yaml.SafeDumper)\nyaml.add_representer(np.integer, int_representer, Dumper=yaml.SafeDumper)\nyaml.add_representer(np.float64, float_representer, Dumper=yaml.SafeDumper)\nyaml.add_representer(np.floating, float_representer, Dumper=yaml.SafeDumper)\nyaml.add_representer(np.ndarray, numpy_representer_seq, Dumper=yaml.SafeDumper)\nyaml.add_representer(np.datetime64, date_representer, Dumper=yaml.SafeDumper)\nyaml.add_representer(Timestamp, date_representer, Dumper=yaml.SafeDumper)\nyaml.add_multi_representer(enum.Enum, enum_representer, Dumper=yaml.SafeDumper)\n\n\ndef dict_to_yaml(struct):\n try:\n data = yaml.safe_dump(struct, default_flow_style=False, sort_keys=False)\n except RepresenterError as exc:\n raise ValueError(f\"error: data result cannot be serialized to YAML, {exc}\")\n return data\n\n\n# solve numpy json serialization\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, (int, str, float, list, dict)):\n return obj\n elif isinstance(obj, (np.integer, np.int64)):\n return int(obj)\n elif isinstance(obj, (np.floating, np.float64)):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return str(obj)\n\n\ndef dict_to_json(struct):\n return json.dumps(struct, cls=MyEncoder)\n\n\ndef uxjoin(base, local_path, key=\"\", iter=None, is_dir=False):\n if is_dir and (not local_path or local_path in [\".\", \"./\"]):\n local_path = \"\"\n elif not local_path:\n local_path = key\n\n if iter:\n local_path = path.join(str(iter), local_path).replace(\"\\\\\", \"/\")\n\n if base and not base.endswith(\"/\"):\n base += \"/\"\n base_str = base or \"\"\n if local_path.startswith(\"./\"):\n local_path = local_path[len(\"./\") :]\n return f\"{base_str}{local_path}\"\n\n\ndef parse_versioned_object_uri(uri, default_project=\"\"):\n project = default_project\n tag = \"\"\n hash_key = \"\"\n if \"/\" in uri:\n loc = uri.find(\"/\")\n project = uri[:loc]\n uri = uri[loc + 1 :]\n if \":\" in uri:\n loc = uri.find(\":\")\n tag = uri[loc + 1 :]\n uri = uri[:loc]\n if \"@\" in uri:\n loc = 
uri.find(\"@\")\n hash_key = uri[loc + 1 :]\n uri = uri[:loc]\n\n return project, uri, tag, hash_key\n\n\ndef parse_artifact_uri(uri, default_project=\"\"):\n uri_pattern = r\"^((?P<project>.*)/)?(?P<key>.*?)(\\#(?P<iteration>.*?))?(:(?P<tag>.*?))?(@(?P<uid>.*))?$\"\n match = re.match(uri_pattern, uri)\n if not match:\n raise ValueError(\n \"Uri not in supported format [<project>/]<key>[#<iteration>][:<tag>][@<uid>]\"\n )\n group_dict = match.groupdict()\n iteration = group_dict[\"iteration\"]\n if iteration is not None:\n try:\n iteration = int(iteration)\n except ValueError:\n raise ValueError(\n f\"illegal store path {uri}, iteration must be integer value\"\n )\n return (\n group_dict[\"project\"] or default_project,\n group_dict[\"key\"],\n iteration,\n group_dict[\"tag\"],\n group_dict[\"uid\"],\n )\n\n\ndef generate_object_uri(project, name, tag=None, hash_key=None):\n uri = f\"{project}/{name}\"\n\n # prioritize hash key over tag\n if hash_key:\n uri += f\"@{hash_key}\"\n elif tag:\n uri += f\":{tag}\"\n return uri\n\n\ndef generate_artifact_uri(project, key, tag=None, iter=None):\n artifact_uri = f\"{project}/{key}\"\n if iter is not None:\n artifact_uri = f\"{artifact_uri}#{iter}\"\n if tag is not None:\n artifact_uri = f\"{artifact_uri}:{tag}\"\n return artifact_uri\n\n\ndef extend_hub_uri_if_needed(uri):\n if not uri.startswith(hub_prefix):\n return uri, False\n name = uri[len(hub_prefix) :]\n tag = \"master\"\n if \":\" in name:\n loc = name.find(\":\")\n tag = name[loc + 1 :]\n name = name[:loc]\n\n # hub function directory name are with underscores instead of hyphens\n name = name.replace(\"-\", \"_\")\n return config.get_hub_url().format(name=name, tag=tag), True\n\n\ndef gen_md_table(header, rows=None):\n rows = [] if rows is None else rows\n\n def gen_list(items=None):\n items = [] if items is None else items\n out = \"|\"\n for i in items:\n out += f\" {i} |\"\n return out\n\n out = gen_list(header) + \"\\n\" + gen_list(len(header) * [\"---\"]) + \"\\n\"\n for r in rows:\n out += gen_list(r) + \"\\n\"\n return out\n\n\ndef gen_html_table(header, rows=None):\n rows = [] if rows is None else rows\n\n style = \"\"\"\n<style type=\"text/css\">\n.tg {border-collapse:collapse;border-spacing:0;}\n.tg td{border-style:solid;border-width:1px;padding:6px 4px;}\n.tg th{font-weight:normal;border-style:solid;border-width:1px;padding:6px 4px;}\n</style>\n\"\"\"\n\n def gen_list(items=None, tag=\"td\"):\n items = [] if items is None else items\n out = \"\"\n for item in items:\n out += f\"<{tag}>{item}</{tag}>\"\n return out\n\n out = \"<tr>\" + gen_list(header, \"th\") + \"</tr>\\n\"\n for r in rows:\n out += \"<tr>\" + gen_list(r, \"td\") + \"</tr>\\n\"\n return style + '<table class=\"tg\">\\n' + out + \"</table>\\n\\n\"\n\n\ndef new_pipe_meta(artifact_path=None, ttl=None, *args):\n from kfp.dsl import PipelineConf\n\n def _set_artifact_path(task):\n from kubernetes import client as k8s_client\n\n task.add_env_variable(\n k8s_client.V1EnvVar(name=\"MLRUN_ARTIFACT_PATH\", value=artifact_path)\n )\n return task\n\n conf = PipelineConf()\n ttl = ttl or int(config.kfp_ttl)\n if ttl:\n conf.set_ttl_seconds_after_finished(ttl)\n if artifact_path:\n conf.add_op_transformer(_set_artifact_path)\n for op in args:\n if op:\n conf.add_op_transformer(op)\n return conf\n\n\ndef _convert_python_package_version_to_image_tag(version: typing.Optional[str]):\n return (\n version.replace(\"+\", \"-\").replace(\"0.0.0-\", \"\") if version is not None else None\n )\n\n\ndef enrich_image_url(image_url: str, 
client_version: str = None) -> str:\n client_version = _convert_python_package_version_to_image_tag(client_version)\n server_version = _convert_python_package_version_to_image_tag(\n mlrun.utils.version.Version().get()[\"version\"]\n )\n image_url = image_url.strip()\n tag = config.images_tag or client_version or server_version\n registry = config.images_registry\n\n # it's an mlrun image if the repository is mlrun\n is_mlrun_image = image_url.startswith(\"mlrun/\") or \"/mlrun/\" in image_url\n\n if is_mlrun_image and tag and \":\" not in image_url:\n image_url = f\"{image_url}:{tag}\"\n\n enrich_registry = False\n # enrich registry only if images_to_enrich_registry provided\n # example: \"^mlrun/*\" means enrich only if the image repository is mlrun and registry is not specified (in which\n # case /mlrun/ will be part of the url)\n\n if config.images_to_enrich_registry:\n for pattern_to_enrich in config.images_to_enrich_registry.split(\",\"):\n if re.match(pattern_to_enrich, image_url):\n enrich_registry = True\n if registry and enrich_registry:\n registry = registry if registry.endswith(\"/\") else f\"{registry}/\"\n image_url = f\"{registry}{image_url}\"\n\n return image_url\n\n\ndef get_docker_repository_or_default(repository: str) -> str:\n if not repository:\n repository = \"mlrun\"\n return repository\n\n\ndef get_parsed_docker_registry() -> Tuple[Optional[str], Optional[str]]:\n # according to https://stackoverflow.com/questions/37861791/how-are-docker-image-names-parsed\n docker_registry = config.httpdb.builder.docker_registry\n first_slash_index = docker_registry.find(\"/\")\n # this is exception to the rules from the link above, since the config value is called docker_registry we assume\n # that if someone gave just one component without any slash they gave a registry and not a repository\n if first_slash_index == -1:\n return docker_registry, None\n if (\n docker_registry[:first_slash_index].find(\".\") == -1\n and docker_registry[:first_slash_index].find(\":\") == -1\n and docker_registry[:first_slash_index] != \"localhost\"\n ):\n return None, docker_registry\n else:\n return (\n docker_registry[:first_slash_index],\n docker_registry[first_slash_index + 1 :],\n )\n\n\ndef pr_comment(\n message: str,\n repo: str = None,\n issue: int = None,\n token=None,\n server=None,\n gitlab=False,\n):\n \"\"\"push comment message to Git system PR/issue\n\n :param message: test message\n :param repo: repo name (org/repo)\n :param issue: pull-request/issue number\n :param token: git system security token\n :param server: url of the git system\n :param gitlab: set to True for GitLab (MLRun will try to auto detect the Git system)\n \"\"\"\n if (\"CI_PROJECT_ID\" in environ) or (server and \"gitlab\" in server):\n gitlab = True\n token = token or environ.get(\"GITHUB_TOKEN\") or environ.get(\"GIT_TOKEN\")\n\n if gitlab:\n server = server or \"gitlab.com\"\n headers = {\"PRIVATE-TOKEN\": token}\n repo = repo or environ.get(\"CI_PROJECT_ID\")\n # auto detect GitLab pr id from the environment\n issue = issue or environ.get(\"CI_MERGE_REQUEST_IID\")\n repo = repo.replace(\"/\", \"%2F\")\n url = f\"https://{server}/api/v4/projects/{repo}/merge_requests/{issue}/notes\"\n else:\n server = server or \"api.github.com\"\n repo = repo or environ.get(\"GITHUB_REPOSITORY\")\n # auto detect pr number if not specified, in github the pr id is identified as an issue id\n # we try and read the pr (issue) id from the github actions event file/object\n if not issue and \"GITHUB_EVENT_PATH\" in environ:\n with 
open(environ[\"GITHUB_EVENT_PATH\"]) as fp:\n data = fp.read()\n event = json.loads(data)\n if \"issue\" not in event:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"issue not found in github actions event\\ndata={data}\"\n )\n issue = event[\"issue\"].get(\"number\")\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\",\n \"Authorization\": f\"token {token}\",\n }\n url = f\"https://{server}/repos/{repo}/issues/{issue}/comments\"\n resp = requests.post(url=url, json={\"body\": str(message)}, headers=headers)\n if not resp.ok:\n errmsg = f\"bad pr comment resp!!\\n{resp.text}\"\n raise IOError(errmsg)\n return resp.json()[\"id\"]\n\n\ndef fill_object_hash(object_dict, uid_property_name, tag=\"\"):\n # remove tag, hash, date from calculation\n object_dict.setdefault(\"metadata\", {})\n tag = tag or object_dict[\"metadata\"].get(\"tag\")\n status = object_dict.setdefault(\"status\", {})\n object_dict[\"metadata\"][\"tag\"] = \"\"\n object_dict[\"metadata\"][uid_property_name] = \"\"\n object_dict[\"status\"] = None\n object_dict[\"metadata\"][\"updated\"] = None\n object_created_timestamp = object_dict[\"metadata\"].pop(\"created\", None)\n data = json.dumps(object_dict, sort_keys=True).encode()\n h = hashlib.sha1()\n h.update(data)\n uid = h.hexdigest()\n object_dict[\"metadata\"][\"tag\"] = tag\n object_dict[\"metadata\"][uid_property_name] = uid\n object_dict[\"status\"] = status\n if object_created_timestamp:\n object_dict[\"metadata\"][\"created\"] = object_created_timestamp\n return uid\n\n\ndef fill_function_hash(function_dict, tag=\"\"):\n return fill_object_hash(function_dict, \"hash\", tag)\n\n\ndef create_linear_backoff(base=2, coefficient=2, stop_value=120):\n \"\"\"\n Create a generator of linear backoff. Check out usage example in test_helpers.py\n \"\"\"\n x = 0\n comparison = min if coefficient >= 0 else max\n\n while True:\n next_value = comparison(base + x * coefficient, stop_value)\n yield next_value\n x += 1\n\n\ndef create_step_backoff(steps=None):\n \"\"\"\n Create a generator of steps backoff.\n Example: steps = [[2, 5], [20, 10], [120, None]] will produce a generator in which the first 5\n values will be 2, the next 10 values will be 20 and the rest will be 120.\n :param steps: a list of lists [step_value, number_of_iteration_in_this_step]\n \"\"\"\n steps = steps if steps is not None else [[2, 10], [10, 10], [120, None]]\n steps = iter(steps)\n\n # Get first step\n step = next(steps)\n while True:\n current_step_value, current_step_remain = step\n if current_step_remain == 0:\n\n # No more in this step, moving on\n step = next(steps)\n elif current_step_remain is None:\n\n # We are in the last step, staying here forever\n yield current_step_value\n elif current_step_remain > 0:\n\n # Still more remains in this step, just reduce the remaining number\n step[1] -= 1\n yield current_step_value\n\n\ndef create_exponential_backoff(base=2, max_value=120, scale_factor=1):\n \"\"\"\n Create a generator of exponential backoff. 
Check out usage example in test_helpers.py\n :param base: exponent base\n :param max_value: max limit on the result\n :param scale_factor: factor to be used as linear scaling coefficient\n \"\"\"\n exponent = 1\n while True:\n\n # This \"complex\" implementation (unlike the one in linear backoff) is to avoid exponent growing too fast and\n # risking going behind max_int\n next_value = scale_factor * (base**exponent)\n if next_value < max_value:\n exponent += 1\n yield next_value\n else:\n yield max_value\n\n\ndef retry_until_successful(\n backoff: int, timeout: int, logger, verbose: bool, _function, *args, **kwargs\n):\n \"\"\"\n Runs function with given *args and **kwargs.\n Tries to run it until success or timeout reached (timeout is optional)\n :param backoff: can either be a:\n - number (int / float) that will be used as interval.\n - generator of waiting intervals. (support next())\n :param timeout: pass None if timeout is not wanted, number of seconds if it is\n :param logger: a logger so we can log the failures\n :param verbose: whether to log the failure on each retry\n :param _function: function to run\n :param args: functions args\n :param kwargs: functions kwargs\n :return: function result\n \"\"\"\n start_time = time.time()\n last_exception = None\n\n # Check if backoff is just a simple interval\n if isinstance(backoff, int) or isinstance(backoff, float):\n backoff = create_linear_backoff(base=backoff, coefficient=0)\n\n # If deadline was not provided or deadline not reached\n while timeout is None or time.time() < start_time + timeout:\n next_interval = next(backoff)\n try:\n result = _function(*args, **kwargs)\n return result\n\n except mlrun.errors.MLRunFatalFailureError as exc:\n raise exc.original_exception\n except Exception as exc:\n last_exception = exc\n\n # If next interval is within allowed time period - wait on interval, abort otherwise\n if timeout is None or time.time() + next_interval < start_time + timeout:\n if logger is not None and verbose:\n logger.debug(\n f\"Operation not yet successful, Retrying in {next_interval} seconds. exc: {exc}\"\n )\n\n time.sleep(next_interval)\n else:\n break\n\n if logger is not None:\n logger.warning(\n f\"Operation did not complete on time. 
last exception: {last_exception}\"\n )\n\n raise Exception(\n f\"failed to execute command by the given deadline.\"\n f\" last_exception: {last_exception},\"\n f\" function_name: {_function.__name__},\"\n f\" timeout: {timeout}\"\n )\n\n\ndef get_ui_url(project, uid=None):\n url = \"\"\n if mlrun.mlconf.resolve_ui_url():\n url = \"{}/{}/{}/jobs\".format(\n mlrun.mlconf.resolve_ui_url(), mlrun.mlconf.ui.projects_prefix, project\n )\n if uid:\n url += f\"/monitor/{uid}/overview\"\n return url\n\n\ndef get_workflow_url(project, id=None):\n url = \"\"\n if mlrun.mlconf.resolve_ui_url():\n url = \"{}/{}/{}/jobs/monitor-workflows/workflow/{}\".format(\n mlrun.mlconf.resolve_ui_url(), mlrun.mlconf.ui.projects_prefix, project, id\n )\n return url\n\n\ndef are_strings_in_exception_chain_messages(\n exception: Exception, strings_list=typing.List[str]\n) -> bool:\n while exception is not None:\n if any([string in str(exception) for string in strings_list]):\n return True\n exception = exception.__cause__\n return False\n\n\nclass RunNotifications:\n def __init__(self, with_ipython=True, with_slack=False, secrets=None):\n self._hooks = []\n self._html = \"\"\n self._with_print = False\n self._secrets = secrets or {}\n self.with_ipython = with_ipython\n if with_slack and \"SLACK_WEBHOOK\" in environ:\n self.slack()\n self.print(skip_ipython=True)\n\n def push_start_message(\n self, project, commit_id=None, id=None, has_workflow_url=False\n ):\n message = f\"Pipeline started in project {project}\"\n if id:\n message += f\" id={id}\"\n commit_id = (\n commit_id or environ.get(\"GITHUB_SHA\") or environ.get(\"CI_COMMIT_SHA\")\n )\n if commit_id:\n message += f\", commit={commit_id}\"\n if has_workflow_url:\n url = get_workflow_url(project, id)\n else:\n url = get_ui_url(project)\n html = \"\"\n if url:\n html = (\n message\n + f'<div><a href=\"{url}\" target=\"_blank\">click here to view progress</a></div>'\n )\n message = message + f\", check progress in {url}\"\n self.push(message, html=html)\n\n def push_run_results(self, runs, push_all=False, state=None):\n \"\"\"push a structured table with run results to notification targets\n\n :param runs: list if run objects (RunObject)\n :param push_all: push all notifications (including already notified runs)\n :param state: final run state\n \"\"\"\n had_errors = 0\n runs_list = []\n for r in runs:\n notified = getattr(r, \"_notified\", False)\n if not notified or push_all:\n if r.status.state == \"error\":\n had_errors += 1\n runs_list.append(r.to_dict())\n r._notified = True\n\n text = \"pipeline run finished\"\n if had_errors:\n text += f\" with {had_errors} errors\"\n if state:\n text += f\", state={state}\"\n self.push(text, runs_list)\n\n def push(self, message, runs=None, html=None):\n if isinstance(runs, list):\n runs = mlrun.lists.RunList(runs)\n self._html = None\n for h in self._hooks:\n try:\n h(message, runs, html)\n except Exception as exc:\n logger.warning(f\"failed to push notification, {exc}\")\n if self.with_ipython and is_ipython:\n import IPython\n\n IPython.display.display(\n IPython.display.HTML(self._get_html(html or message, runs))\n )\n\n def _get_html(self, message, runs):\n if self._html:\n return self._html\n if not runs:\n return message\n\n html = \"<h2>Run Results</h2>\" + message\n html += \"<br>click the hyper links below to see detailed results<br>\"\n html += runs.show(display=False, short=True)\n self._html = html\n return html\n\n def print(self, skip_ipython=None):\n def _print(message, runs, html=None):\n if not runs:\n 
print(message)\n return\n\n table = []\n for r in runs:\n state = r[\"status\"].get(\"state\", \"\")\n if state == \"error\":\n result = r[\"status\"].get(\"error\", \"\")\n else:\n result = dict_to_str(r[\"status\"].get(\"results\", {}))\n\n table.append(\n [\n state,\n r[\"metadata\"][\"name\"],\n \"..\" + r[\"metadata\"][\"uid\"][-6:],\n result,\n ]\n )\n print(\n message\n + \"\\n\"\n + tabulate(table, headers=[\"status\", \"name\", \"uid\", \"results\"])\n )\n\n if not self._with_print and not (\n skip_ipython and self.with_ipython and is_ipython\n ):\n self._hooks.append(_print)\n self._with_print = True\n return self\n\n def slack(self, webhook=\"\"):\n emoji = {\"completed\": \":smiley:\", \"running\": \":man-running:\", \"error\": \":x:\"}\n webhook = (\n webhook\n or environ.get(\"SLACK_WEBHOOK\")\n or self._secrets.get(\"SLACK_WEBHOOK\")\n )\n if not webhook:\n raise ValueError(\"Slack webhook is not set\")\n\n def row(text):\n return {\"type\": \"mrkdwn\", \"text\": text}\n\n def _slack(message, runs, html=None):\n fields = [row(\"*Runs*\"), row(\"*Results*\")]\n for r in runs or []:\n meta = r[\"metadata\"]\n url = get_ui_url(meta.get(\"project\"), meta.get(\"uid\"))\n if url:\n line = f'<{url}|*{meta.get(\"name\")}*>'\n else:\n line = meta.get(\"name\")\n state = r[\"status\"].get(\"state\", \"\")\n line = f'{emoji.get(state, \":question:\")} {line}'\n\n fields.append(row(line))\n if state == \"error\":\n error_status = r[\"status\"].get(\"error\", \"\")\n result = f\"*{error_status}*\"\n else:\n result = dict_to_str(r[\"status\"].get(\"results\", {}), \", \")\n fields.append(row(result or \"None\"))\n\n data = {\n \"blocks\": [\n {\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": message}}\n ]\n }\n\n if runs:\n for i in range(0, len(fields), 8):\n data[\"blocks\"].append(\n {\"type\": \"section\", \"fields\": fields[i : i + 8]}\n )\n response = requests.post(\n webhook,\n data=json.dumps(data),\n headers={\"Content-Type\": \"application/json\"},\n )\n response.raise_for_status()\n\n self._hooks.append(_slack)\n return self\n\n def git_comment(\n self, git_repo=None, git_issue=None, token=None, server=None, gitlab=False\n ):\n def _comment(message, runs, html=None):\n pr_comment(\n self._get_html(html or message, runs),\n git_repo,\n git_issue,\n token=token\n or self._secrets.get(\"GIT_TOKEN\")\n or self._secrets.get(\"GITHUB_TOKEN\"),\n server=server,\n gitlab=gitlab,\n )\n\n self._hooks.append(_comment)\n return self\n\n\ndef create_class(pkg_class: str):\n \"\"\"Create a class from a package.module.class string\n\n :param pkg_class: full class location,\n e.g. \"sklearn.model_selection.GroupKFold\"\n \"\"\"\n splits = pkg_class.split(\".\")\n clfclass = splits[-1]\n pkg_module = splits[:-1]\n class_ = getattr(import_module(\".\".join(pkg_module)), clfclass)\n return class_\n\n\ndef create_function(pkg_func: list):\n \"\"\"Create a function from a package.module.function string\n\n :param pkg_func: full function location,\n e.g. 
\"sklearn.feature_selection.f_classif\"\n \"\"\"\n splits = pkg_func.split(\".\")\n pkg_module = \".\".join(splits[:-1])\n cb_fname = splits[-1]\n pkg_module = __import__(pkg_module, fromlist=[cb_fname])\n function_ = getattr(pkg_module, cb_fname)\n return function_\n\n\ndef get_caller_globals(level=2):\n try:\n return inspect.stack()[level][0].f_globals\n except Exception:\n return None\n\n\ndef _module_to_namespace(namespace):\n if isinstance(namespace, ModuleType):\n members = inspect.getmembers(\n namespace, lambda o: inspect.isfunction(o) or isinstance(o, type)\n )\n return {key: mod for key, mod in members}\n return namespace\n\n\ndef _search_in_namespaces(name, namespaces):\n \"\"\"search the class/function in a list of modules\"\"\"\n if not namespaces:\n return None\n if not isinstance(namespaces, list):\n namespaces = [namespaces]\n for namespace in namespaces:\n namespace = _module_to_namespace(namespace)\n if name in namespace:\n return namespace[name]\n return None\n\n\ndef get_class(class_name, namespace=None):\n \"\"\"return class object from class name string\"\"\"\n if isinstance(class_name, type):\n return class_name\n class_object = _search_in_namespaces(class_name, namespace)\n if class_object is not None:\n return class_object\n\n try:\n class_object = create_class(class_name)\n except (ImportError, ValueError) as exc:\n raise ImportError(f\"state init failed, class {class_name} not found, {exc}\")\n return class_object\n\n\ndef get_function(function, namespace):\n \"\"\"return function callable object from function name string\"\"\"\n if callable(function):\n return function\n\n function = function.strip()\n if function.startswith(\"(\"):\n if not function.endswith(\")\"):\n raise ValueError('function expression must start with \"(\" and end with \")\"')\n return eval(\"lambda event: \" + function[1:-1], {}, {})\n function_object = _search_in_namespaces(function, namespace)\n if function_object is not None:\n return function_object\n\n try:\n function_object = create_function(function)\n except (ImportError, ValueError) as exc:\n raise ImportError(\n f\"state/function init failed, handler {function} not found, {exc}\"\n )\n return function_object\n\n\ndef get_handler_extended(\n handler_path: str, context=None, class_args: dict = {}, namespaces=None\n):\n \"\"\"get function handler from [class_name::]handler string\n\n :param handler_path: path to the function ([class_name::]handler)\n :param context: MLRun function/job client context\n :param class_args: optional dict of class init kwargs\n :param namespaces: one or list of namespaces/modules to search the handler in\n :return: function handler (callable)\n \"\"\"\n if \"::\" not in handler_path:\n return get_function(handler_path, namespaces)\n\n splitted = handler_path.split(\"::\")\n class_path = splitted[0].strip()\n handler_path = splitted[1].strip()\n\n class_object = get_class(class_path, namespaces)\n argspec = inspect.getfullargspec(class_object)\n if argspec.varkw or \"context\" in argspec.args:\n class_args[\"context\"] = context\n try:\n instance = class_object(**class_args)\n except TypeError as exc:\n raise TypeError(f\"failed to init class {class_path}, {exc}\\n args={class_args}\")\n\n if not hasattr(instance, handler_path):\n raise ValueError(\n f\"handler ({handler_path}) specified but doesnt exist in class {class_path}\"\n )\n return getattr(instance, handler_path)\n\n\ndef datetime_from_iso(time_str: str) -> Optional[datetime]:\n if not time_str:\n return\n return 
parser.isoparse(time_str)\n\n\ndef datetime_to_iso(time_obj: Optional[datetime]) -> Optional[str]:\n if not time_obj:\n return\n return time_obj.isoformat()\n\n\ndef as_list(element: Any) -> List[Any]:\n return element if isinstance(element, list) else [element]\n\n\ndef calculate_local_file_hash(filename):\n h = hashlib.sha1()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with open(filename, \"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n h.update(mv[:n])\n return h.hexdigest()\n\n\ndef fill_artifact_path_template(artifact_path, project):\n # Supporting {{project}} is new, in certain setup configuration the default artifact path has the old\n # {{run.project}} so we're supporting it too for backwards compatibility\n if artifact_path and (\n \"{{run.project}}\" in artifact_path or \"{{project}}\" in artifact_path\n ):\n if not project:\n raise mlrun.errors.MLRunInvalidArgumentError(\n \"project name must be specified with this\"\n + f\" artifact_path template {artifact_path}\"\n )\n artifact_path = artifact_path.replace(\"{{run.project}}\", project)\n artifact_path = artifact_path.replace(\"{{project}}\", project)\n return artifact_path\n\n\ndef str_to_timestamp(time_str: str, now_time: Timestamp = None):\n \"\"\"convert fixed/relative time string to Pandas Timestamp\n\n can use relative times using the \"now\" verb, and align to floor using the \"floor\" verb\n\n time string examples::\n\n 1/1/2021\n now\n now + 1d2h\n now -1d floor 1H\n \"\"\"\n if not isinstance(time_str, str):\n return time_str\n\n time_str = time_str.strip()\n if time_str.lower().startswith(\"now\"):\n # handle now +/- timedelta\n timestamp: Timestamp = now_time or Timestamp.now()\n time_str = time_str[len(\"now\") :].lstrip()\n split = time_str.split(\"floor\")\n time_str = split[0].strip()\n\n if time_str and time_str[0] in [\"+\", \"-\"]:\n timestamp = timestamp + Timedelta(time_str)\n elif time_str:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"illegal time string expression now{time_str}, \"\n 'use \"now +/- <timestring>\" for relative times'\n )\n\n if len(split) > 1:\n timestamp = timestamp.floor(split[1].strip())\n return timestamp\n\n return Timestamp(time_str)\n\n\ndef is_legacy_artifact(artifact):\n if isinstance(artifact, dict):\n return \"metadata\" not in artifact\n else:\n return not hasattr(artifact, \"metadata\")\n\n\ndef set_paths(pythonpath=\"\"):\n \"\"\"update the sys path\"\"\"\n if not pythonpath:\n return\n paths = pythonpath.split(\":\")\n for p in paths:\n abspath = path.abspath(p)\n if abspath not in sys.path:\n sys.path.append(abspath)\n"
]
| [
[
"pandas._libs.tslibs.timestamps.Timestamp",
"pandas._libs.tslibs.timestamps.Timestamp.now",
"pandas._libs.tslibs.timestamps.Timedelta"
]
]
|
XSMUBC/DNC-lifelong-learning | [
"55b40bad65eb3cb68c50411acf8f770bfc52e3d9"
]
| [
"maindnc.py"
]
| [
"import torch\nimport tensorflow as tf\nimport numpy as np\nimport statistics \nfrom torch.nn import functional as F\nimport torch.distributions as tdist\n\n\nimport visual_visdom\nimport visual_plt\nimport utils\nimport matplotlib.pyplot as plt\n\n#########################################################\n## maindnc xsm code ##\n#########################################################\n\ndef maindnc(self, size, batch_index,z0,task,tasks,t_label):\n \n '''\n if list(z0.size())[0]!=0:\n #estimation of the mean and variance\n zx=z0\n mean=(zx.mean(dim=1)).mean(dim=0)\n var=(zx.std(dim=1)).mean(dim=0)\n #print('xsm mean',mean)\n #print('xsm xsm var',var)\n\n else:\n\n #estimate in begining\n mean=0\n var=1.6\n '''\n \n mean=0\n var=1.6\n n = tdist.Normal(mean, var)\n z1 =n.sample((size, self.z_dim)).to(self._device())\n\n t_label =n.sample((size, self.z_dim)).to(t_label)\n\n \n if (task<=round((tasks+1)/2)):\n z2=torch.cat((z0,z1,z1), 0) \n else:\n z2=torch.cat((z0,z1), 0) \n \n\n \n\n dl=64\n m=int(list(z1.size())[0]/dl)\n n=int(list(z0.size())[0]/dl)\n\n \n if list(z0.size())[0]!=0:\n\n for i in range(m):\n rows1 =z1[i*dl:i*dl+dl,:]\n\n tensor_similarity=0\n for j in range(n):\n rows2 = z0[j*dl:j*dl+dl,:]\n x = rows1\n y = rows2\n cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)\n tensor_similarity+=torch.sum(cos(x, y))\n\n\n if (tensor_similarity<0):\n z2=torch.cat((z2,torch.reshape(rows1, (dl, 100))), 0) \n\n\n image_tensor=z1\n\n\n print('xsm xsm xsm xsm z2',z2[:,:(-1)])\n\n plt.imsave('./plots/save.png', image_tensor.numpy() , cmap='gray')\n\n\n if batch_index==2000:\n\n torch.save(z2, 'dnc.pt')\n\n\n return z2,t_label\n\n\n\n"
]
| [
[
"torch.cat",
"torch.distributions.Normal",
"torch.save",
"torch.nn.CosineSimilarity",
"torch.reshape"
]
]
|
plai-group/ccfs-python | [
"532fd9a13eda79361d3028d02a3a91e8b17fb5fa"
]
| [
"src/training_utils/component_analysis.py"
]
| [
"import numpy as np\nimport scipy.linalg as la\nfrom src.utils.ccfUtils import randomRotation\nfrom src.utils.commonUtils import sVT\nfrom src.utils.commonUtils import amerge\nfrom src.utils.commonUtils import dict2array\nfrom src.utils.commonUtils import queryIfColumnsVary\n\n\ndef isSquare(x):\n # Check if a numpy array is Square matrix, i.e. NxN\n if len(x.shape) <= 1:\n return False\n else:\n if x.shape[0] == x.shape[1]:\n return True\n else:\n return False\n\n\ndef componentAnalysis(X, Y, processes, epsilon):\n \"\"\"\n Carries out a a section of component analyses on X and Y to produce a\n projection matrix projMat which maps X to its components. Valid\n projections are CCA, PCA, CCA-classwise, Original axes and Random Rotation.\n \"\"\"\n probs = dict2array(X=processes) * 1\n # Sample projections to use if some set to be probabilistically used\n bToSample = np.logical_and((probs > 0), (probs < 1))\n if np.any(bToSample):\n # TODO: Ignoring for now\n probs[~bToSample] = 0\n cumprobs = probs.cumsum(axis=0)/np.sum(probs)\n iSampled = np.sum(np.random.rand() > cumprobs) + 1\n iToSample = bToSample.ravel().nonzero()[0]\n for n in range(iToSample.size):\n processes[iToSample[n]] = False\n processes[iSampled] = True\n\n # Eliminate any columns that don't vary. We will add these back into the\n # projection matrices at the end\n bXVaries = queryIfColumnsVary(X=X, tol=1e-12)\n bYvaries = queryIfColumnsVary(X=Y, tol=1e-12)\n nXorg = bXVaries.size\n nYorg = bYvaries.size\n\n if ~(np.any(bXVaries)) or ~(np.any(bYvaries)):\n # One of X or Y doesn't vary so component analysis fails.\n # Return projection corresponding to first columns of X and Y\n A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))\n B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 1))))\n U = X[:, 0]\n V = Y[:, 0]\n r = 0\n\n return A, B, U, V, r\n\n X = X[:, bXVaries]\n Y = Y[:, bYvaries]\n\n # Checks and sizes\n x1, x2 = X.shape\n assert (Y.shape[0] == x1), 'Input sizes do not match!'\n assert (x1 != 1), 'Cannot carry out component analysis with only one point!'\n K = Y.shape[1]\n\n # Subtraction of the mean is common to the process of calculating the\n # projection matrices for both CCA and PCA but for computational\n # effificently we don't make this translation when actually applying the\n # projections to choose the splits as it is the same effect on all points.\n # In other words, we don't split in canonical component space exactly, but\n # in a constant translation of this space.\n muX = np.divide(np.sum(X, axis=0), X.shape[0])\n muY = np.divide(np.sum(Y, axis=0), Y.shape[0])\n\n X = np.subtract(X, muX)\n Y = np.subtract(Y, muY)\n\n # Initialize the project matrices\n projMat = np.full((X.shape[1], 0), np.nan)\n yprojMat = np.full((Y.shape[1], 0), np.nan)\n r = np.array([])\n\n if processes['Original']:\n projMat = np.concatenate((projMat, np.eye(x2)))\n\n if processes['Random']:\n projMat = np.concatenate((projMat, randomRotation(N=x2)))\n\n if processes['PCA']:\n # PCA projection\n pcaCoeff, _, _ = pcaLite(X=X)\n projMat = np.concatenate((projMat, pcaCoeff))\n \n if processes['CCA'] or processes['CCAclasswise']:\n # CCA based projections\n q1, r1, p1 = la.qr(X, pivoting=True, mode='economic')\n # Reduce to full rank within some tolerance\n if r1.size == 0:\n rankX = 0\n else:\n rankX = np.sum(np.absolute(np.diag(r1)) >= (epsilon * np.absolute(r1[0, 0])))\n\n if rankX == 0:\n A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))\n B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 
1))))\n U = X[:, 0]\n V = Y[:, 0]\n r = 0\n\n return A, B, U, V, r\n\n elif rankX < x2:\n q1 = q1[:, 0:rankX]\n r1 = r1[0:rankX, 0:rankX]\n \n if processes['CCA']:\n q2, r2, p2 = la.qr(Y, mode='economic', pivoting=True)\n\n # Reduce to full rank within some tolerance\n if r2.size == 0:\n rankY = 0\n else:\n rankY = np.sum(np.absolute(np.diag(r2)) >= (epsilon * np.absolute(r2[0, 0])))\n\n if rankY == 0:\n A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))\n B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 1))))\n U = X[:, 0]\n V = Y[:, 0]\n r = 0\n\n return A, B, U, V, r\n\n elif rankY < K:\n q2 = q2[:, 0:rankY]\n\n # Solve CCA using the decompositions, taking care to use minimal\n # complexity orientation for SVD. Note the two calculations are\n # equivalent except in computational complexity\n d = np.min((rankX, rankY))\n\n if rankX >= rankY:\n L, D, M = np.linalg.svd(np.dot(q1.T, q2), full_matrices=False)\n D = np.diag(D)\n M = M.T\n else:\n M, D, L = np.linalg.svd(np.dot(q2.T, q1), full_matrices=False)\n D = np.diag(D)\n L = L.T\n\n if isSquare(r1):\n locProj = np.linalg.solve(r1, L[:, 0:d] * np.sqrt(x1 - 1))\n else:\n locProj, _, _, _ = np.linalg.lstsq(r1, L[:, 0:d] * np.sqrt(x1 - 1), rcond=-1)\n\n # Put coefficients back to their full size and their correct order\n if x2-rankX != 0:\n locProj = np.concatenate((locProj, np.zeros((x2-rankX, d))), axis=0)\n locProj[p1, :] = [locProj]\n projMat = np.concatenate((projMat, locProj), axis=1) # Maybe fix with axis\n\n # Projection For Y\n r2 = r2[0:rankY, 0:rankY]\n if isSquare(r2):\n locyProj = np.linalg.solve(r2, M[:, 0:d] * np.sqrt(x1-1))\n else:\n locyProj, _, _, _ = np.linalg.lstsq(r2, M[:, 0:d] * np.sqrt(x1-1), rcond=-1)\n\n # Put coefficients back to their full size and their correct order\n if K-rankY != 0:\n locyProj = np.concatenate((locyProj, np.zeros((K-rankY, d))), axis=0)\n locyProj[p2, :] = [locyProj]\n yprojMat = np.concatenate((yprojMat, locyProj), axis=1)\n\n r = np.minimum(np.maximum(np.diag(D[:, 0:d]), 0), 1)\n\n if processes['CCAclasswise']:\n # Consider each output in an in / out fashion to generate a set of K projections.\n for k in range(K):\n L, _, _ = la.svd(np.dot(q1.T, Y[:, k]), full_matrices=False)\n if isSquare(r1):\n locProj = np.linalg.solve(r1, L[:, 0] * np.sqrt(x1-1))\n else:\n locProj = np.linalg.lstsq(r1, L[:, 0] * np.sqrt(x1-1))\n if x2-rankX != 0:\n locProj[p1, :] = np.concatenate((locProj, np.zeros((x2-rankX, 1))))\n locProj[p1, :] = [locProj]\n projMat = np.concatenate((projMat,locProj), axis=1)\n\n # Normalize the projection matrices. This ensures that the later tests for\n # close points are triggered appropriately and is useful for interpretability.\n projMat = np.divide(projMat, np.sqrt(np.sum(projMat**2, axis=0)))\n\n # Note that as in general only a projection matrix is given, we need to\n # add the mean back to be consistent with general use. 
This equates to\n # addition of a constant term to each column in U\n U = np.dot(X, projMat)\n V = np.dot(Y, yprojMat)\n\n # Finally, add back in the empty rows in the projection matrix for the\n # things which didn't vary\n A = np.zeros((nXorg, projMat.shape[1]))\n if len(bXVaries.shape) > 1 and bXVaries.shape[0] == 1:\n A[bXVaries[0], :] = projMat\n elif len(bXVaries.shape) > 1 and bXVaries.shape[1] == 1:\n A[bXVaries[:, 0], :] = projMat\n else:\n A[bXVaries, :] = projMat\n\n B = np.zeros((nYorg, yprojMat.shape[1]))\n if len(bYvaries.shape) > 1 and bYvaries.shape[0] == 1:\n B[bYvaries[0], :] = yprojMat\n elif len(bYvaries.shape) > 1 and bYvaries.shape[1] == 1:\n B[bYvaries[:, 0], :] = yprojMat\n else:\n B[bYvaries, :] = yprojMat\n\n return A, B, U, V, r\n"
]
| [
[
"numpy.concatenate",
"numpy.full",
"numpy.array",
"numpy.dot",
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"numpy.min",
"numpy.logical_and",
"numpy.eye",
"numpy.any",
"numpy.subtract",
"scipy.linalg.qr",
"numpy.sqrt",
"numpy.absolute",
"numpy.diag"
]
]
|
Cospui/cotk | [
"9038420787f7251049534baf3b35eac538a82148"
]
| [
"models/seq2seq-pytorch/test_seq2seq_pytorch.py"
]
| [
"import pytest\nimport random\nfrom run import run\nfrom main import main\nimport os\nimport shutil\ncwd = os.path.abspath(os.path.dirname(__file__))\npath = os.path.split(cwd)[0]\npath = os.path.split(path)[0]\n\ndef setup_function(function):\n\timport sys\n\tsys.argv = ['python3']\n\trandom.seed(0)\n\timport numpy as np\n\tnp.random.seed(0)\n\timport torch\n\ttorch.manual_seed(0)\n\ttorch.cuda.manual_seed_all(0)\n\ttry:\n\t\tshutil.rmtree(cwd + '/output_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/tensorboard_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/model_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/cache_test')\n\texcept Exception:\n\t\tpass\n\tos.mkdir(cwd + '/output_test')\n\tos.mkdir(cwd + '/tensorboard_test')\n\tos.mkdir(cwd + '/model_test')\n\tos.mkdir(cwd + '/cache_test')\n\ndef teardown_function(function):\n\tshutil.rmtree(cwd + '/output_test')\n\tshutil.rmtree(cwd + '/tensorboard_test')\n\tshutil.rmtree(cwd + '/model_test')\n\tshutil.rmtree(cwd + '/cache_test')\n\ndef modify_args(args):\n\targs.cuda = False\n\targs.restore = None\n\targs.wvclass = 'Glove'\n\targs.wvpath = path + '/tests/wordvector/dummy_glove'\n\targs.out_dir = cwd + '/output_test'\n\targs.log_dir = cwd + '/tensorboard_test'\n\targs.model_dir = cwd + '/model_test'\n\targs.cache_dir = cwd + '/cache_test'\n\n\targs.name = 'test_seq2seq_pytorch'\n\targs.wvclass = 'Glove'\n\targs.epochs = 1\n\targs.batch_per_epoch = 5\n\targs.batch_size = 5\n\targs.datapath = path + '/tests/dataloader/dummy_opensubtitles'\n\ndef test_train(mocker):\n\tdef side_effect_train(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\tmain(args)\n\tdef side_effect_restore(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\targs.restore = 'last'\n\t\tmain(args)\n\tdef side_effect_cache(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\targs.cache = True\n\t\tmain(args)\n\tmock = mocker.patch('main.main', side_effect=side_effect_train)\n\trun()\n\tmock.side_effect = side_effect_restore\n\trun()\n\tmock.side_effect = side_effect_cache\n\trun()\n\ndef test_test(mocker):\n\tdef side_effect_test(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'test'\n\t\tmain(args)\n\tmock = mocker.patch('main.main', side_effect=side_effect_test)\n\trun()\n"
]
| [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all"
]
]
|
RobertMcCarter/animal-finder | [
"5ac839a65df62ab312e440ce43416727492e84d8"
]
| [
"utils/excelFileUtils/convertExcelMultiCameraToListOfTaggedImagePaths.py"
]
| [
"r\"\"\"\n Processes the Excel file in:\n D:\\data\\NRSI\\2140_Turtle Nesting-Wildlife-Cameras-2019\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\n\n# First - load all the folders\nbaseFolder = r\"D:\\data\\NRSI\\2140_Turtle Nesting-Wildlife-Cameras-2019\"\nexcelFileToValidate = os.path.join(baseFolder, r\"taggedImages-summary.xlsx\")\n\n# Load the Excel file with the file paths to validate\ndf = pd.read_excel(excelFileToValidate, \"RAM-Data\")\ndf = df.replace(np.nan, \"\", regex=True)\n\ncount = 0\ntaggedImagePaths: list[str] = []\nfor i, row in df.iterrows():\n count += 1\n\n # Try and figure out the tagged image name\n camera = \"Camera-\" + str(row[\"Camera\"])\n subFolder = str(row[\"Folder\"])\n file = str(row[\"File\"])\n\n taggedImagePath: str = os.path.join(baseFolder, camera, subFolder, file)\n if os.path.isfile(taggedImagePath):\n print(taggedImagePath)\n taggedImagePaths.append(taggedImagePath)\n # else:\n # print(f'Failed to find tagged image from row: {i} - \"{taggedImagePath}\"')\n\n\nnumTaggedImages = len(taggedImagePaths)\nnumMissingImages = count - numTaggedImages\nprint(\n f\"Found a total of {numTaggedImages} tagged images - out of {count} (missing {numMissingImages})\"\n)\n"
]
| [
[
"pandas.read_excel"
]
]
|
thlautenschlaeger/mushroom-rl | [
"2847b40953b37a2bf0e9ac6241e50312a5357ded"
]
| [
"examples/pendulum_sac_100Hz_options.py"
]
| [
"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport os\n\nfrom mushroom_rl.algorithms.actor_critic import SAC\nfrom mushroom_rl.algorithms.actor_critic.deep_actor_critic.sac import OptionSAC\nfrom mushroom_rl.core import Core\nfrom mushroom_rl.core.core import OptionCore\nfrom mushroom_rl.environments.gym_env import Gym\nfrom mushroom_rl.sds.envs.option_model import OptionSwitchingModel\nfrom mushroom_rl.utils.dataset import compute_J\n\nimport mushroom_rl.sds\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, n_features, **kwargs):\n super().__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._h1 = nn.Linear(n_input, n_features)\n self._h2 = nn.Linear(n_features, n_features)\n self._h3 = nn.Linear(n_features, n_output)\n\n nn.init.xavier_uniform_(self._h1.weight,\n gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._h2.weight,\n gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._h3.weight,\n gain=nn.init.calculate_gain('linear'))\n\n def forward(self, state, action):\n state_action = torch.cat((state.float(), action.float()), dim=1)\n features1 = F.relu(self._h1(state_action))\n features2 = F.relu(self._h2(features1))\n q = self._h3(features2)\n\n return torch.squeeze(q)\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, n_features, **kwargs):\n super(ActorNetwork, self).__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._h1 = nn.Linear(n_input, n_features)\n self._h2 = nn.Linear(n_features, n_features)\n self._h3 = nn.Linear(n_features, n_output)\n\n nn.init.xavier_uniform_(self._h1.weight,\n gain=nn.init.calculate_gain('tanh'))\n nn.init.xavier_uniform_(self._h2.weight,\n gain=nn.init.calculate_gain('tanh'))\n nn.init.xavier_uniform_(self._h3.weight,\n gain=nn.init.calculate_gain('linear'))\n\n def forward(self, state):\n features1 = torch.tanh(self._h1(torch.squeeze(state, 1).float()))\n features2 = torch.tanh(self._h2(features1))\n a = self._h3(features2)\n\n return a\n\n\ndef experiment(alg, n_epochs, n_steps, n_steps_test, seed):\n np.random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # MDP\n horizon = 1000\n gamma = 0.99\n mdp = Gym('Pendulum-ID-v1', horizon, gamma)\n mdp.seed(seed)\n rarhmm = torch.load(\n os.path.abspath(os.path.join(__file__, '..', '..')) + '/mushroom_rl/sds/envs/hybrid/models/neural_rarhmm_pendulum_cart.pkl',\n map_location='cpu')\n # mdp = Gym('Pendulum-v0', horizon, gamma)\n\n # Settings\n initial_replay_size = 512\n max_replay_size = 50000 * 4\n batch_size = 512\n n_critic_features = 64\n n_actor_features = 14\n warmup_transitions = 512\n tau = 0.005\n lr_alpha = 3e-4\n\n use_cuda = torch.cuda.is_available()\n\n # Approximator\n actor_input_shape = mdp.info.observation_space.shape\n actor_mu_params = dict(network=ActorNetwork,\n n_features=n_actor_features,\n input_shape=actor_input_shape,\n output_shape=mdp.info.action_space.shape,\n use_cuda=use_cuda)\n actor_sigma_params = dict(network=ActorNetwork,\n n_features=n_actor_features,\n input_shape=actor_input_shape,\n output_shape=mdp.info.action_space.shape,\n use_cuda=use_cuda)\n\n actor_optimizer = {'class': optim.Adam,\n 'params': {'lr': 3e-4}}\n\n critic_input_shape = (actor_input_shape[0] + mdp.info.action_space.shape[0],)\n critic_params = dict(network=CriticNetwork,\n optimizer={'class': optim.Adam,\n 'params': {'lr': 3e-4}},\n loss=F.mse_loss,\n 
n_features=n_critic_features,\n input_shape=critic_input_shape,\n output_shape=(1,),\n use_cuda=use_cuda)\n\n # Agent\n agent = alg(mdp.info, actor_mu_params, actor_sigma_params,\n actor_optimizer, critic_params, batch_size, initial_replay_size,\n max_replay_size, warmup_transitions, tau, lr_alpha,\n critic_fit_params=None, rarhmm=rarhmm)\n\n option_switch_model = OptionSwitchingModel(rarhmm)\n\n # Algorithm\n core = OptionCore(agent, mdp, option_switch_model=option_switch_model)\n\n core.learn(n_steps=initial_replay_size, n_steps_per_fit=initial_replay_size)\n\n J_results = []\n dataset_results = []\n # RUN\n\n dataset = core.evaluate(n_steps=n_steps_test, render=False)\n gamma = 1 # set gamma to 1 to compute cummulated reward\n J = compute_J(dataset, gamma)\n print('J: ', np.mean(J))\n J_results.append({'J_mean': np.mean(J), 'J_std': np.std(J)})\n dataset_results.append(dataset)\n\n for n in range(n_epochs):\n print('Epoch: ', n)\n core.learn(n_steps=n_steps, n_steps_per_fit=1)\n dataset = core.evaluate(n_steps=n_steps_test, render=False)\n J = compute_J(dataset, gamma)\n print('J: ', np.mean(J))\n J_results.append({'J_mean': np.mean(J), 'J_std': np.std(J)})\n dataset_results.append(dataset)\n\n print('Press a button to visualize pendulum')\n # input()\n return core.evaluate(n_episodes=1, render=False), J_results, dataset_results\n\n\nif __name__ == '__main__':\n seeds = [42069, 69, 420, 1337, 404, 42, 9000, 300]\n experiments = []\n algs = [\n OptionSAC\n ]\n\n for seed in seeds:\n for alg in algs:\n print('Algorithm: ', alg.__name__)\n samples_per_episode = 4000\n eval_steps = 10000\n n_epochs = 50\n dataset, J_results, dataset_results = experiment(alg=alg, n_epochs=n_epochs, n_steps=samples_per_episode, n_steps_test=eval_steps, seed=seed)\n # dataset = experiment(alg=alg, n_epochs=40, n_steps=5000, n_steps_test=2000)\n experiment_results = {'J_results': J_results,\n 'dataset_results': dataset_results,\n 'epochs': n_epochs,\n 'samples_per_episode': samples_per_episode,\n 'eval_steps': eval_steps,\n 'seed': seed}\n experiments.append(experiment_results)\n\n\n torch.save(experiments,\n os.path.abspath(os.path.join(__file__, '..', '..')) + '/results/option_sac_pendulum100Hz_experiments_tanh.pkl')\n\n # import matplotlib.pyplot as plt\n #\n # lol = [d[0] for d in dataset[0:1000]]\n # plt.plot(lol)\n # plt.show()\n"
]
| [
[
"torch.nn.Linear",
"numpy.random.seed",
"numpy.mean",
"torch.manual_seed",
"numpy.std",
"torch.squeeze",
"torch.cuda.is_available",
"torch.nn.init.calculate_gain"
]
]
|
QIN2DIM/2D-target-detection-Bleach-vs-Naruto | [
"93b5824e74535fc428407cf4053839b6ac23eab8"
]
| [
"network/utils/autoanchor.py"
]
| [
"# Auto-anchor utils\n\nimport random\n\nimport numpy as np\nimport torch\nimport yaml\nfrom tqdm import tqdm\n\nfrom utils.general import colorstr\n\n\ndef check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchor_grid.prod(-1).view(-1) # anchor area\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da.sign() != ds.sign(): # same order\n print('Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)\n m.anchor_grid[:] = m.anchor_grid.flip(0)\n\n\ndef check_anchors(dataset, model, thr=4.0, imgsz=640):\n # Check anchor fit to data, recompute if necessary\n prefix = colorstr('autoanchor: ')\n print(f'\\n{prefix}Analyzing anchors... ', end='')\n m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()\n shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale\n wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh\n\n def metric(k): # compute metric\n r = wh[:, None] / k[None]\n x = torch.min(r, 1. / r).min(2)[0] # ratio metric\n best = x.max(1)[0] # best_x\n aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold\n bpr = (best > 1. / thr).float().mean() # best possible recall\n return bpr, aat\n\n anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors\n bpr, aat = metric(anchors)\n print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')\n if bpr < 0.98: # threshold to recompute\n print('. Attempting to improve anchors, please wait...')\n na = m.anchor_grid.numel() // 2 # number of anchors\n try:\n anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n except Exception as e:\n print(f'{prefix}ERROR: {e}')\n new_bpr = metric(anchors)[0]\n if new_bpr > bpr: # replace anchors\n anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference\n m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss\n check_anchor_order(m)\n print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')\n else:\n print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')\n print('') # newline\n\n\ndef kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):\n \"\"\" Creates kmeans-evolved anchors from training datasets\n\n Arguments:\n dataset: path to data.yaml, or a loaded datasets\n n: number of anchors\n img_size: image size used for training\n thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0\n gen: generations to evolve anchors using genetic algorithm\n verbose: print all results\n\n Return:\n k: kmeans evolved anchors\n\n Usage:\n from utils.autoanchor import *; _ = kmean_anchors()\n \"\"\"\n from scipy.cluster.vq import kmeans\n\n thr = 1. / thr\n prefix = colorstr('autoanchor: ')\n\n def metric(k, wh): # compute metrics\n r = wh[:, None] / k[None]\n x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric\n # x = wh_iou(wh, torch.tensor(k)) # iou metric\n return x, x.max(1)[0] # x, best_x\n\n def anchor_fitness(k): # mutation fitness\n _, best = metric(torch.tensor(k, dtype=torch.float32), wh)\n return (best * (best > thr).float()).mean() # fitness\n\n def print_results(k):\n k = k[np.argsort(k.prod(1))] # sort small to large\n x, best = metric(k, wh0)\n bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr\n print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')\n print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '\n f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')\n for i, x in enumerate(k):\n print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\\n') # use in *.cfg\n return k\n\n if isinstance(dataset, str): # *.yaml file\n with open(dataset, encoding='ascii', errors='ignore') as f:\n data_dict = yaml.safe_load(f) # model dict\n from utils.datasets import LoadImagesAndLabels\n dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)\n\n # Get label wh\n shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh\n\n # Filter\n i = (wh0 < 3.0).any(1).sum()\n if i:\n print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')\n wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels\n # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1\n\n # Kmeans calculation\n print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')\n s = wh.std(0) # sigmas for whitening\n k, dist = kmeans(wh / s, n, iter=30) # points, mean distance\n assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}')\n k *= s\n wh = torch.tensor(wh, dtype=torch.float32) # filtered\n wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered\n k = print_results(k)\n\n # Plot\n # k, d = [None] * 20, [None] * 20\n # for i in tqdm(range(1, 21)):\n # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance\n # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)\n # ax = ax.ravel()\n # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')\n # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh\n # ax[0].hist(wh[wh[:, 0]<100, 0],400)\n # ax[1].hist(wh[wh[:, 1]<100, 1],400)\n # fig.savefig('wh.png', dpi=200)\n\n # Evolve\n npr = np.random\n f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma\n pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar\n for _ in pbar:\n v = np.ones(sh)\n while (v == 1).all(): # mutate until a change occurs (prevent duplicates)\n v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)\n kg = (k.copy() * v).clip(min=2.0)\n fg = anchor_fitness(kg)\n if fg > f:\n f, k = fg, kg.copy()\n pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'\n if verbose:\n print_results(k)\n\n return print_results(k)\n"
]
| [
[
"torch.min",
"scipy.cluster.vq.kmeans",
"numpy.ones",
"numpy.random.uniform",
"torch.tensor"
]
]
|
DivyaKrishnani/road_object_detection | [
"80b39647188a94ef25be5c81f67141e25e0f6978"
]
| [
"darkflow/net/yolov2/predict.py"
]
| [
"import numpy as np\nimport math\nimport cv2\nimport os\nimport json\n#from scipy.special import expit\n#from utils.box import BoundBox, box_iou, prob_compare\n#from utils.box import prob_compare2, box_intersection\nfrom ...utils.box import BoundBox\nfrom ...cython_utils.cy_yolo2_findboxes import box_constructor\n\ndef expit(x):\n\treturn 1. / (1. + np.exp(-x))\n\ndef _softmax(x):\n e_x = np.exp(x - np.max(x))\n out = e_x / e_x.sum()\n return out\n\ndef findboxes(self, net_out):\n\t# meta\n\tmeta = self.meta\n\tboxes = list()\n\tboxes=box_constructor(meta,net_out)\n\treturn boxes\n\ndef postprocess(self, net_out, im, save = True):\n\t\"\"\"\n\tTakes net output, draw net_out, save to disk\n\t\"\"\"\n\tboxes = self.findboxes(net_out)\n\n\t# meta\n\tmeta = self.meta\n\tthreshold = meta['thresh']\n\tcolors = meta['colors']\n\tlabels = meta['labels']\n\tif type(im) is not np.ndarray:\n\t\timgcv = cv2.imread(im)\n\telse: imgcv = im\n\th, w, _ = imgcv.shape\n\t\n\tresultsForJSON = []\n\tfor b in boxes:\n\t\tboxResults = self.process_box(b, h, w, threshold)\n\t\tif boxResults is None:\n\t\t\tcontinue\n\t\tleft, right, top, bot, mess, max_indx, confidence = boxResults\n\t\tthick = int((h + w) // 300)\n\t\tif self.FLAGS.json:\n\t\t\tresultsForJSON.append({\"label\": mess, \"confidence\": float('%.2f' % confidence), \"topleft\": {\"x\": left, \"y\": top}, \"bottomright\": {\"x\": right, \"y\": bot}})\n\t\t\tcontinue\n\n\t\tcv2.rectangle(imgcv,\n\t\t\t(left, top), (right, bot),\n\t\t\tcolors[max_indx], 1)\n\t\tcv2.putText(imgcv, mess, (left, top - 12),\n\t\t\t0, 1e-3 * h, colors[max_indx],thick//3)\n\n\tif not save: return imgcv\n\n\toutfolder = os.path.join(self.FLAGS.imgdir, 'out')\n\timg_name = os.path.join(outfolder, os.path.basename(im))\n\tif self.FLAGS.json:\n\t\ttextJSON = json.dumps(resultsForJSON)\n\t\ttextFile = os.path.splitext(img_name)[0] + \".json\"\n\t\twith open(textFile, 'w') as f:\n\t\t\tf.write(textJSON)\n\t\treturn\n\n\tcv2.imwrite(img_name, imgcv)\n"
]
| [
[
"numpy.max",
"numpy.exp"
]
]
|
a1ip/bayesian-analysis-recipes | [
"c4f1e729307d5962a1e95bc99aa92617e0ad0b1c"
]
| [
"models/feedforward.py"
]
| [
"from .__init__ import BayesianModel\nimport theano\nimport theano.tensor as tt\nimport numpy as np\nimport pymc3 as pm\n\n\nclass ForestCoverModel(BayesianModel):\n \n def __init__(self, n_hidden):\n super(ForestCoverModel, self).__init__()\n self.n_hidden = n_hidden\n\n def create_model(self, X=None, y=None):\n if X:\n num_samples, self.num_pred = X.shape\n \n if y:\n num_samples, self.num_out = Y.shape\n\n model_input = theano.shared(np.zeros(shape=(1, self.num_pred)))\n model_output = theano.shared(np.zeros(self.num_out))\n \n self.shared_vars = {\n 'model_input': model_input,\n 'model_output': model_output\n }\n \n with pm.Model() as model:\n # Define weights\n weights_1 = pm.Normal('w_1', mu=0, sd=1, \n shape=(self.num_pred, self.n_hidden))\n weights_2 = pm.Normal('w_2', mu=0, sd=1,\n shape=(self.n_hidden, self.n_hidden))\n weights_out = pm.Normal('w_out', mu=0, sd=1, \n shape=(self.n_hidden, self.num_outs))\n\n # Define activations\n acts_1 = tt.tanh(tt.dot(model_input, weights_1))\n acts_2 = tt.tanh(tt.dot(acts_1, weights_2))\n acts_out = tt.nnet.softmax(tt.dot(acts_2, weights_out)) # noqa\n\n # Define likelihood\n out = pm.Multinomial('likelihood', n=1, p=acts_out, \n observed=model_output)\n \n return model\n \n \n def fit(self, X, y, n=200000, batch_size=10):\n \"\"\"\n Train the Bayesian NN model.\n \"\"\"\n num_samples, self.num_pred = X.shape\n \n if self.cached_model is None:\n self.cached_model = self.create_model()\n \n with self.cached_model:\n minibatches = {\n self.shared_vars['model_input']: pm.Minibatch(X, batch_size=batch_size),\n self.shared_vars['model_output']: pm.Minibatch(y, batch_size=batch_size),\n }\n self._inference(minibatches, n)\n \n return self\n \n \n\n"
]
| [
[
"numpy.zeros"
]
]
|
lescailab/fenicsmpi | [
"c205d552591dc8691008537907d28b5d589610f2"
]
| [
"modules/local/fenics/compute/lib/Fibers.py"
]
| [
"# -*- coding: utf-8 -*-\n# Code by: Francesco Regazzoni\n\nimport dolfin as df\n\n\ndef getH1projection(f, V):\n\n mesh = V.mesh()\n uv = df.TrialFunction(V)\n vv = df.TestFunction(V)\n A = df.dot(uv, vv) + df.inner(df.grad(uv), df.grad(vv))\n L = df.dot(f, vv) + df.inner(df.grad(f), df.grad(vv))\n sol = df.Function(V)\n df.solve(A * df.dx(mesh) == L * df.dx(mesh), sol)\n return sol\n\n\ndef getTransmuralCoordinate(mesh, boundary_markers, ENDO, EPI, degree=2):\n\n df.set_log_level(30)\n dx = df.dx(mesh)\n\n V = df.FunctionSpace(mesh, 'CG', degree)\n bc1 = df.DirichletBC(V, df.Constant(0.0), boundary_markers, ENDO)\n bc2 = df.DirichletBC(V, df.Constant(1.0), boundary_markers, EPI)\n\n phi = df.Function(V, name=\"phi_transmural\")\n phi_trial = df.TrialFunction(V)\n psi = df.TestFunction(V)\n\n df.solve(df.dot(df.grad(phi_trial), df.grad(psi)) * dx ==\n df.Constant(0.0) * psi * dx, phi, [bc1, bc2])\n\n return phi\n\n\ndef getApicobasalCoordinate(mesh, boundary_markers, BASE, degree=2):\n\n df.set_log_level(30)\n dx = df.dx(mesh)\n\n V = df.FunctionSpace(mesh, 'CG', degree)\n\n phi_trial = df.TrialFunction(V)\n psi = df.TestFunction(V)\n\n a = df.dot(df.grad(phi_trial), df.grad(psi)) * df.dx\n L = df.Constant(0.0) * psi * df.dx\n\n\n# quotes = np.matmul(self.mesh.coordinates(),self.centerline) # centerline??\n quotes = mesh.coordinates()[:, 2]\n min_quote = df.MPI.max(df.MPI.comm_world, quotes.max())\n# min_quote = 0.060184572583718024\n\n def apex(x):\n result = abs(x[2] - min_quote) < df.DOLFIN_EPS\n return result\n bcs = [df.DirichletBC(V, df.Constant(1.0), boundary_markers, BASE),\n df.DirichletBC(V, df.Constant(0.0), apex, method='pointwise')]\n\n phi = df.Function(V, name=\"phi_apicobasal\")\n df.solve(a == L, phi, bcs)\n return phi\n\n# TODO: works in parallel?\ndef generateFibers(mesh, boundary_markers, ENDO, EPI, BASE, output_dir=None):\n\n if df.MPI.rank(df.MPI.comm_world) == 0:\n print(\"generating fibers...\", flush=True)\n theta_endo = 60.\n theta_epi = -60.\n\n degree = 1\n phi_transmural = getTransmuralCoordinate(\n mesh, boundary_markers, ENDO, EPI, degree=degree)\n\n def Max(a, b): return (a + b + abs(a - b)) / df.Constant(2.)\n def Min(a, b): return (a + b - abs(a - b)) / df.Constant(2.)\n\n W = df. 
VectorFunctionSpace(mesh, 'CG', degree)\n n = df.Function(W) # W?\n\n # -1: analytical fibers\n # 0: SR (on fields) NB: f and s are not perfectly orthogonal\n # 1: SR (on dofs)\n # 2: BT (on dofs)\n alg_type = 2 # 0 faster, but f and s are not perfectly orthogonal\n if alg_type == -1:\n\n # apex_x = 0.0469341\n # apex_y = 1.34562\n\n # f = df.project(df.Expression(('-(x[1]-apex_y)/sqrt(pow(x[0]-apex_x,2)+pow(x[1]-apex_y,2))',\n # ' (x[0]-apex_x)/sqrt(pow(x[0]-apex_x,2)+pow(x[1]-apex_y,2))',\n # '0'), degree = 2,apex_x=apex_x,apex_y=apex_y),self.W)\n # s = df.project(df.Expression((' (x[0]-apex_x)/sqrt(pow(x[0]-apex_x,2)+pow(x[1]-apex_y,2))',\n # ' (x[1]-apex_y)/sqrt(pow(x[0]-apex_x,2)+pow(x[1]-apex_y,2))',\n # '0'), degree = 2,apex_x=apex_x,apex_y=apex_y),self.W)\n f = df.project(df.Expression(('1.0', '0.0', '0.0'), degree=degree), W)\n s = df.project(df.Expression(('0.0', '1.0', '0.0'), degree=degree), W)\n n = df.project(df.cross(f, s), W)\n\n elif alg_type == 0:\n s = df.grad(phi_transmural)\n# s = s / Max(1e-10, df.sqrt(df.inner(s,s)))\n s = s / df.sqrt(df.inner(s, s))\n k = df.Constant((.0, .0, -1.)) # TODO: move to option file\n kp_tilde = k - df.dot(k, s) * s\n kp = kp_tilde / df.sqrt(df.inner(kp_tilde, kp_tilde))\n f_tilde = df.cross(s, kp)\n f_tilde = f_tilde / df.sqrt(df.inner(f_tilde, f_tilde))\n theta = (theta_endo + (theta_epi - theta_endo)\n * phi_transmural) * df.pi / 180.0\n f = f_tilde + df.sin(theta) * df.cross(s, f_tilde) + 2.2 * \\\n (df.sin(theta * .5))**2 * df.cross(s, df.cross(s, f_tilde))\n f = - f / df.sqrt(df.inner(f, f))\n# n = df.cross(f,s)\n# n = n / df.sqrt(df.inner(n,n))\n s = df.project(s, W)\n f = df.project(f, W)\n# n = df.project(df.cross(f,s),self.W)\n\n# sx,sy,sz = df.split(df.project(s,self.W))\n# sx = sx.vector().get_local()\n# sy = sy.vector().get_local()\n# sz = sz.vector().get_local()\n#\n# sx,sy,sz = df.split(df.project(s,self.W))\n# sx = df.project(sx,self.V).vector().get_local()\n# sy = df.project(sy,self.V).vector().get_local()\n# sz = df.project(sz,self.V).vector().get_local()\n# s = np.concatenate((sx[:,None],sy[:,None],sz[:,None]),axis=1)\n\n elif alg_type == 1 or alg_type == 2:\n\n import numpy as np\n ndof_local = phi_transmural.vector().get_local().size\n s_vec = np.empty((ndof_local, 3))\n s_vec_tot = df.project(df.grad(phi_transmural), W).vector().get_local()\n# s_vec[:,0] = s_vec_tot[self.W.sub(0).dofmap().dofs()]\n# s_vec[:,1] = s_vec_tot[self.W.sub(1).dofmap().dofs()]\n# s_vec[:,2] = s_vec_tot[self.W.sub(2).dofmap().dofs()]\n s_vec[:, 0] = s_vec_tot[0::3]\n s_vec[:, 1] = s_vec_tot[1::3]\n s_vec[:, 2] = s_vec_tot[2::3]\n# print(\"differenza %s\" % np.linalg.norm(s-s_vec))\n s = s_vec\n\n if alg_type == 2:\n phi_apicobasal = getApicobasalCoordinate(\n mesh, boundary_markers, BASE, degree=1)\n k_vec = np.empty((ndof_local, 3))\n k_vec_tot = df.project(\n df.grad(phi_apicobasal), W).vector().get_local()\n# k_vec[:,0] = k_vec_tot[self.W.sub(0).dofmap().dofs()]\n# k_vec[:,1] = k_vec_tot[self.W.sub(1).dofmap().dofs()]\n# k_vec[:,2] = k_vec_tot[self.W.sub(2).dofmap().dofs()]\n k_vec[:, 0] = k_vec_tot[0::3]\n k_vec[:, 1] = k_vec_tot[1::3]\n k_vec[:, 2] = k_vec_tot[2::3]\n # print(\"differenza %s\" % np.linalg.norm(s-s_vec))\n k = k_vec\n else:\n pass\n# TODO k = np.tile(centerline[None,:],[ndof_local,1])\n\n# if self.meshtype == \"coarse\":\n# v2d = df.vertex_to_dof_map(self.V)\n# for i in range(self.ndof_local):\n# if self.mesh.coordinates()[i,2] < 18.2e-3:\n# s_vec[v2d[i],:] = 
np.array([self.mesh.coordinates()[i,0],self.mesh.coordinates()[i,1],0.])\n\n phi_vec = phi_transmural.vector().get_local()\n\n s_norm = np.empty(ndof_local)\n theta = np.empty(ndof_local)\n kp_tilde = np.empty((ndof_local, 3))\n kp = np.empty((ndof_local, 3))\n f_tilde = np.empty((ndof_local, 3))\n f = np.empty((ndof_local, 3))\n n = np.empty((ndof_local, 3))\n# k = self.centerline\n for i in range(ndof_local):\n s_norm[i] = np.sqrt(np.inner(s[i, :], s[i, :]))\n s[i, :] = s[i, :] / s_norm[i]\n kp_tilde[i, :] = k[i, :] - np.inner(k[i, :], s[i, :]) * s[i, :]\n kp[i, :] = kp_tilde[i, :] / \\\n np.sqrt(np.inner(kp_tilde[i, :], kp_tilde[i, :]))\n f_tilde[i, :] = np.cross(s[i, :], kp[i, :])\n f_tilde[i, :] = f_tilde[i, :] / \\\n np.sqrt(np.inner(f_tilde[i, :], f_tilde[i, :]))\n theta[i] = (theta_endo + (theta_epi - theta_endo)\n * phi_vec[i]) * np.pi / 180.0\n f[i, :] = f_tilde[i, :] + np.sin(theta[i]) * np.cross(s[i, :], f_tilde[i, :]) + \\\n 2.2 * (np.sin(theta[i] * .5))**2 * \\\n np.cross(s[i, :], np.cross(s[i, :], f_tilde[i, :]))\n f[i, :] = - f[i, :] / np.sqrt(np.inner(f[i, :], f[i, :]))\n n[i, :] = np.cross(f[i, :], s[i, :])\n\n# print('f.s = %e \\t f.n = %e \\t s.n = % e' % (\n# np.inner(f[i,:],s[i,:]),\n# np.inner(f[i,:],n[i,:]),\n# np.inner(s[i,:],n[i,:])))\n\n f_vec = np.empty(ndof_local * 3)\n# f_vec[self.W.sub(0).dofmap().dofs()] = f[:,0]\n# f_vec[self.W.sub(1).dofmap().dofs()] = f[:,1]\n# f_vec[self.W.sub(2).dofmap().dofs()] = f[:,2]\n for i in range(3):\n f_vec[i::3] = f[:, i]\n\n s_vec = np.empty(ndof_local * 3)\n# s_vec[self.W.sub(0).dofmap().dofs()] = s[:,0]\n# s_vec[self.W.sub(1).dofmap().dofs()] = s[:,1]\n# s_vec[self.W.sub(2).dofmap().dofs()] = s[:,2]\n for i in range(3):\n s_vec[i::3] = s[:, i]\n\n n_vec = np.empty(ndof_local * 3)\n# n_vec[self.W.sub(0).dofmap().dofs()] = n[:,0]\n# n_vec[self.W.sub(1).dofmap().dofs()] = n[:,1]\n# n_vec[self.W.sub(2).dofmap().dofs()] = n[:,2]\n for i in range(3):\n n_vec[i::3] = n[:, i]\n\n f = df.Function(W)\n s = df.Function(W)\n n = df.Function(W)\n f.vector().set_local(f_vec)\n f.vector().apply(\"insert\")\n s.vector().set_local(s_vec)\n s.vector().apply(\"insert\")\n n.vector().set_local(n_vec)\n n.vector().apply(\"insert\")\n\n\n# f.vector().set_local()\n if df.MPI.rank(df.MPI.comm_world) == 0:\n print(\"fibers generated!\", flush=True)\n\n f.rename(\"f\", \"f\")\n s.rename(\"s\", \"s\")\n n.rename(\"n\", \"n\")\n# out_file = df.File(\"geometry_prolate/fibers_and_sheets.pvd\")\n# out_file << (phi,0)\n# out_file << (f,0)\n# out_file << (s,0)\n if output_dir:\n xdmf = df.XDMFFile(\"{}/fibers.xdmf\".format(output_dir))\n xdmf.parameters[\"functions_share_mesh\"] = True\n xdmf.parameters[\"flush_output\"] = True\n xdmf.write(phi_transmural, 0)\n if alg_type == 2:\n xdmf.write(phi_apicobasal, 0)\n xdmf.write(f, 0)\n xdmf.write(s, 0)\n xdmf.write(n, 0)\n xdmf.close()\n\n return f, s, n\n"
]
| [
[
"numpy.sin",
"numpy.inner",
"numpy.empty",
"numpy.cross"
]
]
|
Axe-hyx/taichi | [
"6a907c99fbc349430363080af91726d6ba92e5b9"
]
| [
"python/taichi/lang/__init__.py"
]
| [
"import functools\nimport os\nfrom copy import deepcopy as _deepcopy\n\nfrom taichi.core.util import locale_encode\nfrom taichi.core.util import ti_core as _ti_core\nfrom taichi.lang import impl, types\nfrom taichi.lang.enums import Layout\nfrom taichi.lang.exception import InvalidOperationError\nfrom taichi.lang.impl import *\nfrom taichi.lang.kernel_arguments import (any_arr, ext_arr,\n sparse_matrix_builder, template)\nfrom taichi.lang.kernel_impl import (KernelArgError, KernelDefError,\n data_oriented, func, kernel, pyfunc)\nfrom taichi.lang.matrix import Matrix, Vector\nfrom taichi.lang.ndrange import GroupedNDRange, ndrange\nfrom taichi.lang.ops import *\nfrom taichi.lang.quant_impl import quant\nfrom taichi.lang.runtime_ops import async_flush, sync\nfrom taichi.lang.sparse_matrix import SparseMatrix, SparseMatrixBuilder\nfrom taichi.lang.sparse_solver import SparseSolver\nfrom taichi.lang.struct import Struct\nfrom taichi.lang.transformer import TaichiSyntaxError\nfrom taichi.lang.type_factory_impl import type_factory\nfrom taichi.lang.util import (has_pytorch, is_taichi_class, python_scope,\n taichi_scope, to_numpy_type, to_pytorch_type,\n to_taichi_type)\nfrom taichi.misc.util import deprecated\nfrom taichi.profiler import KernelProfiler, get_default_kernel_profiler\nfrom taichi.snode.fields_builder import FieldsBuilder\n\nimport taichi as ti\n\n# TODO(#2223): Remove\ncore = _ti_core\n\nruntime = impl.get_runtime()\n\ni = axes(0)\nj = axes(1)\nk = axes(2)\nl = axes(3)\nij = axes(0, 1)\nik = axes(0, 2)\nil = axes(0, 3)\njk = axes(1, 2)\njl = axes(1, 3)\nkl = axes(2, 3)\nijk = axes(0, 1, 2)\nijl = axes(0, 1, 3)\nikl = axes(0, 2, 3)\njkl = axes(1, 2, 3)\nijkl = axes(0, 1, 2, 3)\n\nouter_product = deprecated('ti.outer_product(a, b)',\n 'a.outer_product(b)')(Matrix.outer_product)\ncross = deprecated('ti.cross(a, b)', 'a.cross(b)')(Matrix.cross)\ndot = deprecated('ti.dot(a, b)', 'a.dot(b)')(Matrix.dot)\nnormalized = deprecated('ti.normalized(a)',\n 'a.normalized()')(Matrix.normalized)\n\ncfg = default_cfg()\nx86_64 = _ti_core.x64\nx64 = _ti_core.x64\narm64 = _ti_core.arm64\ncuda = _ti_core.cuda\nmetal = _ti_core.metal\nopengl = _ti_core.opengl\ncc = _ti_core.cc\nwasm = _ti_core.wasm\nvulkan = _ti_core.vulkan\ngpu = [cuda, metal, opengl, vulkan]\ncpu = _ti_core.host_arch()\ntimeline_clear = lambda: impl.get_runtime().prog.timeline_clear()\ntimeline_save = lambda fn: impl.get_runtime().prog.timeline_save(fn)\n\n# Legacy API\ntype_factory_ = _ti_core.get_type_factory_instance()\n\n\n@deprecated('kernel_profiler_print()', 'print_kernel_profile_info()')\ndef kernel_profiler_print():\n return print_kernel_profile_info()\n\n\ndef print_kernel_profile_info(mode='count'):\n \"\"\"Print the profiling results of Taichi kernels.\n\n To enable this profiler, set ``kernel_profiler=True`` in ``ti.init()``.\n The default print mode is ``COUNT`` mode: print the statistical results (min,max,avg time) of Taichi kernels,\n another mode ``TRACE``: print the records of launched Taichi kernels with specific profiling metrics (time, memory load/store and core utilization etc.)\n\n Args:\n mode (str): the way to print profiling results\n\n Example::\n\n >>> import taichi as ti\n\n >>> ti.init(ti.cpu, kernel_profiler=True)\n >>> var = ti.field(ti.f32, shape=1)\n\n >>> @ti.kernel\n >>> def compute():\n >>> var[0] = 1.0\n\n >>> compute()\n >>> ti.print_kernel_profile_info() #[1]\n >>> # equivalent calls :\n >>> # ti.print_kernel_profile_info('count')\n\n >>> ti.print_kernel_profile_info('trace')\n\n Note:\n [1] 
Currently the result of `KernelProfiler` could be incorrect on OpenGL\n backend due to its lack of support for `ti.sync()`.\n \"\"\"\n get_default_kernel_profiler().print_info(mode)\n\n\ndef query_kernel_profile_info(name):\n \"\"\"Query kernel elapsed time(min,avg,max) on devices using the kernel name.\n\n To enable this profiler, set `kernel_profiler=True` in `ti.init`.\n\n Args:\n name (str): kernel name.\n\n Returns:\n struct KernelProfilerQueryResult with member varaibles(counter, min, max, avg)\n\n Example::\n\n >>> import taichi as ti\n\n >>> ti.init(ti.cpu, kernel_profiler=True)\n >>> n = 1024*1024\n >>> var = ti.field(ti.f32, shape=n)\n\n >>> @ti.kernel\n >>> def fill():\n >>> for i in range(n):\n >>> var[i] = 0.1\n\n >>> fill()\n >>> ti.clear_kernel_profile_info() #[1]\n >>> for i in range(100):\n >>> fill()\n >>> query_result = ti.query_kernel_profile_info(fill.__name__) #[2]\n >>> print(\"kernel excuted times =\",query_result.counter)\n >>> print(\"kernel elapsed time(min_in_ms) =\",query_result.min)\n >>> print(\"kernel elapsed time(max_in_ms) =\",query_result.max)\n >>> print(\"kernel elapsed time(avg_in_ms) =\",query_result.avg)\n\n Note:\n [1] To get the correct result, query_kernel_profile_info() must be used in conjunction with\n clear_kernel_profile_info().\n\n [2] Currently the result of `KernelProfiler` could be incorrect on OpenGL\n backend due to its lack of support for `ti.sync()`.\n \"\"\"\n return get_default_kernel_profiler().query_info(name)\n\n\n@deprecated('kernel_profiler_clear()', 'clear_kernel_profile_info()')\ndef kernel_profiler_clear():\n return clear_kernel_profile_info()\n\n\ndef clear_kernel_profile_info():\n \"\"\"Clear all KernelProfiler records.\"\"\"\n get_default_kernel_profiler().clear_info()\n\n\ndef kernel_profiler_total_time():\n \"\"\"Get elapsed time of all kernels recorded in KernelProfiler.\n\n Returns:\n time (double): total time in second\n \"\"\"\n return get_default_kernel_profiler().get_total_time()\n\n\n@deprecated('memory_profiler_print()', 'print_memory_profile_info()')\ndef memory_profiler_print():\n return print_memory_profile_info()\n\n\ndef print_memory_profile_info():\n \"\"\"Memory profiling tool for LLVM backends with full sparse support.\n\n This profiler is automatically on.\n \"\"\"\n impl.get_runtime().materialize()\n impl.get_runtime().prog.print_memory_profiler_info()\n\n\nextension = _ti_core.Extension\n\n\ndef is_extension_supported(arch, ext):\n \"\"\"Checks whether an extension is supported on an arch.\n\n Args:\n arch (taichi_core.Arch): Specified arch.\n ext (taichi_core.Extension): Specified extension.\n\n Returns:\n bool: Whether `ext` is supported on `arch`.\n \"\"\"\n return _ti_core.is_extension_supported(arch, ext)\n\n\ndef reset():\n _ti_core.reset_snode_access_flag()\n impl.reset()\n global runtime\n runtime = impl.get_runtime()\n\n\nclass _EnvironmentConfigurator:\n def __init__(self, kwargs, cfg):\n self.cfg = cfg\n self.kwargs = kwargs\n self.keys = []\n\n def add(self, key, cast=None):\n cast = cast or self.bool_int\n\n self.keys.append(key)\n\n # TI_ASYNC= : no effect\n # TI_ASYNC=0 : False\n # TI_ASYNC=1 : True\n name = 'TI_' + key.upper()\n value = os.environ.get(name, '')\n if len(value):\n self[key] = cast(value)\n if key in self.kwargs:\n _ti_core.warn(\n f'ti.init argument \"{key}\" overridden by environment variable {name}={value}'\n )\n del self.kwargs[key] # mark as recognized\n elif key in self.kwargs:\n self[key] = self.kwargs[key]\n del self.kwargs[key] # mark as recognized\n\n def 
__getitem__(self, key):\n return getattr(self.cfg, key)\n\n def __setitem__(self, key, value):\n setattr(self.cfg, key, value)\n\n @staticmethod\n def bool_int(x):\n return bool(int(x))\n\n\nclass _SpecialConfig:\n # like CompileConfig in C++, this is the configurations that belong to other submodules\n def __init__(self):\n self.print_preprocessed = False\n self.log_level = 'info'\n self.gdb_trigger = False\n self.excepthook = False\n self.experimental_real_function = False\n\n\ndef prepare_sandbox():\n '''\n Returns a temporary directory, which will be automatically deleted on exit.\n It may contain the taichi_core shared object or some misc. files.\n '''\n import atexit\n import shutil\n from tempfile import mkdtemp\n tmp_dir = mkdtemp(prefix='taichi-')\n atexit.register(shutil.rmtree, tmp_dir)\n print(f'[Taichi] preparing sandbox at {tmp_dir}')\n os.mkdir(os.path.join(tmp_dir, 'runtime/'))\n return tmp_dir\n\n\ndef init(arch=None,\n default_fp=None,\n default_ip=None,\n _test_mode=False,\n **kwargs):\n\n # Make a deepcopy in case these args reference to items from ti.cfg, which are\n # actually references. If no copy is made and the args are indeed references,\n # ti.reset() could override the args to their default values.\n default_fp = _deepcopy(default_fp)\n default_ip = _deepcopy(default_ip)\n kwargs = _deepcopy(kwargs)\n ti.reset()\n\n spec_cfg = _SpecialConfig()\n env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)\n env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)\n\n # configure default_fp/ip:\n # TODO: move these stuff to _SpecialConfig too:\n env_default_fp = os.environ.get(\"TI_DEFAULT_FP\")\n if env_default_fp:\n if default_fp is not None:\n _ti_core.warn(\n f'ti.init argument \"default_fp\" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'\n )\n if env_default_fp == '32':\n default_fp = ti.f32\n elif env_default_fp == '64':\n default_fp = ti.f64\n elif env_default_fp is not None:\n raise ValueError(\n f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')\n\n env_default_ip = os.environ.get(\"TI_DEFAULT_IP\")\n if env_default_ip:\n if default_ip is not None:\n _ti_core.warn(\n f'ti.init argument \"default_ip\" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'\n )\n if env_default_ip == '32':\n default_ip = ti.i32\n elif env_default_ip == '64':\n default_ip = ti.i64\n elif env_default_ip is not None:\n raise ValueError(\n f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')\n\n if default_fp is not None:\n impl.get_runtime().set_default_fp(default_fp)\n if default_ip is not None:\n impl.get_runtime().set_default_ip(default_ip)\n\n # submodule configurations (spec_cfg):\n env_spec.add('print_preprocessed')\n env_spec.add('log_level', str)\n env_spec.add('gdb_trigger')\n env_spec.add('excepthook')\n env_spec.add('experimental_real_function')\n\n # compiler configurations (ti.cfg):\n for key in dir(ti.cfg):\n if key in ['arch', 'default_fp', 'default_ip']:\n continue\n cast = type(getattr(ti.cfg, key))\n if cast is bool:\n cast = None\n env_comp.add(key, cast)\n\n unexpected_keys = kwargs.keys()\n if len(unexpected_keys):\n raise KeyError(\n f'Unrecognized keyword argument(s) for ti.init: {\", \".join(unexpected_keys)}'\n )\n\n # dispatch configurations that are not in ti.cfg:\n if not _test_mode:\n ti.set_gdb_trigger(spec_cfg.gdb_trigger)\n impl.get_runtime().print_preprocessed = spec_cfg.print_preprocessed\n impl.get_runtime().experimental_real_function = \\\n spec_cfg.experimental_real_function\n 
ti.set_logging_level(spec_cfg.log_level.lower())\n if spec_cfg.excepthook:\n # TODO(#1405): add a way to restore old excepthook\n ti.enable_excepthook()\n\n # select arch (backend):\n env_arch = os.environ.get('TI_ARCH')\n if env_arch is not None:\n ti.info(f'Following TI_ARCH setting up for arch={env_arch}')\n arch = _ti_core.arch_from_name(env_arch)\n ti.cfg.arch = adaptive_arch_select(arch)\n if ti.cfg.arch == cc:\n _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))\n print(f'[Taichi] Starting on arch={_ti_core.arch_name(ti.cfg.arch)}')\n\n if _test_mode:\n return spec_cfg\n\n get_default_kernel_profiler().set_kernel_profiler_mode(\n ti.cfg.kernel_profiler)\n\n # create a new program:\n impl.get_runtime().create_program()\n\n ti.trace('Materializing runtime...')\n impl.get_runtime().prog.materialize_runtime()\n\n impl._root_fb = FieldsBuilder()\n\n\ndef no_activate(*args):\n for v in args:\n _ti_core.no_activate(v.snode.ptr)\n\n\ndef block_local(*args):\n if ti.current_cfg().dynamic_index:\n raise InvalidOperationError(\n 'dynamic_index is not allowed when block_local is turned on.')\n for a in args:\n for v in a.get_field_members():\n _ti_core.insert_snode_access_flag(\n _ti_core.SNodeAccessFlag.block_local, v.ptr)\n\n\n@deprecated('ti.cache_shared', 'ti.block_local')\ndef cache_shared(*args):\n block_local(*args)\n\n\ndef cache_read_only(*args):\n for a in args:\n for v in a.get_field_members():\n _ti_core.insert_snode_access_flag(\n _ti_core.SNodeAccessFlag.read_only, v.ptr)\n\n\ndef assume_in_range(val, base, low, high):\n return _ti_core.expr_assume_in_range(\n Expr(val).ptr,\n Expr(base).ptr, low, high)\n\n\ndef loop_unique(val, covers=None):\n if covers is None:\n covers = []\n if not isinstance(covers, (list, tuple)):\n covers = [covers]\n covers = [x.snode.ptr if isinstance(x, Expr) else x.ptr for x in covers]\n return _ti_core.expr_loop_unique(Expr(val).ptr, covers)\n\n\nparallelize = _ti_core.parallelize\nserialize = lambda: parallelize(1)\nvectorize = _ti_core.vectorize\nbit_vectorize = _ti_core.bit_vectorize\nblock_dim = _ti_core.block_dim\n\ninversed = deprecated('ti.inversed(a)', 'a.inverse()')(Matrix.inversed)\ntransposed = deprecated('ti.transposed(a)', 'a.transpose()')(Matrix.transposed)\n\n\ndef polar_decompose(A, dt=None):\n \"\"\"Perform polar decomposition (A=UP) for arbitrary size matrix.\n\n Mathematical concept refers to https://en.wikipedia.org/wiki/Polar_decomposition.\n This is only a wrapper for :func:`taichi.lang.linalg.polar_decompose`.\n\n Args:\n A (ti.Matrix(n, n)): input nxn matrix `A`.\n dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64.\n\n Returns:\n Decomposed nxn matrices `U` and `P`.\n \"\"\"\n if dt is None:\n dt = impl.get_runtime().default_fp\n from .linalg import polar_decompose\n return polar_decompose(A, dt)\n\n\ndef svd(A, dt=None):\n \"\"\"Perform singular value decomposition (A=USV^T) for arbitrary size matrix.\n\n Mathematical concept refers to https://en.wikipedia.org/wiki/Singular_value_decomposition.\n This is only a wrappers for :func:`taichi.lang.linalg.svd`.\n\n Args:\n A (ti.Matrix(n, n)): input nxn matrix `A`.\n dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64.\n\n Returns:\n Decomposed nxn matrices `U`, 'S' and `V`.\n \"\"\"\n if dt is None:\n dt = impl.get_runtime().default_fp\n from .linalg import svd\n return svd(A, dt)\n\n\ndef eig(A, dt=None):\n \"\"\"Compute the eigenvalues and right eigenvectors of a real matrix.\n\n Mathematical concept 
refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix.\n 2D implementation refers to :func:`taichi.lang.linalg.eig2x2`.\n\n Args:\n A (ti.Matrix(n, n)): 2D Matrix for which the eigenvalues and right eigenvectors will be computed.\n dt (DataType): The datatype for the eigenvalues and right eigenvectors.\n\n Returns:\n eigenvalues (ti.Matrix(n, 2)): The eigenvalues in complex form. Each row stores one eigenvalue. The first number of the eigenvalue represents the real part and the second number represents the imaginary part.\n eigenvectors (ti.Matrix(n*2, n)): The eigenvectors in complex form. Each column stores one eigenvector. Each eigenvector consists of n entries, each of which is represented by two numbers for its real part and imaginary part.\n \"\"\"\n if dt is None:\n dt = impl.get_runtime().default_fp\n from taichi.lang import linalg\n if A.n == 2:\n return linalg.eig2x2(A, dt)\n raise Exception(\"Eigen solver only supports 2D matrices.\")\n\n\ndef sym_eig(A, dt=None):\n \"\"\"Compute the eigenvalues and right eigenvectors of a real symmetric matrix.\n\n Mathematical concept refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix.\n 2D implementation refers to :func:`taichi.lang.linalg.sym_eig2x2`.\n\n Args:\n A (ti.Matrix(n, n)): Symmetric Matrix for which the eigenvalues and right eigenvectors will be computed.\n dt (DataType): The datatype for the eigenvalues and right eigenvectors.\n\n Returns:\n eigenvalues (ti.Vector(n)): The eigenvalues. Each entry store one eigen value.\n eigenvectors (ti.Matrix(n, n)): The eigenvectors. Each column stores one eigenvector.\n \"\"\"\n assert all(A == A.transpose()), \"A needs to be symmetric\"\n if dt is None:\n dt = impl.get_runtime().default_fp\n from taichi.lang import linalg\n if A.n == 2:\n return linalg.sym_eig2x2(A, dt)\n raise Exception(\"Symmetric eigen solver only supports 2D matrices.\")\n\n\ndef randn(dt=None):\n \"\"\"Generates a random number from standard normal distribution.\n\n Implementation refers to :func:`taichi.lang.random.randn`.\n\n Args:\n dt (DataType): The datatype for the generated random number.\n\n Returns:\n The generated random number.\n \"\"\"\n if dt is None:\n dt = impl.get_runtime().default_fp\n from .random import randn\n return randn(dt)\n\n\ndeterminant = deprecated('ti.determinant(a)',\n 'a.determinant()')(Matrix.determinant)\ntr = deprecated('ti.tr(a)', 'a.trace()')(Matrix.trace)\n\n\ndef Tape(loss, clear_gradients=True):\n \"\"\"Return a context manager of :class:`~taichi.lang.tape.TapeImpl`. 
The\n context manager would catching all of the callings of functions that\n decorated by :func:`~taichi.lang.kernel_impl.kernel` or\n :func:`~taichi.ad.grad_replaced` under `with` statement, and calculate\n all the partial gradients of a given loss variable by calling all of the\n gradient function of the callings caught in reverse order while `with`\n statement ended.\n\n See also :func:`~taichi.lang.kernel_impl.kernel` and\n :func:`~taichi.ad.grad_replaced` for gradient functions.\n\n Args:\n loss(:class:`~taichi.lang.expr.Expr`): The loss field, which shape should be ().\n clear_gradients(Bool): Before `with` body start, clear all gradients or not.\n\n Returns:\n :class:`~taichi.lang.tape.TapeImpl`: The context manager.\n\n Example::\n\n >>> @ti.kernel\n >>> def sum(a: ti.float32):\n >>> for I in ti.grouped(x):\n >>> y[None] += x[I] ** a\n >>>\n >>> with ti.Tape(loss = y):\n >>> sum(2)\"\"\"\n impl.get_runtime().materialize()\n if len(loss.shape) != 0:\n raise RuntimeError(\n 'The loss of `Tape` must be a 0-D field, i.e. scalar')\n if not loss.snode.ptr.has_grad():\n raise RuntimeError(\n 'Gradients of loss are not allocated, please use ti.field(..., needs_grad=True)'\n ' for all fields that are required by autodiff.')\n if clear_gradients:\n clear_all_gradients()\n\n from taichi.lang.meta import clear_loss\n clear_loss(loss)\n\n return runtime.get_tape(loss)\n\n\ndef clear_all_gradients():\n \"\"\"Set all fields' gradients to 0.\"\"\"\n impl.get_runtime().materialize()\n\n def visit(node):\n places = []\n for i in range(node.ptr.get_num_ch()):\n ch = node.ptr.get_ch(i)\n if not ch.is_place():\n visit(SNode(ch))\n else:\n if not ch.is_primal():\n places.append(ch.get_expr())\n\n places = tuple(places)\n if places:\n from taichi.lang.meta import clear_gradients\n clear_gradients(places)\n\n for root_fb in FieldsBuilder.finalized_roots():\n visit(root_fb)\n\n\ndef deactivate_all_snodes():\n \"\"\"Recursively deactivate all SNodes.\"\"\"\n for root_fb in FieldsBuilder.finalized_roots():\n root_fb.deactivate_all()\n\n\ndef benchmark(func, repeat=300, args=()):\n import time\n\n def run_benchmark():\n compile_time = time.time()\n func(*args) # compile the kernel first\n ti.sync()\n compile_time = time.time() - compile_time\n ti.stat_write('compilation_time', compile_time)\n codegen_stat = _ti_core.stat()\n for line in codegen_stat.split('\\n'):\n try:\n a, b = line.strip().split(':')\n except:\n continue\n a = a.strip()\n b = int(float(b))\n if a == 'codegen_kernel_statements':\n ti.stat_write('compiled_inst', b)\n if a == 'codegen_offloaded_tasks':\n ti.stat_write('compiled_tasks', b)\n elif a == 'launched_tasks':\n ti.stat_write('launched_tasks', b)\n\n # Use 3 initial iterations to warm up\n # instruction/data caches. 
Discussion:\n # https://github.com/taichi-dev/taichi/pull/1002#discussion_r426312136\n for i in range(3):\n func(*args)\n ti.sync()\n ti.clear_kernel_profile_info()\n t = time.time()\n for n in range(repeat):\n func(*args)\n ti.sync()\n elapsed = time.time() - t\n avg = elapsed / repeat\n ti.stat_write('wall_clk_t', avg)\n device_time = ti.kernel_profiler_total_time()\n avg_device_time = device_time / repeat\n ti.stat_write('exec_t', avg_device_time)\n\n run_benchmark()\n\n\ndef benchmark_plot(fn=None,\n cases=None,\n columns=None,\n column_titles=None,\n archs=None,\n title=None,\n bars='sync_vs_async',\n bar_width=0.4,\n bar_distance=0,\n left_margin=0,\n size=(12, 8)):\n import matplotlib.pyplot as plt\n import yaml\n if fn is None:\n fn = os.path.join(_ti_core.get_repo_dir(), 'benchmarks', 'output',\n 'benchmark.yml')\n\n with open(fn, 'r') as f:\n data = yaml.load(f, Loader=yaml.SafeLoader)\n if bars != 'sync_vs_async': # need baseline\n baseline_dir = os.path.join(_ti_core.get_repo_dir(), 'benchmarks',\n 'baseline')\n baseline_file = f'{baseline_dir}/benchmark.yml'\n with open(baseline_file, 'r') as f:\n baseline_data = yaml.load(f, Loader=yaml.SafeLoader)\n if cases is None:\n cases = list(data.keys())\n\n assert len(cases) >= 1\n if len(cases) == 1:\n cases = [cases[0], cases[0]]\n ti.warning(\n 'Function benchmark_plot does not support plotting with only one case for now. Duplicating the item to move on.'\n )\n\n if columns is None:\n columns = list(data[cases[0]].keys())\n if column_titles is None:\n column_titles = columns\n normalize_to_lowest = lambda x: True\n figure, subfigures = plt.subplots(len(cases), len(columns))\n if title is None:\n title = 'Taichi Performance Benchmarks (Higher means more)'\n figure.suptitle(title, fontweight=\"bold\")\n for col_id in range(len(columns)):\n subfigures[0][col_id].set_title(column_titles[col_id])\n for case_id in range(len(cases)):\n case = cases[case_id]\n subfigures[case_id][0].annotate(\n case,\n xy=(0, 0.5),\n xytext=(-subfigures[case_id][0].yaxis.labelpad - 5, 0),\n xycoords=subfigures[case_id][0].yaxis.label,\n textcoords='offset points',\n size='large',\n ha='right',\n va='center')\n for col_id in range(len(columns)):\n col = columns[col_id]\n if archs is None:\n current_archs = data[case][col].keys()\n else:\n current_archs = [\n x for x in archs if x in data[case][col].keys()\n ]\n if bars == 'sync_vs_async':\n y_left = [\n data[case][col][arch]['sync'] for arch in current_archs\n ]\n label_left = 'sync'\n y_right = [\n data[case][col][arch]['async'] for arch in current_archs\n ]\n label_right = 'async'\n elif bars == 'sync_regression':\n y_left = [\n baseline_data[case][col][arch]['sync']\n for arch in current_archs\n ]\n label_left = 'before'\n y_right = [\n data[case][col][arch]['sync'] for arch in current_archs\n ]\n label_right = 'after'\n elif bars == 'async_regression':\n y_left = [\n baseline_data[case][col][arch]['async']\n for arch in current_archs\n ]\n label_left = 'before'\n y_right = [\n data[case][col][arch]['async'] for arch in current_archs\n ]\n label_right = 'after'\n else:\n raise RuntimeError('Unknown bars type')\n if normalize_to_lowest(col):\n for i in range(len(current_archs)):\n maximum = max(y_left[i], y_right[i])\n y_left[i] = y_left[i] / maximum if y_left[i] != 0 else 1\n y_right[i] = y_right[i] / maximum if y_right[i] != 0 else 1\n ax = subfigures[case_id][col_id]\n bar_left = ax.bar(x=[\n i - bar_width / 2 - bar_distance / 2\n for i in range(len(current_archs))\n ],\n height=y_left,\n 
width=bar_width,\n label=label_left,\n color=(0.47, 0.69, 0.89, 1.0))\n bar_right = ax.bar(x=[\n i + bar_width / 2 + bar_distance / 2\n for i in range(len(current_archs))\n ],\n height=y_right,\n width=bar_width,\n label=label_right,\n color=(0.68, 0.26, 0.31, 1.0))\n ax.set_xticks(range(len(current_archs)))\n ax.set_xticklabels(current_archs)\n figure.legend((bar_left, bar_right), (label_left, label_right),\n loc='lower center')\n figure.subplots_adjust(left=left_margin)\n\n fig = plt.gcf()\n fig.set_size_inches(size)\n\n plt.show()\n\n\ndef stat_write(key, value):\n import yaml\n case_name = os.environ.get('TI_CURRENT_BENCHMARK')\n if case_name is None:\n return\n if case_name.startswith('benchmark_'):\n case_name = case_name[10:]\n arch_name = _ti_core.arch_name(ti.cfg.arch)\n async_mode = 'async' if ti.cfg.async_mode else 'sync'\n output_dir = os.environ.get('TI_BENCHMARK_OUTPUT_DIR', '.')\n filename = f'{output_dir}/benchmark.yml'\n try:\n with open(filename, 'r') as f:\n data = yaml.load(f, Loader=yaml.SafeLoader)\n except FileNotFoundError:\n data = {}\n data.setdefault(case_name, {})\n data[case_name].setdefault(key, {})\n data[case_name][key].setdefault(arch_name, {})\n data[case_name][key][arch_name][async_mode] = value\n with open(filename, 'w') as f:\n yaml.dump(data, f, Dumper=yaml.SafeDumper)\n\n\ndef is_arch_supported(arch):\n \"\"\"Checks whether an arch is supported on the machine.\n\n Args:\n arch (taichi_core.Arch): Specified arch.\n\n Returns:\n bool: Whether `arch` is supported on the machine.\n \"\"\"\n arch_table = {\n cuda: _ti_core.with_cuda,\n metal: _ti_core.with_metal,\n opengl: _ti_core.with_opengl,\n cc: _ti_core.with_cc,\n vulkan: lambda: _ti_core.with_vulkan(),\n wasm: lambda: True,\n cpu: lambda: True,\n }\n with_arch = arch_table.get(arch, lambda: False)\n try:\n return with_arch()\n except Exception as e:\n arch = _ti_core.arch_name(arch)\n _ti_core.warn(\n f\"{e.__class__.__name__}: '{e}' occurred when detecting \"\n f\"{arch}, consider add `export TI_WITH_{arch.upper()}=0` \"\n f\" to environment variables to depress this warning message.\")\n return False\n\n\ndef supported_archs():\n \"\"\"Gets all supported archs on the machine.\n\n Returns:\n List[taichi_core.Arch]: All supported archs on the machine.\n \"\"\"\n archs = [cpu, cuda, metal, vulkan, opengl, cc]\n\n wanted_archs = os.environ.get('TI_WANTED_ARCHS', '')\n want_exclude = wanted_archs.startswith('^')\n if want_exclude:\n wanted_archs = wanted_archs[1:]\n wanted_archs = wanted_archs.split(',')\n # Note, ''.split(',') gives you [''], which is not an empty array.\n wanted_archs = list(filter(lambda x: x != '', wanted_archs))\n if len(wanted_archs):\n archs, old_archs = [], archs\n for arch in old_archs:\n if want_exclude == (_ti_core.arch_name(arch) not in wanted_archs):\n archs.append(arch)\n\n archs, old_archs = [], archs\n for arch in old_archs:\n if is_arch_supported(arch):\n archs.append(arch)\n\n return archs\n\n\ndef adaptive_arch_select(arch):\n if arch is None:\n return cpu\n if not isinstance(arch, (list, tuple)):\n arch = [arch]\n for a in arch:\n if is_arch_supported(a):\n return a\n ti.warn(f'Arch={arch} is not supported, falling back to CPU')\n return cpu\n\n\nclass _ArchCheckers(object):\n def __init__(self):\n self._checkers = []\n\n def register(self, c):\n self._checkers.append(c)\n\n def __call__(self, arch):\n assert isinstance(arch, _ti_core.Arch)\n return all([c(arch) for c in self._checkers])\n\n\n_tests_arch_checkers_argname = '_tests_arch_checkers'\n\n\ndef 
_get_or_make_arch_checkers(kwargs):\n k = _tests_arch_checkers_argname\n if k not in kwargs:\n kwargs[k] = _ArchCheckers()\n return kwargs[k]\n\n\n# test with all archs\ndef all_archs_with(**kwargs):\n kwargs = _deepcopy(kwargs)\n\n def decorator(test):\n # @pytest.mark.parametrize decorator only knows about regular function args,\n # without *args or **kwargs. By decorating with @functools.wraps, the\n # signature of |test| is preserved, so that @ti.all_archs can be used after\n # the parametrization decorator.\n #\n # Full discussion: https://github.com/pytest-dev/pytest/issues/6810\n @functools.wraps(test)\n def wrapped(*test_args, **test_kwargs):\n can_run_on = test_kwargs.pop(_tests_arch_checkers_argname,\n _ArchCheckers())\n # Filter away archs that don't support 64-bit data.\n fp = kwargs.get('default_fp', ti.f32)\n ip = kwargs.get('default_ip', ti.i32)\n if fp == ti.f64 or ip == ti.i64:\n can_run_on.register(lambda arch: is_extension_supported(\n arch, extension.data64))\n\n for arch in ti.supported_archs():\n if can_run_on(arch):\n print('Running test on arch={}'.format(arch))\n ti.init(arch=arch, **kwargs)\n test(*test_args, **test_kwargs)\n else:\n print('Skipped test on arch={}'.format(arch))\n\n return wrapped\n\n return decorator\n\n\n# test with all archs\ndef all_archs(test):\n return all_archs_with()(test)\n\n\n# Exclude the given archs when running the tests\n#\n# Example usage:\n#\n# @ti.archs_excluding(ti.cuda, ti.metal)\n# def test_xx():\n# ...\n#\n# @ti.archs_excluding(ti.cuda, default_fp=ti.f64)\n# def test_yy():\n# ...\ndef archs_excluding(*excluded_archs, **kwargs):\n # |kwargs| will be passed to all_archs_with(**kwargs)\n assert all([isinstance(a, _ti_core.Arch) for a in excluded_archs])\n excluded_archs = set(excluded_archs)\n\n def decorator(test):\n @functools.wraps(test)\n def wrapped(*test_args, **test_kwargs):\n def checker(arch):\n return arch not in excluded_archs\n\n _get_or_make_arch_checkers(test_kwargs).register(checker)\n return all_archs_with(**kwargs)(test)(*test_args, **test_kwargs)\n\n return wrapped\n\n return decorator\n\n\n# Specifies the extension features the archs are required to support in order\n# to run the test.\n#\n# Example usage:\n#\n# @ti.require(ti.extension.data64)\n# @ti.all_archs_with(default_fp=ti.f64)\n# def test_xx():\n# ...\ndef require(*exts):\n # Because this decorator injects an arch checker, its usage must be followed\n # with all_archs_with(), either directly or indirectly.\n assert all([isinstance(e, _ti_core.Extension) for e in exts])\n\n def decorator(test):\n @functools.wraps(test)\n def wrapped(*test_args, **test_kwargs):\n def checker(arch):\n return all([is_extension_supported(arch, e) for e in exts])\n\n _get_or_make_arch_checkers(test_kwargs).register(checker)\n test(*test_args, **test_kwargs)\n\n return wrapped\n\n return decorator\n\n\ndef archs_support_sparse(test, **kwargs):\n wrapped = all_archs_with(**kwargs)(test)\n return require(extension.sparse)(wrapped)\n\n\ndef torch_test(func):\n if ti.has_pytorch():\n # OpenGL somehow crashes torch test without a reason, unforturnately\n return ti.test(exclude=[opengl])(func)\n else:\n return lambda: None\n\n\ndef get_host_arch_list():\n return [_ti_core.host_arch()]\n\n\n# test with host arch only\ndef host_arch_only(func):\n @functools.wraps(func)\n def test(*args, **kwargs):\n archs = [_ti_core.host_arch()]\n for arch in archs:\n ti.init(arch=arch)\n func(*args, **kwargs)\n\n return test\n\n\ndef archs_with(archs, **init_kwags):\n \"\"\"\n Run the test on the 
given archs with the given init args.\n\n Args:\n archs: a list of Taichi archs\n init_kwargs: kwargs passed to ti.init()\n \"\"\"\n def decorator(test):\n @functools.wraps(test)\n def wrapped(*test_args, **test_kwargs):\n for arch in archs:\n ti.init(arch=arch, **init_kwags)\n test(*test_args, **test_kwargs)\n\n return wrapped\n\n return decorator\n\n\ndef must_throw(ex):\n def decorator(func):\n def func__(*args, **kwargs):\n finishes = False\n try:\n func(*args, **kwargs)\n finishes = True\n except ex:\n # throws. test passed\n pass\n except Exception as err_actual:\n assert False, 'Exception {} instead of {} thrown'.format(\n str(type(err_actual)), str(ex))\n if finishes:\n assert False, 'Test successfully finished instead of throwing {}'.format(\n str(ex))\n\n return func__\n\n return decorator\n\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf"
]
]
|
sanweiliti/Segmentation-MonoDepth-Pytorch | [
"d1a3de8d10c60fe9d3b86b585e0f0089555fc8a6"
]
| [
"saliency_class_val.py"
]
| [
"from __future__ import print_function\n\nimport argparse\nimport numpy as np\nimport torch\nimport scipy.misc as m\nimport cv2\nfrom torch.utils import data\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\nfrom ptsemseg.models.fcn_seg import *\nfrom ptsemseg.models.segnet_seg import *\nfrom ptsemseg.models.frrn_seg import *\nfrom ptsemseg.models.deeplab_seg import *\nfrom ptsemseg.models.fcrn_seg import *\nfrom ptsemseg.models.dispnet_seg import *\n\nfrom ptsemseg.models.fcn_depth import *\nfrom ptsemseg.models.segnet_depth import *\nfrom ptsemseg.models.frrn_depth import *\nfrom ptsemseg.models.deeplab_depth import *\nfrom ptsemseg.models.fcrn_depth import *\nfrom ptsemseg.models.dispnet_depth import *\n\nfrom saliency import BackPropagation\nfrom ptsemseg.loader.kitti_loader_seg import kittiLoader_seg\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data_path\", default='datasets/kitti/semantics/', type=str,\n help='path to test images')\nparser.add_argument(\"--model_name\", type=str, default='deeplab', choices=[\"fcn\", \"frrnA\", \"segnet\", \"deeplab\", \"dispnet\", \"fcrn\"])\nparser.add_argument(\"--task\", type=str, default=\"depth\", choices=[\"seg\", \"depth\"])\nparser.add_argument(\"--model_path\", type=str,\n default='runs/deeplab_kitti_depth/4953_128_416_smooth1000_init_BNfreeze/deeplab_kitti_best_model.pkl',\n help='path to pretrained model')\n\n# the image resolution here should match the pretrained model training resolution\nparser.add_argument(\"--height\", type=int, default=128, help=\"image resize height\")\nparser.add_argument(\"--width\", type=int, default=416, help=\"image resize width\")\nparser.add_argument(\"--sample_rate\", type=int, default=2, help=\"sample rate for eval\")\nparser.add_argument(\"--num_image\", type=int, default=1, help=\"number of images to evaluate\")\n\n\nargs = parser.parse_args()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef get_model(model_name, task):\n if task == \"seg\":\n try:\n return {\n \"fcn\": fcn_seg(n_classes=19),\n \"frrnA\": frrn_seg(model_type = \"A\", n_classes=19),\n \"segnet\": segnet_seg(n_classes=19),\n \"deeplab\": deeplab_seg(n_classes=19),\n \"dispnet\": dispnet_seg(n_classes=19),\n \"fcrn\": fcrn_seg(n_classes=19),\n }[model_name]\n except:\n raise(\"Model {} not available\".format(model_name))\n elif task == \"depth\":\n try:\n return {\n \"fcn\": fcn_depth(),\n \"frrnA\": frrn_depth(model_type = \"A\"),\n \"segnet\": segnet_depth(),\n \"deeplab\": deeplab_depth(),\n \"dispnet\": dispnet_depth(),\n \"fcrn\": fcrn_depth(),\n }[model_name]\n except:\n raise(\"Model {} not available\".format(model_name))\n\n\ndef most_act_dis(saliency_map, pos_i, pos_j):\n # distance between the most activated pixel and current pixel\n height, width = saliency_map.shape\n most_act_pos = np.where(saliency_map == np.max(saliency_map))\n all_dist = 0\n for i in range(most_act_pos[0].shape[0]):\n dist = np.sqrt((most_act_pos[0][i]-pos_i) ** 2 + (most_act_pos[1][i]-pos_j) ** 2)\n all_dist = all_dist + dist\n result = (all_dist / len(most_act_pos[0])) / np.sqrt(height ** 2 + width ** 2)\n return result\n\n\n# biggest distance between current pixel and all pixels with value >= threshold\n# number of pixels >= threshold / number of total_pixels\ndef largest_radius(saliency_map, pos_i, pos_j, threshold=0.2):\n height, width = saliency_map.shape\n\n act_pixel_pos = np.where(saliency_map >= threshold)\n all_dist = np.zeros(act_pixel_pos[0].shape[0])\n\n if act_pixel_pos[0].shape[0] 
== 0:\n return 0, 0\n for i in range(act_pixel_pos[0].shape[0]):\n all_dist[i] = np.sqrt((act_pixel_pos[0][i]-pos_i) ** 2 + (act_pixel_pos[1][i]-pos_j) ** 2)\n radius = np.max(all_dist) / np.sqrt(height ** 2 + width ** 2)\n part = act_pixel_pos[0].shape[0] / (height * width)\n return radius, part\n\n\ndef calculate(image, label, bp, args):\n num_classes = 19\n pred_idx = bp.forward(image.to(device)) # predict lbl / depth: [h, w]\n label = label.numpy()[0]\n\n img_most_act_dis, img_mean_act = 0, 0\n img_radius1, img_radius2, img_radius3, img_radius4, img_radius5 = 0, 0, 0, 0, 0\n img_part1, img_part2, img_part3, img_part4, img_part5 = 0, 0, 0, 0, 0\n\n clc_img_most_act_dis = np.zeros(num_classes, dtype=float)\n clc_img_mean_act = np.zeros(num_classes, dtype=float)\n clc_img_radius1 = np.zeros(num_classes, dtype=float)\n clc_img_radius2 = np.zeros(num_classes, dtype=float)\n clc_img_radius3 = np.zeros(num_classes, dtype=float)\n clc_img_radius4 = np.zeros(num_classes, dtype=float)\n clc_img_radius5 = np.zeros(num_classes, dtype=float)\n clc_img_part1 = np.zeros(num_classes, dtype=float)\n clc_img_part2 = np.zeros(num_classes, dtype=float)\n clc_img_part3 = np.zeros(num_classes, dtype=float)\n clc_img_part4 = np.zeros(num_classes, dtype=float)\n clc_img_part5 = np.zeros(num_classes, dtype=float)\n clc_total_pixel = np.zeros(num_classes, dtype=int)\n\n y1, y2 = int(0.40810811 * args.height), int(0.99189189 * args.height)\n x1, x2 = int(0.03594771 * args.width), int(0.96405229 * args.width)\n # valid_height = y2 - y1\n # valid_width = x2 - x1\n total_pixel = 0\n\n # y1, y2 = 1, 3\n # x1, x2 = 1, 4\n\n for pos_i in tqdm(range(y1, y2+1, args.sample_rate)):\n for pos_j in tqdm(range(x1, x2+1, args.sample_rate)):\n i_class = label[pos_i, pos_j]\n if i_class < num_classes:\n bp.backward(pos_i=pos_i, pos_j=pos_j, idx=pred_idx[pos_i, pos_j])\n output_vanilla, output_saliency = bp.generate() # [3, h, w]\n\n output_saliency = output_saliency[y1:y2, x1:x2]\n # normalized saliency map for a pixel in an image\n if np.max(output_saliency) > 0:\n output_saliency = (output_saliency - np.min(output_saliency)) / np.max(output_saliency)\n # m.imsave(\"output.png\", output_saliency)\n\n most_act_pt_dis = most_act_dis(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1)\n mean_act = np.mean(output_saliency) # mean value for the saliency map\n radius1, part1 = largest_radius(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1, threshold=0.1)\n radius2, part2 = largest_radius(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1, threshold=0.3)\n radius3, part3 = largest_radius(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1, threshold=0.5)\n radius4, part4 = largest_radius(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1, threshold=0.7)\n radius5, part5 = largest_radius(output_saliency, pos_i=pos_i-y1, pos_j=pos_j-x1, threshold=0.9)\n\n clc_img_most_act_dis[i_class] += most_act_pt_dis\n clc_img_mean_act[i_class] += mean_act\n clc_img_radius1[i_class] += radius1\n clc_img_radius2[i_class] += radius2\n clc_img_radius3[i_class] += radius3\n clc_img_radius4[i_class] += radius4\n clc_img_radius5[i_class] += radius5\n clc_img_part1[i_class] += part1\n clc_img_part2[i_class] += part2\n clc_img_part3[i_class] += part3\n clc_img_part4[i_class] += part4\n clc_img_part5[i_class] += part5\n clc_total_pixel[i_class] += 1\n\n img_most_act_dis += most_act_pt_dis\n img_mean_act += mean_act\n img_radius1 += radius1\n img_radius2 += radius2\n img_radius3 += radius3\n img_radius4 += radius4\n img_radius5 += radius5\n img_part1 += part1\n img_part2 
+= part2\n img_part3 += part3\n img_part4 += part4\n img_part5 += part5\n total_pixel += 1\n\n img_most_act_dis = img_most_act_dis / total_pixel\n img_mean_act = img_mean_act / total_pixel\n img_radius1 = img_radius1 / total_pixel\n img_radius2 = img_radius2 / total_pixel\n img_radius3 = img_radius3 / total_pixel\n img_radius4 = img_radius4 / total_pixel\n img_radius5 = img_radius5 / total_pixel\n img_part1 = img_part1 / total_pixel\n img_part2 = img_part2 / total_pixel\n img_part3 = img_part3 / total_pixel\n img_part4 = img_part4 / total_pixel\n img_part5 = img_part5 / total_pixel\n\n for i_class in range(num_classes):\n if clc_total_pixel[i_class] > 0:\n clc_img_most_act_dis[i_class] = clc_img_most_act_dis[i_class] / clc_total_pixel[i_class]\n clc_img_mean_act[i_class] = clc_img_mean_act[i_class] / clc_total_pixel[i_class]\n clc_img_radius1[i_class] = clc_img_radius1[i_class] / clc_total_pixel[i_class]\n clc_img_radius2[i_class] = clc_img_radius2[i_class] / clc_total_pixel[i_class]\n clc_img_radius3[i_class] = clc_img_radius3[i_class] / clc_total_pixel[i_class]\n clc_img_radius4[i_class] = clc_img_radius4[i_class] / clc_total_pixel[i_class]\n clc_img_radius5[i_class] = clc_img_radius5[i_class] / clc_total_pixel[i_class]\n clc_img_part1[i_class] = clc_img_part1[i_class] / clc_total_pixel[i_class]\n clc_img_part2[i_class] = clc_img_part2[i_class] / clc_total_pixel[i_class]\n clc_img_part3[i_class] = clc_img_part3[i_class] / clc_total_pixel[i_class]\n clc_img_part4[i_class] = clc_img_part4[i_class] / clc_total_pixel[i_class]\n clc_img_part5[i_class] = clc_img_part5[i_class] / clc_total_pixel[i_class]\n\n return clc_img_most_act_dis, clc_img_mean_act, \\\n clc_img_radius1, clc_img_radius2, clc_img_radius3, clc_img_radius4, clc_img_radius5, \\\n clc_img_part1, clc_img_part2, clc_img_part3, clc_img_part4, clc_img_part5, \\\n img_most_act_dis, img_mean_act, \\\n img_radius1, img_radius2, img_radius3, img_radius4, img_radius5, \\\n img_part1, img_part2, img_part3, img_part4, img_part5, \\\n clc_total_pixel\n\n\n\ndef main():\n # Model\n model = get_model(args.model_name, args.task)\n weights = torch.load(args.model_path)\n # weights = torch.load(args.model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(weights['model_state'])\n model.to(device)\n model.eval()\n\n depth_flag = False\n if args.task == 'depth':\n depth_flag = True\n\n loader = kittiLoader_seg(\n root=args.data_path,\n split='train',\n is_transform=True,\n img_size=(args.height, args.width),\n augmentations=None,\n img_norm=True,\n saliency_eval_depth = depth_flag\n )\n\n testloader = data.DataLoader(loader,\n batch_size=1,\n num_workers=0,\n shuffle=False)\n\n bp = BackPropagation(model=model, task=args.task)\n result_clc = []\n result_img = []\n for i, (image, label, img_path) in enumerate(testloader):\n print(img_path)\n img_eval_res = calculate(image=image, label=label, bp=bp, args=args)\n\n print(img_eval_res[0:12])\n print(img_eval_res[12:-1])\n print(img_eval_res[-1])\n print(\"\\n\")\n\n result_clc.append(img_eval_res[0:12])\n result_img.append(img_eval_res[12:-1])\n\n result_clc_out = np.array(result_clc, dtype=float)\n result_img_out = np.array(result_img, dtype=float)\n np.save(\"saliency_eval_result/{}_{}_metrics_clc.npy\".format(args.task, args.model_name), result_clc_out)\n # np.savetxt('saliency_eval_result/{}_{}_metrics_clc.txt'.format(args.task, args.model_name), X=result_clc)\n np.save(\"saliency_eval_result/{}_{}_metrics_img.npy\".format(args.task, args.model_name), result_img_out)\n 
np.savetxt('saliency_eval_result/{}_{}_metrics_img.txt'.format(args.task, args.model_name), X=result_img_out)\n\n if i >= args.num_image:\n break\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.min",
"numpy.mean",
"numpy.where",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.sqrt"
]
]
|
nschan/models | [
"5e516e834652433dc3357d7c7a28de3ca03e6535"
]
| [
"research/deeplab/utils/train_utils.py"
]
| [
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions for training.\"\"\"\n\nimport six\nfrom deeplab.model import _LOGITS_SCOPE_NAME\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits,\n labels,\n num_classes,\n ignore_label,\n loss_weight=1.0,\n upsample_logits=True,\n scope=None):\n \"\"\"Adds softmax cross entropy loss for logits of each scale.\n\n Args:\n scales_to_logits: A map from logits names for different scales to logits.\n The logits have shape [batch, logits_height, logits_width, num_classes].\n labels: Groundtruth labels with shape [batch, image_height, image_width, 1].\n num_classes: Integer, number of target classes.\n ignore_label: Integer, label to ignore.\n loss_weight: Float, loss weight.\n upsample_logits: Boolean, upsample logits or not.\n scope: String, the scope for the loss.\n\n Raises:\n ValueError: Label or logits is None.\n \"\"\"\n if labels is None:\n raise ValueError('No label for softmax cross entropy loss.')\n\n for scale, logits in six.iteritems(scales_to_logits):\n loss_scope = None\n if scope:\n loss_scope = '%s_%s' % (scope, scale)\n\n if upsample_logits:\n # Label is not downsampled, and instead we upsample logits.\n logits = tf.image.resize_bilinear(\n logits, tf.shape(labels)[1:3], align_corners=True)\n scaled_labels = labels\n else:\n # Label is downsampled to the same size as logits.\n scaled_labels = tf.image.resize_nearest_neighbor(\n labels, tf.shape(logits)[1:3], align_corners=True)\n\n scaled_labels = tf.reshape(scaled_labels, shape=[-1])\n not_ignore_mask = tf.to_float(tf.not_equal(scaled_labels,\n ignore_label)) * loss_weight\n one_hot_labels = slim.one_hot_encoding(\n scaled_labels, num_classes, on_value=1.0, off_value=0.0)\n tf.losses.softmax_cross_entropy(\n one_hot_labels,\n tf.reshape(logits, shape=[-1, num_classes]),\n weights=not_ignore_mask,\n scope=loss_scope)\n\n\ndef get_model_init_fn(train_logdir,\n tf_initial_checkpoint,\n initialize_last_layer,\n last_layers,\n ignore_missing_vars=False):\n \"\"\"Gets the function initializing model variables from a checkpoint.\n\n Args:\n train_logdir: Log directory for training.\n tf_initial_checkpoint: TensorFlow checkpoint for initialization.\n initialize_last_layer: Initialize last layer or not.\n last_layers: Last layers of the model.\n ignore_missing_vars: Ignore missing variables in the checkpoint.\n\n Returns:\n Initialization function.\n \"\"\"\n if tf_initial_checkpoint is None:\n tf.logging.info('Not initializing the model from a checkpoint.')\n return None\n\n if tf.train.latest_checkpoint(train_logdir):\n tf.logging.info('Ignoring initialization; other checkpoint exists')\n return None\n\n tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)\n\n # Variables that will not be restored.\n exclude_list = ['global_step']\n 
exclude_list.extend(_LOGITS_SCOPE_NAME)\n\n if not initialize_last_layer:\n exclude_list.extend(last_layers)\n\n variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list)\n\n return slim.assign_from_checkpoint_fn(\n tf_initial_checkpoint,\n variables_to_restore,\n ignore_missing_vars=ignore_missing_vars)\n\n\ndef get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):\n \"\"\"Gets the gradient multipliers.\n\n The gradient multipliers will adjust the learning rates for model\n variables. For the task of semantic segmentation, the models are\n usually fine-tuned from the models trained on the task of image\n classification. To fine-tune the models, we usually set larger (e.g.,\n 10 times larger) learning rate for the parameters of last layer.\n\n Args:\n last_layers: Scopes of last layers.\n last_layer_gradient_multiplier: The gradient multiplier for last layers.\n\n Returns:\n The gradient multiplier map with variables as key, and multipliers as value.\n \"\"\"\n gradient_multipliers = {}\n\n for var in slim.get_model_variables():\n # Double the learning rate for biases.\n if 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2.\n\n # Use larger learning rate for last layer variables.\n for layer in last_layers:\n if layer in var.op.name and 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier\n break\n elif layer in var.op.name:\n gradient_multipliers[var.op.name] = last_layer_gradient_multiplier\n break\n\n return gradient_multipliers\n\n\ndef get_model_learning_rate(\n learning_policy, base_learning_rate, learning_rate_decay_step,\n learning_rate_decay_factor, training_number_of_steps, learning_power,\n slow_start_step, slow_start_learning_rate):\n \"\"\"Gets model's learning rate.\n\n Computes the model's learning rate for different learning policy.\n Right now, only \"step\" and \"poly\" are supported.\n (1) The learning policy for \"step\" is computed as follows:\n current_learning_rate = base_learning_rate *\n learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)\n See tf.train.exponential_decay for details.\n (2) The learning policy for \"poly\" is computed as follows:\n current_learning_rate = base_learning_rate *\n (1 - global_step / training_number_of_steps) ^ learning_power\n\n Args:\n learning_policy: Learning rate policy for training.\n base_learning_rate: The base learning rate for model training.\n learning_rate_decay_step: Decay the base learning rate at a fixed step.\n learning_rate_decay_factor: The rate to decay the base learning rate.\n training_number_of_steps: Number of steps for training.\n learning_power: Power used for 'poly' learning policy.\n slow_start_step: Training model with small learning rate for the first\n few steps.\n slow_start_learning_rate: The learning rate employed during slow start.\n\n Returns:\n Learning rate for the specified learning policy.\n\n Raises:\n ValueError: If learning policy is not recognized.\n \"\"\"\n global_step = tf.train.get_or_create_global_step()\n if learning_policy == 'step':\n learning_rate = tf.train.exponential_decay(\n base_learning_rate,\n global_step,\n learning_rate_decay_step,\n learning_rate_decay_factor,\n staircase=True)\n elif learning_policy == 'poly':\n learning_rate = tf.train.polynomial_decay(\n base_learning_rate,\n global_step,\n training_number_of_steps,\n end_learning_rate=0,\n power=learning_power)\n else:\n raise ValueError('Unknown learning policy.')\n\n # Employ small learning rate at the 
first few steps for warm start.\n return tf.where(global_step < slow_start_step, slow_start_learning_rate,\n learning_rate)\n"
]
| [
[
"tensorflow.shape",
"tensorflow.train.latest_checkpoint",
"tensorflow.where",
"tensorflow.not_equal",
"tensorflow.logging.info",
"tensorflow.reshape",
"tensorflow.train.polynomial_decay",
"tensorflow.train.get_or_create_global_step",
"tensorflow.train.exponential_decay"
]
]
|
hypnopump/se3-transformer-pytorch | [
"9d6c7c94ed59f40bc4f38375abc377dcc8b25d82"
]
| [
"se3_transformer_pytorch/se3_transformer_pytorch.py"
]
| [
"from math import sqrt\nfrom itertools import product\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, einsum\n\nfrom se3_transformer_pytorch.basis import get_basis\nfrom se3_transformer_pytorch.utils import exists, default, uniq, map_values, batched_index_select, masked_mean, to_order, fourier_encode_dist\nfrom se3_transformer_pytorch.reversible import ReversibleSequence, SequentialSequence\n\nfrom einops import rearrange, repeat\n\n# fiber helpers\n\nFiberEl = namedtuple('FiberEl', ['degrees', 'dim'])\n\nclass Fiber(nn.Module):\n def __init__(\n self,\n structure\n ):\n super().__init__()\n if isinstance(structure, dict):\n structure = structure.items()\n self.structure = structure\n\n @property\n def dims(self):\n return uniq(map(lambda t: t[1], self.structure))\n\n @property\n def degrees(self):\n return map(lambda t: t[0], self.structure)\n\n @staticmethod\n def create(num_degrees, dim):\n return Fiber([FiberEl(degree, dim) for degree in range(num_degrees)])\n\n def __getitem__(self, degree):\n return dict(self.structure)[degree]\n\n def __iter__(self):\n return iter(self.structure)\n\n def __mul__(self, fiber):\n return product(self.structure, fiber.structure)\n\n def __and__(self, fiber):\n out = []\n degrees_out = fiber.degrees\n for degree, dim in self:\n if degree in fiber.degrees:\n dim_out = fiber[degree]\n out.append((degree, dim, dim_out))\n return out\n\ndef get_tensor_device_and_dtype(features):\n first_tensor = next(iter(features.items()))[1]\n return first_tensor.device, first_tensor.dtype\n\n# classes\n\nclass ResidualSE3(nn.Module):\n \"\"\" only support instance where both Fibers are identical \"\"\"\n def forward(self, x, res):\n out = {}\n for degree, tensor in x.items():\n degree = str(degree)\n out[degree] = tensor\n if degree in res:\n out[degree] = out[degree] + res[degree]\n return out\n\nclass LinearSE3(nn.Module):\n def __init__(\n self,\n fiber_in,\n fiber_out\n ):\n super().__init__()\n self.weights = nn.ParameterDict()\n\n for (degree, dim_in, dim_out) in (fiber_in & fiber_out):\n key = str(degree)\n self.weights[key] = nn.Parameter(torch.randn(dim_in, dim_out) / sqrt(dim_in))\n\n def forward(self, x):\n out = {}\n for degree, weight in self.weights.items():\n out[degree] = einsum('b n d m, d e -> b n e m', x[degree], weight)\n return out\n\nclass NormSE3(nn.Module):\n \"\"\"Norm-based SE(3)-equivariant nonlinearity.\n \n Nonlinearities are important in SE(3) equivariant GCNs. They are also quite \n expensive to compute, so it is convenient for them to share resources with\n other layers, such as normalization. 
The general workflow is as follows:\n\n > for feature type in features:\n > norm, phase <- feature\n > output = fnc(norm) * phase\n \n where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.\n \"\"\"\n def __init__(\n self,\n fiber,\n nonlin = nn.GELU(),\n eps = 1e-12\n ):\n super().__init__()\n self.fiber = fiber\n self.nonlin = nonlin\n self.eps = eps\n\n # Norm mappings: 1 per feature type\n self.transform = nn.ModuleDict()\n for degree, chan in fiber:\n self.transform[str(degree)] = nn.Sequential(nn.LayerNorm(chan), nonlin)\n\n def forward(self, features):\n output = {}\n for degree, t in features.items():\n # Compute the norms and normalized features\n norm = t.norm(dim = -1, keepdim = True).clamp(min = self.eps)\n phase = t / norm\n\n # Transform on norms\n fn = self.transform[degree]\n transformed = fn(norm.squeeze(-1))[..., None]\n\n # Nonlinearity on norm\n output[degree] = (transformed * phase).view(*t.shape)\n\n return output\n\nclass ConvSE3(nn.Module):\n \"\"\"A tensor field network layer\n \n ConvSE3 stands for a Convolution SE(3)-equivariant layer. It is the \n equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n conv layer in a GCN.\n\n At each node, the activations are split into different \"feature types\",\n indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..\n \"\"\"\n def __init__(\n self,\n fiber_in,\n fiber_out,\n self_interaction = True,\n pool = True,\n edge_dim = 0,\n fourier_encode_dist = False\n ):\n super().__init__()\n self.fiber_in = fiber_in\n self.fiber_out = fiber_out\n self.edge_dim = edge_dim\n self.self_interaction = self_interaction\n\n # Neighbor -> center weights\n self.kernel_unary = nn.ModuleDict()\n\n for (di, mi), (do, mo) in (self.fiber_in * self.fiber_out):\n self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)\n\n self.pool = pool\n\n # Center -> center weights\n if self_interaction:\n assert self.pool, 'must pool edges if followed with self interaction'\n self.self_interact = LinearSE3(fiber_in, fiber_out)\n self.self_interact_sum = ResidualSE3()\n\n def forward(\n self,\n inp,\n edge_info,\n rel_dist = None,\n basis = None\n ):\n neighbor_indices, neighbor_masks, edges = edge_info\n rel_dist = rearrange(rel_dist, 'b m n -> b m n ()')\n\n kernels = {}\n outputs = {}\n\n for (di, mi), (do, mo) in (self.fiber_in * self.fiber_out):\n etype = f'({di},{do})'\n kernel_fn = self.kernel_unary[etype]\n edge_features = torch.cat((rel_dist, edges), dim = -1) if exists(edges) else rel_dist\n kernels[etype] = kernel_fn(edge_features, basis = basis)\n \n for degree_out in self.fiber_out.degrees:\n output = 0\n degree_out_key = str(degree_out)\n\n for degree_in, m_in in self.fiber_in:\n x = inp[str(degree_in)]\n x = batched_index_select(x, neighbor_indices, dim = 1)\n x = x.view(*x.shape[:3], to_order(degree_in) * m_in, 1)\n\n etype = f'({degree_in},{degree_out})'\n kernel = kernels[etype]\n output = output + einsum('... o i, ... i c -> ... 
o c', kernel, x)\n\n if self.pool:\n output = masked_mean(output, neighbor_masks, dim = 2) if exists(neighbor_masks) else output.mean(dim = 2)\n\n leading_shape = x.shape[:2] if self.pool else x.shape[:3]\n output = output.view(*leading_shape, -1, to_order(degree_out))\n\n outputs[degree_out_key] = output\n\n if self.self_interaction:\n self_interact_out = self.self_interact(inp)\n outputs = self.self_interact_sum(outputs, self_interact_out)\n\n return outputs\n\nclass RadialFunc(nn.Module):\n \"\"\"NN parameterized radial profile function.\"\"\"\n def __init__(\n self,\n num_freq,\n in_dim,\n out_dim,\n edge_dim = None,\n fourier_encode_dist = False,\n num_fourier_features = 4,\n mid_dim = 128\n ):\n super().__init__()\n self.num_freq = num_freq\n self.in_dim = in_dim\n self.mid_dim = mid_dim\n self.out_dim = out_dim\n self.edge_dim = default(edge_dim, 0)\n\n self.fourier_encode_dist = fourier_encode_dist\n self.num_fourier_features = num_fourier_features if fourier_encode_dist else 0\n\n input_dim = self.edge_dim + 1 + (self.num_fourier_features * 2)\n\n self.net = nn.Sequential(\n nn.Linear(input_dim, mid_dim),\n nn.LayerNorm(mid_dim),\n nn.ReLU(),\n nn.Linear(mid_dim, mid_dim),\n nn.LayerNorm(mid_dim),\n nn.ReLU(),\n nn.Linear(mid_dim, num_freq * in_dim * out_dim)\n )\n\n self.apply(self.init_)\n\n def init_(self, m):\n if m in {nn.Linear}:\n nn.init.kaiming_uniform_(m.weight)\n\n def forward(self, x):\n if self.fourier_encode_dist:\n x = fourier_encode_dist(x, num_encodings = self.num_fourier_features)\n x = rearrange(x, 'b n m () d -> b n m d')\n\n y = self.net(x)\n return rearrange(y, '... (o i f) -> ... o () i () f', i = self.in_dim, o = self.out_dim)\n\nclass PairwiseConv(nn.Module):\n \"\"\"SE(3)-equivariant convolution between two single-type features\"\"\"\n def __init__(\n self,\n degree_in,\n nc_in,\n degree_out,\n nc_out,\n edge_dim = 0,\n fourier_encode_dist = False\n ):\n super().__init__()\n self.degree_in = degree_in\n self.degree_out = degree_out\n self.nc_in = nc_in\n self.nc_out = nc_out\n\n self.num_freq = to_order(min(degree_in, degree_out))\n self.d_out = to_order(degree_out)\n self.edge_dim = edge_dim\n\n self.rp = RadialFunc(self.num_freq, nc_in, nc_out, edge_dim, fourier_encode_dist)\n\n def forward(self, feat, basis):\n R = self.rp(feat)\n kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], dim = -1)\n out = kernel.view(*kernel.shape[:3], self.d_out * self.nc_out, -1)\n return out\n\n# feed forwards\n\nclass FeedForwardSE3(nn.Module):\n def __init__(\n self,\n fiber,\n mult = 4\n ):\n super().__init__()\n self.fiber = fiber\n fiber_hidden = Fiber(list(map(lambda t: (t[0], t[1] * mult), fiber)))\n\n self.project_in = LinearSE3(fiber, fiber_hidden)\n self.nonlin = NormSE3(fiber_hidden)\n self.project_out = LinearSE3(fiber_hidden, fiber)\n\n def forward(self, features):\n outputs = self.project_in(features)\n outputs = self.nonlin(outputs)\n outputs = self.project_out(outputs)\n return outputs\n\nclass FeedForwardBlockSE3(nn.Module):\n def __init__(\n self,\n fiber,\n ):\n super().__init__()\n self.fiber = fiber\n self.prenorm = NormSE3(fiber)\n self.feedforward = FeedForwardSE3(fiber)\n self.residual = ResidualSE3()\n\n def forward(self, features):\n res = features\n out = self.prenorm(features)\n out = self.feedforward(out)\n return self.residual(out, res)\n\n# attention\n\nclass AttentionSE3(nn.Module):\n def __init__(\n self,\n fiber,\n dim_head = 64,\n heads = 8,\n attend_self = False,\n edge_dim = None,\n fourier_encode_dist = False,\n 
use_null_kv = False\n ):\n super().__init__()\n hidden_dim = dim_head * heads\n hidden_fiber = Fiber(list(map(lambda t: (t[0], hidden_dim), fiber)))\n project_out = not (heads == 1 and len(fiber.dims) == 1 and dim_head == fiber.dims[0])\n\n self.scale = dim_head ** -0.5\n self.heads = heads\n\n self.to_q = LinearSE3(fiber, hidden_fiber)\n self.to_k = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist)\n self.to_v = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist)\n self.to_out = LinearSE3(hidden_fiber, fiber) if project_out else nn.Identity()\n\n self.use_null_kv = use_null_kv\n if use_null_kv:\n self.null_keys = nn.ParameterDict()\n self.null_values = nn.ParameterDict()\n\n for degree in fiber.degrees:\n m = to_order(degree)\n degree_key = str(degree)\n self.null_keys[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))\n self.null_values[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))\n\n self.attend_self = attend_self\n if attend_self:\n self.to_self_k = LinearSE3(fiber, hidden_fiber)\n self.to_self_v = LinearSE3(fiber, hidden_fiber)\n\n def forward(self, features, edge_info, rel_dist, basis):\n h, attend_self = self.heads, self.attend_self\n device, dtype = get_tensor_device_and_dtype(features)\n neighbor_indices, neighbor_mask, edges = edge_info\n\n max_neg_value = -torch.finfo().max\n\n if exists(neighbor_mask):\n neighbor_mask = rearrange(neighbor_mask, 'b i j -> b () i j')\n\n neighbor_indices = rearrange(neighbor_indices, 'b i j -> b () i j')\n\n queries = self.to_q(features)\n keys, values = self.to_k(features, edge_info, rel_dist, basis), self.to_v(features, edge_info, rel_dist, basis)\n\n if attend_self:\n self_keys, self_values = self.to_self_k(features), self.to_self_v(features)\n\n outputs = {}\n for degree in features.keys():\n q, k, v = map(lambda t: t[degree], (queries, keys, values))\n\n q = rearrange(q, 'b i (h d) m -> b h i d m', h = h)\n k, v = map(lambda t: rearrange(t, 'b i j (h d) m -> b h i j d m', h = h), (k, v))\n\n if self.use_null_kv:\n null_k, null_v = map(lambda t: t[degree], (self.null_keys, self.null_values))\n null_k, null_v = map(lambda t: repeat(t, 'h d m -> b h i () d m', b = q.shape[0], i = q.shape[2]), (null_k, null_v))\n k = torch.cat((null_k, k), dim = 3)\n v = torch.cat((null_v, v), dim = 3)\n\n if attend_self:\n self_k, self_v = map(lambda t: t[degree], (self_keys, self_values))\n self_k, self_v = map(lambda t: rearrange(t, 'b n (h d) m -> b h n () d m', h = h), (self_k, self_v))\n k = torch.cat((self_k, k), dim = 3)\n v = torch.cat((self_v, v), dim = 3)\n\n sim = einsum('b h i d m, b h i j d m -> b h i j', q, k) * self.scale\n\n if exists(neighbor_mask):\n num_left_pad = int(attend_self) + int(self.use_null_kv)\n mask = F.pad(neighbor_mask, (num_left_pad, 0), value = True)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1)\n out = einsum('b h i j, b h i j d m -> b h i d m', attn, v)\n outputs[degree] = rearrange(out, 'b h n d m -> b n (h d) m')\n\n return self.to_out(outputs)\n\nclass AttentionBlockSE3(nn.Module):\n def __init__(\n self,\n fiber,\n dim_head = 64,\n heads = 8,\n attend_self = False,\n edge_dim = None,\n use_null_kv = False,\n fourier_encode_dist = False\n ):\n super().__init__()\n self.attn = AttentionSE3(fiber, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, use_null_kv = use_null_kv)\n 
self.prenorm = NormSE3(fiber)\n self.residual = ResidualSE3()\n\n def forward(self, features, edge_info, rel_dist, basis):\n res = features\n outputs = self.prenorm(features)\n outputs = self.attn(outputs, edge_info, rel_dist, basis)\n return self.residual(outputs, res)\n\n# main class\n\nclass SE3Transformer(nn.Module):\n def __init__(\n self,\n *,\n dim,\n heads = 8,\n dim_head = 64,\n depth = 2,\n input_degrees = 1,\n num_degrees = 2,\n output_degrees = 1,\n valid_radius = 1e5,\n reduce_dim_out = False,\n num_tokens = None,\n num_edge_tokens = None,\n edge_dim = None,\n reversible = False,\n attend_self = True,\n use_null_kv = False,\n differentiable_coors = False,\n fourier_encode_dist = False,\n num_neighbors = float('inf'),\n attend_sparse_neighbors = False,\n num_adj_degrees = None,\n adj_dim = 0,\n max_sparse_neighbors = float('inf')\n ):\n super().__init__()\n self.dim = dim\n\n self.token_emb = None\n self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None\n\n assert not (exists(num_edge_tokens) and not exists(edge_dim)), 'edge dimension (edge_dim) must be supplied if SE3 transformer is to have edge tokens'\n self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None\n\n self.input_degrees = input_degrees\n self.num_degrees = num_degrees\n self.output_degrees = output_degrees\n\n # whether to differentiate through basis, needed for alphafold2\n\n self.differentiable_coors = differentiable_coors\n\n # neighbors hyperparameters\n\n self.valid_radius = valid_radius\n self.num_neighbors = num_neighbors\n\n # sparse neighbors, derived from adjacency matrix or edges being passed in\n\n self.attend_sparse_neighbors = attend_sparse_neighbors\n self.max_sparse_neighbors = max_sparse_neighbors\n\n # adjacent neighbor derivation and embed\n\n assert not (exists(num_adj_degrees) and num_adj_degrees < 1), 'make sure adjacent degrees is greater than 1'\n self.num_adj_degrees = num_adj_degrees\n self.adj_emb = nn.Embedding(num_adj_degrees + 1, adj_dim) if exists(num_adj_degrees) and adj_dim > 0 else None\n\n edge_dim = (edge_dim if exists(self.edge_emb) else 0) + (adj_dim if exists(self.adj_emb) else 0)\n\n # main network\n\n fiber_in = Fiber.create(input_degrees, dim)\n fiber_hidden = Fiber.create(num_degrees, dim)\n fiber_out = Fiber.create(output_degrees, dim)\n\n self.conv_in = ConvSE3(fiber_in, fiber_hidden, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)\n\n layers = nn.ModuleList([])\n for _ in range(depth):\n layers.append(nn.ModuleList([\n AttentionBlockSE3(fiber_hidden, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist, use_null_kv = use_null_kv),\n FeedForwardBlockSE3(fiber_hidden)\n ]))\n\n execution_class = ReversibleSequence if reversible else SequentialSequence\n self.net = execution_class(layers)\n\n self.conv_out = ConvSE3(fiber_hidden, fiber_out, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)\n\n self.norm = NormSE3(fiber_out)\n\n self.linear_out = LinearSE3(\n fiber_out,\n Fiber.create(output_degrees, 1)\n ) if reduce_dim_out else None\n\n def forward(self, feats, coors, mask = None, adj_mat = None, edges = None, return_type = None, return_pooled = False):\n _mask = mask\n\n if self.output_degrees == 1:\n return_type = 0\n\n if exists(self.token_emb):\n feats = self.token_emb(feats)\n\n assert not (self.attend_sparse_neighbors and not exists(adj_mat)), 'adjacency matrix (adjacency_mat) or edges (edges) must be 
passed in'\n assert not (exists(edges) and not exists(self.edge_emb)), 'edge embedding (num_edge_tokens & edge_dim) must be supplied if one were to train on edge types'\n\n if torch.is_tensor(feats):\n feats = {'0': feats[..., None]}\n\n b, n, d, *_, device = *feats['0'].shape, feats['0'].device\n\n assert d == self.dim, f'feature dimension {d} must be equal to dimension given at init {self.dim}'\n assert set(map(int, feats.keys())) == set(range(self.input_degrees)), f'input must have {self.input_degrees} degree'\n\n num_degrees, neighbors, max_sparse_neighbors, valid_radius = self.num_degrees, self.num_neighbors, self.max_sparse_neighbors, self.valid_radius\n\n assert self.attend_sparse_neighbors or neighbors > 0, 'you must either attend to sparsely bonded neighbors, or set number of locally attended neighbors to be greater than 0'\n\n # create N-degrees adjacent matrix from 1st degree connections\n\n if exists(self.num_adj_degrees):\n if len(adj_mat.shape) == 2:\n adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)\n\n adj_indices = adj_mat.clone().long()\n\n for ind in range(self.num_adj_degrees - 1):\n degree = ind + 2\n\n next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0\n next_degree_mask = (next_degree_adj_mat.float() - adj_mat.float()).bool()\n adj_indices.masked_fill_(next_degree_mask, degree)\n adj_mat = next_degree_adj_mat.clone()\n\n # se3 transformer by default cannot have a node attend to itself\n\n exclude_self_mask = rearrange(~torch.eye(n, dtype = torch.bool, device = device), 'i j -> () i j')\n\n # calculate sparsely connected neighbors\n\n sparse_neighbor_mask = None\n num_sparse_neighbors = 0\n\n if self.attend_sparse_neighbors:\n assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'\n\n if exists(adj_mat):\n if len(adj_mat) == 2:\n adj_mat = repeat(adj_mat, 'i j -> b i j', b = b)\n\n adj_mat = adj_mat.masked_select(exclude_self_mask).reshape(b, n, n -1)\n\n adj_mat_values = adj_mat.float()\n adj_mat_max_neighbors = adj_mat_values.sum(dim = -1).max().item()\n\n if max_sparse_neighbors < adj_mat_max_neighbors:\n noise = torch.empty_like(adj_mat_values).uniform_(-0.01, 0.01)\n adj_mat_values += noise\n\n num_sparse_neighbors = int(min(max_sparse_neighbors, adj_mat_max_neighbors))\n values, indices = adj_mat_values.topk(num_sparse_neighbors, dim = -1)\n sparse_neighbor_mask = torch.zeros_like(adj_mat_values).scatter_(-1, indices, values)\n sparse_neighbor_mask = sparse_neighbor_mask > 0.5\n\n # exclude edge of token to itself\n\n indices = repeat(torch.arange(n, device = device), 'i -> b i j', b = b, j = n)\n rel_pos = rearrange(coors, 'b n d -> b n () d') - rearrange(coors, 'b n d -> b () n d')\n\n indices = indices.masked_select(exclude_self_mask).reshape(b, n, n - 1)\n rel_pos = rel_pos.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, 3)\n\n if exists(mask):\n mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')\n mask = mask.masked_select(exclude_self_mask).reshape(b, n, n - 1)\n\n if exists(edges):\n edges = self.edge_emb(edges)\n edges = edges.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, -1)\n\n if exists(self.adj_emb):\n adj_emb = self.adj_emb(adj_indices)\n edges = torch.cat((edges, adj_emb), dim = -1) if exists(edges) else adj_emb\n\n rel_dist = rel_pos.norm(dim = -1)\n\n # use sparse neighbor mask to assign priority of bonded\n\n modified_rel_dist = rel_dist\n if exists(sparse_neighbor_mask):\n modified_rel_dist.masked_fill_(sparse_neighbor_mask, 0.)\n\n # 
if number of local neighbors by distance is set to 0, then only fetch the sparse neighbors defined by adjacency matrix\n\n if neighbors == 0:\n valid_radius = 0\n\n # get neighbors and neighbor mask, excluding self\n\n neighbors = int(min(neighbors, n - 1))\n total_neighbors = int(neighbors + num_sparse_neighbors)\n assert total_neighbors > 0, 'you must be fetching at least 1 neighbor'\n\n total_neighbors = int(min(total_neighbors, n - 1)) # make sure total neighbors does not exceed the length of the sequence itself\n\n dist_values, nearest_indices = modified_rel_dist.topk(total_neighbors, dim = -1, largest = False)\n neighbor_mask = dist_values <= valid_radius\n\n neighbor_rel_dist = batched_index_select(rel_dist, nearest_indices, dim = 2)\n neighbor_rel_pos = batched_index_select(rel_pos, nearest_indices, dim = 2)\n neighbor_indices = batched_index_select(indices, nearest_indices, dim = 2)\n\n if exists(mask):\n neighbor_mask = neighbor_mask & batched_index_select(mask, nearest_indices, dim = 2)\n\n if exists(edges):\n edges = batched_index_select(edges, nearest_indices, dim = 2)\n\n # calculate basis\n\n basis = get_basis(neighbor_rel_pos, num_degrees - 1, differentiable = self.differentiable_coors)\n\n # main logic\n\n edge_info = (neighbor_indices, neighbor_mask, edges)\n x = feats\n\n # project in\n\n x = self.conv_in(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)\n\n # transformer layers\n\n x = self.net(x, edge_info = edge_info, rel_dist = neighbor_rel_dist, basis = basis)\n\n # project out\n\n x = self.conv_out(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)\n\n # norm\n\n x = self.norm(x)\n\n # reduce dim if specified\n\n if exists(self.linear_out):\n x = self.linear_out(x)\n x = map_values(lambda t: t.squeeze(dim = 2), x)\n\n if return_pooled:\n mask_fn = (lambda t: masked_mean(t, _mask, dim = 1)) if exists(_mask) else (lambda t: t.mean(dim = 1))\n x = map_values(mask_fn, x)\n\n if '0' in x:\n x['0'] = x['0'].squeeze(dim = -1)\n\n if exists(return_type):\n return x[str(return_type)]\n\n return x\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.einsum",
"torch.nn.ModuleDict",
"torch.finfo",
"torch.eye",
"torch.nn.functional.pad",
"torch.sum",
"torch.nn.LayerNorm",
"torch.is_tensor",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.ParameterDict",
"torch.zeros",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Identity",
"torch.nn.ReLU",
"torch.nn.GELU",
"torch.arange",
"torch.randn",
"torch.empty_like"
]
]
|
vene/sparseattn | [
"e89a2162bdde3a86b7dfdba22e292ea3bd3880d3"
]
| [
"pytorch/torchsparseattn/test_oscar.py"
]
| [
"from __future__ import division\n\nimport pytest\nfrom numpy.testing import assert_allclose\nimport numpy as np\nimport torch\nfrom torch.autograd import gradcheck, Variable\n\nfrom .oscar import OscarProxFunction, oscar_prox_jv\n\n\ndef _oscar_prox_jacobian(y_star, dout=None):\n y_star = y_star.numpy()\n dim = y_star.shape[0]\n J = torch.zeros(dim, dim)\n\n _, inv, counts = np.unique(np.abs(y_star),\n return_inverse=True,\n return_counts=True)\n\n for i in range(dim):\n for j in range(dim):\n if (inv[i] == inv[j] and\n y_star[i] != 0):\n J[i, j] = (np.sign(y_star[i]) * np.sign(y_star[j])\n / counts[inv[i]])\n if dout is not None:\n return torch.mv(J, dout)\n else:\n return J\n\n\[email protected]('alpha', [0.001, 0.01, 0.1, 1])\[email protected]('beta', [0.001, 0.01, 0.1, 1])\ndef test_jv(alpha, beta):\n\n torch.manual_seed(1)\n torch.set_default_tensor_type('torch.DoubleTensor')\n\n for _ in range(30):\n x = Variable(torch.randn(15))\n dout = torch.randn(15)\n y_hat = OscarProxFunction(alpha=alpha, beta=beta)(x).data\n\n ref = _oscar_prox_jacobian(y_hat, dout)\n din = oscar_prox_jv(y_hat, dout)\n assert_allclose(ref.numpy(), din.numpy(), atol=1e-5)\n\n\[email protected]('alpha', [0.001, 0.01, 0.1, 1])\[email protected]('beta', [0.001, 0.01, 0.1, 1])\ndef test_finite_diff(alpha, beta):\n torch.manual_seed(1)\n torch.set_default_tensor_type('torch.DoubleTensor')\n\n for _ in range(30):\n x = Variable(torch.randn(20), requires_grad=True)\n func = OscarProxFunction(alpha, beta=beta)\n assert gradcheck(func, (x,), eps=1e-5, atol=1e-3)\n"
]
| [
[
"torch.zeros",
"torch.mv",
"torch.set_default_tensor_type",
"torch.autograd.gradcheck",
"torch.manual_seed",
"numpy.sign",
"numpy.abs",
"torch.randn"
]
]
|
marc2332/sardana | [
"48dc9191baaa63f6c714d8c025e8f3f96548ad26"
]
| [
"src/sardana/pool/poolcontrollers/test/base.py"
]
| [
"#!/usr/bin/env python\n\n##############################################################################\n##\n# This file is part of Sardana\n##\n# http://www.tango-controls.org/static/sardana/latest/doc/html/index.html\n##\n# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain\n##\n# Sardana is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n##\n# Sardana is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n##\n# You should have received a copy of the GNU Lesser General Public License\n# along with Sardana. If not, see <http://www.gnu.org/licenses/>.\n##\n##############################################################################\n\nimport time\nimport threading\nimport numpy\n\nimport unittest\n\nfrom sardana import State\nfrom sardana.pool.poolcontrollers.DummyMotorController import Motion\nfrom sardana.pool.pooldefs import SynchParam\nfrom sardana.sardanaattribute import SardanaAttribute\nfrom taurus.core.util.log import Logger\n\n__all__ = ['BaseControllerTestCase', 'TriggerGateControllerTestCase',\n 'PositionGenerator', 'TriggerGateReceiver']\n\nclass BaseControllerTestCase(object):\n \"\"\" Base test case for unit testing arbitrary controllers.\n This class will create a controller instance and define an axis from the\n class member attributes:\n KLASS <type> controller class\n PROPS <dict> properties of the controller\n AXIS <int> number of the axis\n \"\"\"\n KLASS = None\n PROPS = {}\n AXIS = 1\n DEBUG = False\n\n def setUp(self):\n self.logger = Logger('BaseControllerTestCase')\n if self.DEBUG:\n self.logger.setLogLevel(Logger.Debug)\n\n if self.KLASS is None:\n raise Exception('Ctrl klass has not been defined')\n name = 'test_ctrl'\n self.ctrl = self.KLASS(name, self.PROPS)\n self.pre_AddDevice_hook()\n self.ctrl.AddDevice(self.AXIS)\n\n def tearDown(self):\n if self.ctrl is not None:\n self.ctrl.DeleteDevice(self.AXIS)\n\n def axisPar(self, name, value, expected_value=None):\n \"\"\" Helper for test the SetAxisPar & GetaxisPar methods\n \"\"\"\n axis = self.AXIS\n if expected_value is None:\n expected_value = value\n self.ctrl.SetAxisPar(axis, name, value)\n r_value = self.ctrl.GetAxisPar(axis, name)\n msg = ('The %s value is %s, and the expected value is %s'\n % (name, r_value, expected_value))\n self.assertEqual(r_value, expected_value, msg)\n\n def stateOne(self, expected_state=State.On):\n \"\"\" Helper for test the stateOne method\n \"\"\"\n sta, status = self.ctrl.StateOne(self.AXIS)\n msg = ('The current state of axis(%d) is %d when expected, %d'\n % (self.AXIS, sta, expected_state))\n self.assertEqual(sta, expected_state, msg)\n\n def start_action(self, configuration):\n \"\"\" This method set the axis parameters and pre start the axis.\n \"\"\"\n for key, value in list(configuration.items()):\n self.axisPar(key, value)\n self.ctrl.SynchOne(configuration)\n\n def pre_AddDevice_hook(self):\n pass\n\n\nclass TriggerGateControllerTestCase(unittest.TestCase, BaseControllerTestCase):\n\n def setUp(self):\n unittest.TestCase.setUp(self)\n BaseControllerTestCase.setUp(self)\n self.isAborted = False\n\n def tearDown(self):\n BaseControllerTestCase.tearDown(self)\n 
unittest.TestCase.tearDown(self)\n\n def post_configuration_hook(self):\n ''' Hook for post configure actions\n '''\n pass\n\n def post_generation_hook(self):\n ''' Hook for post generation actions\n '''\n pass\n\n def generation(self, configuration):\n \"\"\" Helper for test a simple generation\n \"\"\"\n self.configuration = configuration\n repetitions = 0\n for group in configuration:\n repetitions += group[SynchParam.Repeats]\n # store repeats for the assers against received triggers\n self.repetitions = repetitions\n self.ctrl.SynchOne(self.AXIS, configuration)\n # execute Hook\n self.post_configuration_hook()\n # PreStartOne the axis\n self.ctrl.PreStartOne(self.AXIS)\n self.ctrl.StartOne(self.AXIS)\n while self.ctrl.StateOne(self.AXIS)[0] == State.Moving:\n time.sleep(0.001)\n self.post_generation_hook()\n state, status = self.ctrl.StateOne(self.AXIS)\n msg = ('The axis %d is not Stopped, its status is %s'\n % (self.AXIS, status))\n self.assertEqual(state, State.get('On'), msg)\n\n def abort(self, configuration, abort):\n \"\"\" Helper for test the abort\n \"\"\"\n self.configuration = configuration\n self.ctrl.SynchOne(self.AXIS, configuration)\n self.post_configuration_hook()\n # PreStartOne the axis\n self.ctrl.PreStartOne(self.AXIS)\n self.ctrl.StartOne(self.AXIS)\n while self.ctrl.StateOne(self.AXIS)[0] == State.Moving:\n time.sleep(abort)\n self.ctrl.AbortOne(self.AXIS)\n self.isAborted = True\n self.post_generation_hook()\n state, status = self.ctrl.StateOne(self.AXIS)\n msg = ('The axis %d is not Stopped, its status is %s'\n % (self.AXIS, status))\n self.assertEqual(state, State.get('On'), msg)\n\n\nclass PositionGenerator(threading.Thread):\n \"\"\" It is a position generator. A Sardana Motion class is used for simulate\n the motor. 
The attribute value has the current user position of the motor.\n \"\"\"\n\n def __init__(self, start_pos, end_pos, period):\n \"\"\"\n :param start_pos: start position for the motion\n :param end_pos: end position for the motion\n :param period: nap time between fireevents\n :return:\n \"\"\"\n threading.Thread.__init__(self)\n self.motor = Motion()\n self.motor.setMinVelocity(0)\n self.motor.setMaxVelocity(10)\n self.motor.setAccelerationTime(1)\n self.motor.setDecelerationTime(1)\n self.motor.setCurrentPosition(0)\n self._start_pos = start_pos\n self._end_pos = end_pos\n self._period = period\n self.value = SardanaAttribute(self, name='Position',\n initial_value=0)\n\n def run(self):\n \"\"\"\n Start the motion and update the SardanaAttribute value with the current\n position of the motion between every nap period\n \"\"\"\n self.motor.startMotion(self._start_pos, self._end_pos)\n while self.motor.isInMotion():\n value = self.motor.getCurrentUserPosition()\n self.value.set_value(value, timestamp=time.time(), propagate=1)\n time.sleep(self._period)\n value = self.motor.getCurrentUserPosition()\n self.value.set_value(value, timestamp=time.time(), propagate=1)\n\n def getMotor(self):\n \"\"\" Get the motion object\n \"\"\"\n return self.motor\n\n def setStartPos(self, pos):\n \"\"\" Update start position\n \"\"\"\n self._start_pos = pos\n self.value.set_value(pos)\n\n def setEndPos(self, pos):\n \"\"\" Update end position\n \"\"\"\n self._end_pos = pos\n\n def setPeriod(self, time):\n \"\"\" Update the nap time\n \"\"\"\n self._end_pos = time\n\n def add_listener(self, listener):\n self.value.add_listener(listener)\n\n def remove_listener(self, listener):\n self.value.remove_listener(listener)\n\n\nclass TriggerGateReceiver(object):\n '''Software TriggerGateReceiver which captures timestamps whenever an event\n comes. Provides useful methods for calculating the event generation\n performance\n '''\n # TODO: add more jitter measurements e.g. drift\n\n def __init__(self):\n self.active_events = {}\n self.passive_events = {}\n\n def getCount(self):\n count = len(list(self.passive_events.keys()))\n return count\n\n count = property(getCount)\n\n def event_received(self, *args, **kwargs):\n # store also a timestamp of the start event when it will be implemented\n timestamp = time.time()\n _, type_, value = args\n name = type.name\n if name == \"active\":\n self.active_events[value] = timestamp\n elif name == \"passive\":\n self.passive_events[value] = timestamp\n else:\n raise ValueError('Unknown EventType')\n\n def calc_characteristics(self):\n # TODO: refactor the characteristics calculation method to use numpy\n i = 0\n count = self.count\n characteristics = {}\n # there is no active event ending the last passive period, that's why\n # calculate characteristics until (count - 1)\n while i < (count - 1):\n t1 = self.active_events[i]\n t2 = self.passive_events[i]\n t3 = self.active_events[i + 1]\n active_period = t2 - t1\n passive_period = t3 - t2\n characteristics[i] = (active_period, passive_period)\n i += 1\n return characteristics\n\n def calc_cycletocycle(self):\n '''Calculate the cycle-to-cycle jitter characteristics: mean, std and max.\n Cycle-to-cycle jitter is a difference between a cycle period and a cycle\n period before it. 
To calculate one cycle-to-cycle jitter one needs\n exactly 3 active events:\n\n c2c_jitter_1 = cycle_2 - cycle_1\n cycle_2 = active_3 - active_2\n cycle_1 = active_2 - active_1\n '''\n i = 0\n count = self.count\n periods = []\n mean_c2c, std_c2c, max_c2c = 0, 0, 0\n # there is no active event ending the last passive period, that's why\n # calculate characteristics until (count - 1)\n while i < (count - 1):\n t1 = self.active_events[i]\n t2 = self.active_events[i + 1]\n period = t2 - t1\n periods.append(period)\n i += 1\n if len(periods) > 0:\n periods_array = numpy.array(periods)\n print(periods_array)\n c2c = numpy.diff(periods_array)\n mean_c2c = c2c.mean()\n std_c2c = c2c.std()\n max_c2c = c2c.max()\n return mean_c2c, std_c2c, max_c2c\n"
]
| [
[
"numpy.array",
"numpy.diff"
]
]
|
BaekduChoi/Halftoning | [
"9459d202c0b3b4e587e6d89af04c4bcfaa604d31"
]
| [
"utils/misc.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 25 16:35:06 2020\n\n@author: baekduchoi\n\"\"\"\n\n\"\"\"\n Script for miscellaneous functions used\n\"\"\"\n\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__),'.'))\n\nimport json\nimport torch\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom data import HalftoneDataset, screenImage, readScreen\nfrom torch.nn import functional as F\n\nimport cv2\nimport scipy.signal\nimport numpy as np\n\n\"\"\"\n Function that reads the json file and generates the dataloader to be used\n Only generates training and validation dataloader\n\"\"\"\ndef create_dataloaders(params) :\n train_img_root = params[\"datasets\"][\"train\"][\"root_img\"]\n train_halftone_root = params[\"datasets\"][\"train\"][\"root_halftone\"]\n batch_size = int(params[\"datasets\"][\"train\"][\"batch_size\"])\n train_img_type = params['datasets']['train']['img_type']\n n_workers = int(params['datasets']['train']['n_workers'])\n train_use_aug = params['datasets']['train']['use_aug']\n \n val_img_root = params[\"datasets\"][\"val\"][\"root_img\"]\n val_halftone_root = params[\"datasets\"][\"val\"][\"root_halftone\"]\n val_img_type = params['datasets']['val']['img_type']\n \n train_dataset = HalftoneDataset(train_img_root,\n train_halftone_root,\n train_img_type,\n train_use_aug)\n train_dataloader = DataLoader(train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n shuffle=True)\n \n # no need to use augmentation for validation data\n val_dataset = HalftoneDataset(val_img_root,\n val_halftone_root,\n val_img_type)\n val_dataloader = DataLoader(val_dataset,\n batch_size=1,\n num_workers=n_workers,\n shuffle=False)\n \n return train_dataloader, val_dataloader\n\n\"\"\"\n Added extra smooth patch image / halftone pairs\n\"\"\"\ndef create_dataloaders_extra(params) :\n train_img_root = params[\"datasets\"][\"train\"][\"root_img\"]\n train_halftone_root = params[\"datasets\"][\"train\"][\"root_halftone\"]\n batch_size = int(params[\"datasets\"][\"train\"][\"batch_size\"])\n train_img_type = params['datasets']['train']['img_type']\n n_workers = int(params['datasets']['train']['n_workers'])\n train_use_aug = params['datasets']['train']['use_aug']\n \n val_img_root = params[\"datasets\"][\"val\"][\"root_img\"]\n val_halftone_root = params[\"datasets\"][\"val\"][\"root_halftone\"]\n val_img_type = params['datasets']['val']['img_type']\n \n train_dataset1 = HalftoneDataset(train_img_root,\n train_halftone_root,\n train_img_type,\n train_use_aug)\n train_dataset2 = HalftoneDataset('./img_patch/',\n './halftone_patch/',\n '.png',\n train_use_aug)\n train_dataset = ConcatDataset([train_dataset1,train_dataset2])\n train_dataloader = DataLoader(train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n shuffle=True)\n \n # no need to use augmentation for validation data\n val_dataset = HalftoneDataset(val_img_root,\n val_halftone_root,\n val_img_type)\n val_dataloader = DataLoader(val_dataset,\n batch_size=1,\n num_workers=n_workers,\n shuffle=False)\n \n return train_dataloader, val_dataloader\n\n\"\"\"\n Function that reads the components of the json file and returns a dataloader for test dataset\n Refer to test_naive.json for the structure of json file\n For test dataset we do not use data augmentation\n\n params : output of read_json(json_file_location)\n\"\"\"\ndef create_test_dataloaders(params) :\n test_img_root = params[\"datasets\"][\"test\"][\"root_img\"]\n test_halftone_root = 
params[\"datasets\"][\"test\"][\"root_halftone\"]\n test_img_type = params['datasets']['test']['img_type']\n n_workers = int(params['datasets']['test']['n_workers'])\n \n test_dataset = HalftoneDataset(test_img_root,\n test_halftone_root,\n test_img_type,\n False)\n test_dataloader = DataLoader(test_dataset,\n batch_size=1,\n num_workers=n_workers,\n shuffle=False)\n \n return test_dataloader\n\n\"\"\"\n Function that reads the json file\n\"\"\"\ndef read_json(json_dir) : \n with open(json_dir,'r') as f :\n params = json.load(f)\n return params\n\n\"\"\"\n Nasanen's HVS model\n\"\"\"\nclass HVS(object) :\n \n def __init__(self) :\n N = 23\n c = 0.525\n d = 3.91\n G = 11.0\n pi = np.pi\n fs = pi*3500.0/180.0\n k = fs/(c*np.log(G)+d)\n \n self.hvs = np.zeros((2*N+1,2*N+1))\n \n for i in range(2*N+1) :\n for j in range(2*N+1) :\n m = i-N\n n = j-N\n \n denom = ((k**2)+4.0*(pi**2)*((m**2)+(n**2)))**1.5 \n val = 2.0*pi*k/denom\n \n dist = (float(m)**2.0+float(n)**2.0)**0.5\n if dist > float(N) :\n self.hvs[i][j] = 0.0\n else :\n self.hvs[i][j] = val*(float(N)+1-dist)\n \n # print(np.sum(self.hvs)**2)\n self.hvs = self.hvs/np.sum(self.hvs)\n self.N = N\n \n def __getitem__(self, keys) :\n m = keys[0]+self.N\n n = keys[1]+self.N\n return self.hvs[m][n]\n \n def getHVS(self) :\n return self.hvs.astype(np.float32)\n \n def size(self) :\n return self.hvs.shape\n\n\"\"\"\n HVS error loss function\n\"\"\"\ndef HVSloss(img1,img2,hvs) :\n k = hvs.size(2)\n M = img1.size(2)\n N = img1.size(3)\n\n pd = (k-1)//2\n\n img1p = F.pad(img1,(pd,pd,pd,pd),mode='circular')\n img2p = F.pad(img2,(pd,pd,pd,pd),mode='circular')\n img1_filtered = F.conv2d(img1p,hvs)\n img2_filtered = F.conv2d(img2p,hvs)\n\n return F.mse_loss(img1_filtered,img2_filtered)"
]
| [
[
"torch.utils.data.ConcatDataset",
"numpy.zeros",
"numpy.log",
"numpy.sum",
"torch.nn.functional.mse_loss",
"torch.utils.data.DataLoader",
"torch.nn.functional.pad",
"torch.nn.functional.conv2d"
]
]
|
JackKelly/slicedpy | [
"c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62"
]
| [
"slicedpy/powerstate.py"
]
| [
"from __future__ import print_function, division\nfrom bunch import Bunch\nimport copy\nfrom sklearn.mixture import GMM\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pda.channel import DEFAULT_TIMEZONE\nfrom slicedpy.normal import Normal\nfrom slicedpy.datastore import DataStore\nimport feature_detectors as fd\n\nclass PowerSuper(Bunch):\n def __init__(self, **kwds):\n self.duration = None\n self.power = None\n self.slope = None\n self.intercept = None\n self.spike_histogram = None\n super(PowerSuper, self).__init__(**kwds)\n\n def __str__(self):\n s = \"\"\n if self.power is not None:\n model = self.power.get_model()\n s += \"power={:.1f}W\\n\".format(model.mean)\n s += \"std={:.1f}\\n\".format(model.std)\n s += \"min={:.1f}\\n\".format(model.min)\n s += \"max={:.1f}\\n\".format(model.max)\n s += \"size={:.1f}\\n\".format(model.size)\n return s\n\nclass PowerState(PowerSuper):\n \"\"\"\n A washing machine might have three power states: washing, heating,\n spinning.\n\n Attributes:\n * duration: DataStore (GMM) (seconds)\n * power: DataStore (Normal)\n * slope: DataStore (GMM)\n * intercept (float)\n * spike_histogram: 2D DataStore (GMM), one col per bin \n * count_per_run = DataStore (GMM): number of times this power state is \n seen per run \n * current_count_per_run (int)\n\n \"\"\"\n\n def __init__(self, other=None, name='', **kwds):\n super(PowerState, self).__init__(**kwds)\n\n # \"cast\" from PowerSegment...\n if isinstance(other, PowerSegment):\n self.power = other.power\n self.count_per_run = DataStore(model=GMM())\n self.current_count_per_run = 1\n self.essential = None\n\n other.duration = (other.end - other.start).total_seconds()\n\n # Convert from scalars to DataStores:\n for attr, n_columns in [('duration', 1), \n ('slope', 1), \n ('intercept', 1),\n ('spike_histogram', 8)]:\n if other.__dict__.get(attr) is not None:\n self.__dict__[attr] = DataStore(n_columns=n_columns, model=GMM())\n self.__dict__[attr].append(other.__dict__[attr])\n\n def configure_as_off(self):\n \"\"\"Configure this PowerState as 'off'.\"\"\"\n self.power = DataStore(model=Normal())\n self.power.append(0)\n self.count_per_run = DataStore(model=GMM())\n self.current_count_per_run = 1\n self.essential = None\n self.end = pd.Timestamp('1970-01-01 00:00:00+00:00', tz=DEFAULT_TIMEZONE)\n\n def get_feature_vector(self):\n fv = [self.duration.data[0]]\n\n if self.slope is None:\n fv.append(0)\n else:\n fv.append(self.slope.data[0])\n\n if self.spike_histogram.data.size == 0:\n fv.extend([0]*8)\n else:\n fv.extend(self.spike_histogram.data[0,:].tolist())\n return fv\n\n def save_count_per_run(self):\n self.count_per_run.append(np.array([self.current_count_per_run]))\n self.current_count_per_run = 0\n\n def similar(self, other, mode='min max', plus_minus=50):\n \"\"\"\n Args:\n mode (str): 'min max' | 'plus minus' | 'ttest'\n 'min max': uses fd.min_max_power_segments to decide if power segments\n are similar or not.\n \"\"\"\n if mode == 'min max':\n concdata = np.concatenate([self.power.data, other.power.data])\n pwr_segs = fd.min_max_power_sgmnts(concdata)\n return len(pwr_segs) == 1\n elif mode == 'plus minus':\n own_mean = self.power.get_model().mean\n other_mean = other.power.get_model().mean\n return own_mean - plus_minus < other_mean < own_mean + plus_minus\n elif mode == 'ttest':\n return self.power.get_model().similar_mean(other.power.get_model())\n else:\n raise Exception('unrecognised mode.')\n\n def merge(self, other):\n \"\"\"Merges 
``other`` into ``self``.\"\"\"\n print(\"Merging {:.2f}W\".format(self.power.get_model().mean))\n self.current_count_per_run += 1\n for attribute in ['duration', 'power', 'slope', 'intercept', \n 'spike_histogram', 'count_per_run']:\n if self.__dict__[attribute] is not None:\n try:\n self.__dict__[attribute].extend(other.__dict__[attribute])\n except KeyError:\n # Not all powerstates have every attribute.\n pass\n\n\nclass PowerSegment(PowerSuper):\n \"\"\"\n A washing machine might have lots PowerSegments: wash, heat, wash, \n heat, wash, spin...\n\n PowerSegments have start and end variables; PowerStates do not.\n\n Attributes:\n * start: datetime of start of each power state\n * end: datetime of end of each power state\n * duration: float, seconds\n * power: DataStore (Normal)\n * slope: float\n * intercept: float\n * spike_histogram: pd.DataFrame\n (don't bother recording bin edges, assume these remain constant\n in fact, put bin edges in a config.py file)\n \"\"\"\n\n def __init__(self, **kwds):\n self.start = None\n self.end = None\n super(PowerSegment, self).__init__(**kwds)\n\n def plot(self, ax, color='k'):\n model = self.power.get_model()\n\n # Plot mean line\n ax.plot([self.start, self.end], \n [model.mean, model.mean], \n color=color, linewidth=2, alpha=0.8)\n\n # Plot 1 standard deviation\n num_start = mdates.date2num(self.start)\n num_end = mdates.date2num(self.end)\n width = num_end - num_start\n std_rect = plt.Rectangle(xy=(self.start, model.mean - model.std),\n width=width,\n height=(model.std * 2), \n alpha=0.8, color=\"#aaaaaa\")\n ax.add_patch(std_rect)\n\n # Plot min and max\n std_rect = plt.Rectangle(xy=(self.start, model.min),\n width=width,\n height=(model.max - model.min), \n alpha=0.5, color=\"#aaaaaa\")\n ax.add_patch(std_rect)\n \n # Plot slop\n if self.slope is not None:\n print(\"plotting slope: intercept=\", self.intercept, \n \"slope=\", self.slope)\n curve = lambda x, c, m: c + (m / x)\n num_end = num_start + (10 / mdates.SEC_PER_DAY)\n X = np.linspace(num_start, num_end, 10)\n x = X * mdates.SEC_PER_DAY\n ax.plot(X, \n curve((x-x[0])+1, self.intercept, self.slope),\n color=color)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"sklearn.mixture.GMM",
"pandas.Timestamp",
"matplotlib.pyplot.Rectangle",
"numpy.linspace",
"matplotlib.dates.date2num"
]
]
|
ammaddd/detectron2 | [
"7f15a71c4d44bfe0b61bf410684b38eeaf4689a1"
]
| [
"detectron2/export/flatten.py"
]
| [
"import collections\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Optional, Tuple\nimport torch\nfrom torch import nn\n\nfrom detectron2.structures import Boxes, Instances\nfrom detectron2.utils.registry import _convert_target_to_string, locate\n\nfrom .torchscript_patch import patch_builtin_len\n\n\n@dataclass\nclass Schema:\n \"\"\"\n A Schema defines how to flatten a possibly hierarchical object into tuple of\n primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.\n\n PyTorch does not support tracing a function that produces rich output\n structures (e.g. dict, Instances, Boxes). To trace such a function, we\n flatten the rich object into tuple of tensors, and return this tuple of tensors\n instead. Meanwhile, we also need to know how to \"rebuild\" the original object\n from the flattened results, so we can evaluate the flattened results.\n A Schema defines how to flatten an object, and while flattening it, it records\n necessary schemas so that the object can be rebuilt using the flattened outputs.\n\n The flattened object and the schema object is returned by ``.flatten`` classmethod.\n Then the original object can be rebuilt with the ``__call__`` method of schema.\n\n A Schema is a dataclass that can be serialized easily.\n \"\"\"\n\n # inspired by FetchMapper in tensorflow/python/client/session.py\n\n @classmethod\n def flatten(cls, obj):\n raise NotImplementedError\n\n def __call__(self, values):\n raise NotImplementedError\n\n @staticmethod\n def _concat(values):\n ret = ()\n sizes = []\n for v in values:\n assert isinstance(v, tuple), \"Flattened results must be a tuple\"\n ret = ret + v\n sizes.append(len(v))\n return ret, sizes\n\n @staticmethod\n def _split(values, sizes):\n if len(sizes):\n expected_len = sum(sizes)\n assert (\n len(values) == expected_len\n ), f\"Values has length {len(values)} but expect length {expected_len}.\"\n ret = []\n for k in range(len(sizes)):\n begin, end = sum(sizes[:k]), sum(sizes[: k + 1])\n ret.append(values[begin:end])\n return ret\n\n\n@dataclass\nclass ListSchema(Schema):\n schemas: List[Schema] # the schemas that define how to flatten each element in the list\n sizes: List[int] # the flattened length of each element\n\n def __call__(self, values):\n values = self._split(values, self.sizes)\n if len(values) != len(self.schemas):\n raise ValueError(\n f\"Values has length {len(values)} but schemas \" f\"has length {len(self.schemas)}!\"\n )\n values = [m(v) for m, v in zip(self.schemas, values)]\n return list(values)\n\n @classmethod\n def flatten(cls, obj):\n res = [flatten_to_tuple(k) for k in obj]\n values, sizes = cls._concat([k[0] for k in res])\n return values, cls([k[1] for k in res], sizes)\n\n\n@dataclass\nclass TupleSchema(ListSchema):\n def __call__(self, values):\n return tuple(super().__call__(values))\n\n\n@dataclass\nclass IdentitySchema(Schema):\n def __call__(self, values):\n return values[0]\n\n @classmethod\n def flatten(cls, obj):\n return (obj,), cls()\n\n\n@dataclass\nclass DictSchema(ListSchema):\n keys: List[str]\n\n def __call__(self, values):\n values = super().__call__(values)\n return dict(zip(self.keys, values))\n\n @classmethod\n def flatten(cls, obj):\n for k in obj.keys():\n if not isinstance(k, str):\n raise KeyError(\"Only support flattening dictionaries if keys are str.\")\n keys = sorted(obj.keys())\n values = [obj[k] for k in keys]\n ret, schema = ListSchema.flatten(values)\n return ret, cls(schema.schemas, schema.sizes, keys)\n\n\n@dataclass\nclass 
InstancesSchema(DictSchema):\n def __call__(self, values):\n image_size, fields = values[-1], values[:-1]\n fields = super().__call__(fields)\n return Instances(image_size, **fields)\n\n @classmethod\n def flatten(cls, obj):\n ret, schema = super().flatten(obj.get_fields())\n size = obj.image_size\n if not isinstance(size, torch.Tensor):\n size = torch.tensor(size)\n return ret + (size,), schema\n\n\n@dataclass\nclass TensorWrapSchema(Schema):\n \"\"\"\n For classes that are simple wrapper of tensors, e.g.\n Boxes, RotatedBoxes, BitMasks\n \"\"\"\n\n class_name: str\n\n def __call__(self, values):\n return locate(self.class_name)(values[0])\n\n @classmethod\n def flatten(cls, obj):\n return (obj.tensor,), cls(_convert_target_to_string(type(obj)))\n\n\n# if more custom structures needed in the future, can allow\n# passing in extra schemas for custom types\ndef flatten_to_tuple(obj):\n \"\"\"\n Flatten an object so it can be used for PyTorch tracing.\n Also returns how to rebuild the original object from the flattened outputs.\n\n Returns:\n res (tuple): the flattened results that can be used as tracing outputs\n schema: an object with a ``__call__`` method such that ``schema(res) == obj``.\n It is a pure dataclass that can be serialized.\n \"\"\"\n schemas = [\n ((str, bytes), IdentitySchema),\n (list, ListSchema),\n (tuple, TupleSchema),\n (collections.abc.Mapping, DictSchema),\n (Instances, InstancesSchema),\n (Boxes, TensorWrapSchema),\n ]\n for klass, schema in schemas:\n if isinstance(obj, klass):\n F = schema\n break\n else:\n F = IdentitySchema\n\n return F.flatten(obj)\n\n\nclass TracingAdapter(nn.Module):\n \"\"\"\n A model may take rich input/output format (e.g. dict or custom classes).\n This adapter flattens input/output format of a model so it becomes traceable.\n\n It also records the necessary schema to rebuild model's inputs/outputs from flattened\n inputs/outputs.\n\n Example:\n\n ::\n outputs = model(inputs) # inputs/outputs may be rich structure\n adapter = TracingAdapter(model, inputs)\n\n # can now trace the model, with adapter.flattened_inputs, or another\n # tuple of tensors with the same length and meaning\n traced = torch.jit.trace(adapter, adapter.flattened_inputs)\n\n # traced model can only produce flattened outputs (tuple of tensors)\n flattened_outputs = traced(*adapter.flattened_inputs)\n # adapter knows the schema to convert it back (new_outputs == outputs)\n new_outputs = adapter.outputs_schema(flattened_outputs)\n \"\"\"\n\n flattened_inputs: Tuple[torch.Tensor] = None\n \"\"\"\n Flattened version of inputs given to this class's constructor.\n \"\"\"\n\n inputs_schema: Schema = None\n \"\"\"\n Schema of the inputs given to this class's constructor.\n \"\"\"\n\n outputs_schema: Schema = None\n \"\"\"\n Schema of the output produced by calling the given model with inputs.\n \"\"\"\n\n def __init__(self, model: nn.Module, inputs, inference_func: Optional[Callable] = None):\n \"\"\"\n Args:\n model: an nn.Module\n inputs: An input argument or a tuple of input arguments used to call model.\n After flattening, it has to only consist of tensors.\n inference_func: a callable that takes (model, *inputs), calls the\n model with inputs, and return outputs. By default it\n is ``lambda model, *inputs: model(*inputs)``. 
Can be override\n if you need to call the model differently.\n \"\"\"\n super().__init__()\n if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):\n model = model.module\n self.model = model\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n self.inputs = inputs\n\n if inference_func is None:\n inference_func = lambda model, *inputs: model(*inputs) # noqa\n self.inference_func = inference_func\n\n self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)\n for input in self.flattened_inputs:\n if not isinstance(input, torch.Tensor):\n raise ValueError(\n f\"Inputs for tracing must only contain tensors. Got a {type(input)} instead.\"\n )\n\n def forward(self, *args: torch.Tensor):\n with torch.no_grad(), patch_builtin_len():\n inputs_orig_format = self.inputs_schema(args)\n outputs = self.inference_func(self.model, *inputs_orig_format)\n flattened_outputs, schema = flatten_to_tuple(outputs)\n if self.outputs_schema is None:\n self.outputs_schema = schema\n else:\n assert (\n self.outputs_schema == schema\n ), \"Model should always return outputs with the same structure so it can be traced!\"\n return flattened_outputs\n\n def _create_wrapper(self, traced_model):\n \"\"\"\n Return a function that has an input/output interface the same as the\n original model, but it calls the given traced model under the hood.\n \"\"\"\n\n def forward(*args):\n flattened_inputs, _ = flatten_to_tuple(args)\n flattened_outputs = traced_model(*flattened_inputs)\n return self.outputs_schema(flattened_outputs)\n\n return forward\n"
]
| [
[
"torch.no_grad",
"torch.tensor"
]
]
|
ymetz/local_interpretability | [
"3b230a425687137f36e3b148f883af961375a57d"
]
| [
"project/tcav_gpu_server/tcav/utils.py"
]
| [
"\"\"\"\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\"\"\"Collects utility functions for TCAV.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n\ndef create_session(timeout=10000, interactive=True):\n \"\"\"Create a tf session for the model.\n # This function is slight motification of code written by Alex Mordvintsev\n\n Args:\n timeout: tfutil param.\n\n Returns:\n TF session.\n \"\"\"\n graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.operation_timeout_in_ms = int(timeout * 1000)\n if interactive:\n return tf.InteractiveSession(graph=graph, config=config)\n else:\n return tf.Session(graph=graph, config=config)\n\n\ndef flatten(nested_list):\n \"\"\"Flatten a nested list.\"\"\"\n return [item for a_list in nested_list for item in a_list]\n\n\ndef process_what_to_run_expand(pairs_to_test,\n random_counterpart,\n num_random_exp=100,\n random_concepts=None):\n \"\"\"Get concept vs. random or random vs. random pairs to run.\n\n Given set of target, list of concept pairs, expand them to include\n random pairs. For instance [(t1, [c1, c2])...] becomes\n [(t1, [c1, random1],\n (t1, [c1, random2],...\n (t1, [c2, random1],\n (t1, [c2, random2],...]\n\n Args:\n pairs_to_test: [(target, [concept1, concept2,...]),...]\n random_counterpart: random concept that will be compared to the concept.\n num_random_exp: number of random experiments to run against each concept.\n random_concepts: A list of names of random concepts for the random\n experiments to draw from. 
Optional, if not provided, the\n names will be random500_{i} for i in num_random_exp.\n\n Returns:\n all_concepts: unique set of targets/concepts\n new_pairs_to_test: expanded\n \"\"\"\n\n def get_random_concept(i):\n return (random_concepts[i] if random_concepts\n else 'random500_{}'.format(i))\n\n new_pairs_to_test = []\n for (target, concept_set) in pairs_to_test:\n new_pairs_to_test_t = []\n # if only one element was given, this is to test with random.\n if len(concept_set) == 1:\n i = 0\n while len(new_pairs_to_test_t) < min(100, num_random_exp):\n # make sure that we are not comparing the same thing to each other.\n if concept_set[0] != get_random_concept(\n i) and random_counterpart != get_random_concept(i):\n new_pairs_to_test_t.append(\n (target, [concept_set[0], get_random_concept(i)]))\n i += 1\n elif len(concept_set) > 1:\n new_pairs_to_test_t.append((target, concept_set))\n else:\n tf.logging.info('PAIR NOT PROCCESSED')\n new_pairs_to_test.extend(new_pairs_to_test_t)\n\n all_concepts = list(set(flatten([cs + [tc] for tc, cs in new_pairs_to_test])))\n\n return all_concepts, new_pairs_to_test\n\n\ndef process_what_to_run_concepts(pairs_to_test):\n \"\"\"Process concepts and pairs to test.\n\n Args:\n pairs_to_test: a list of concepts to be tested and a target (e.g,\n [ (\"target1\", [\"concept1\", \"concept2\", \"concept3\"]),...])\n\n Returns:\n return pairs to test:\n target1, concept1\n target1, concept2\n ...\n target2, concept1\n target2, concept2\n ...\n\n \"\"\"\n\n pairs_for_sstesting = []\n # prepare pairs for concpet vs random.\n for pair in pairs_to_test:\n for concept in pair[1]:\n pairs_for_sstesting.append([pair[0], [concept]])\n return pairs_for_sstesting\n\n\ndef process_what_to_run_randoms(pairs_to_test, random_counterpart):\n \"\"\"Process concepts and pairs to test.\n\n Args:\n pairs_to_test: a list of concepts to be tested and a target (e.g,\n [ (\"target1\", [\"concept1\", \"concept2\", \"concept3\"]),...])\n random_counterpart: random concept that will be compared to the concept.\n\n Returns:\n return pairs to test:\n target1, random_counterpart,\n target2, random_counterpart,\n ...\n \"\"\"\n # prepare pairs for random vs random.\n pairs_for_sstesting_random = []\n targets = list(set([pair[0] for pair in pairs_to_test]))\n for target in targets:\n pairs_for_sstesting_random.append([target, [random_counterpart]])\n return pairs_for_sstesting_random\n\n\n# helper functions to write summary files\ndef print_results(results, should_print=True, class_id=-1, result_dict=None):\n \"\"\"Helper function to organize results.\n\n Args:\n results: dictionary of results from TCAV runs.\n should_print: Whether we want output to the standard output or just the dictionary\n class_id: target class to identify\n result_dict: Dictionary collecting results for different classes\n \"\"\"\n result_summary = {'random': []}\n for result in results:\n if 'random_images' == result['cav_concept'] or 'random500' in result['cav_concept']:\n result_summary['random'].append(result)\n else:\n if result['cav_concept'] not in result_summary:\n result_summary[result['cav_concept']] = []\n result_summary[result['cav_concept']].append(result) \n bottlenecks = list(set([item['bottleneck'] for item in results]))\n bottleneck_random_i_ups = {}\n for bottleneck in bottlenecks:\n bottleneck_random_i_ups[bottleneck] = [item['i_up'] for item in result_summary['random']]\n \n result_dict[class_id] = []\n for concept in result_summary:\n if 'random' is not concept:\n for bottleneck in bottlenecks:\n 
i_ups = [item['i_up'] for item in result_summary[concept] if item['bottleneck'] is bottleneck]\n if should_print:\n print('%s: %s: TCAV score: %.2f (+- %.2f), random was %.2f' % (\n concept, bottleneck, np.mean(i_ups), np.std(i_ups), np.mean(bottleneck_random_i_ups[bottleneck])))\n result_dict[class_id].append({'concept': concept, 'bottleneck': bottleneck ,'score': np.mean(i_ups), 'std': np.std(i_ups), 'random_score': np.mean(bottleneck_random_i_ups[bottleneck])})\n \n return result_dict\n\n\ndef make_dir_if_not_exists(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)\n"
]
| [
[
"tensorflow.Graph",
"tensorflow.gfile.Exists",
"tensorflow.Session",
"tensorflow.logging.info",
"numpy.mean",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"numpy.std",
"tensorflow.InteractiveSession"
]
]
|
clair513/DIY | [
"843770590a729c6aabf63367a3ab848e21ab78b9"
]
| [
"Fashion MNIST Classification/ann_fashion_mnist.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nANN Implementation on Fashion MNIST [ Source: Kaggle (https://www.kaggle.com/zalando-research/fashionmnist) ] using TensorFlow 2.0.0 on CPU.\r\nTraining dataset contains 60,000 image/records & Testing dataset contains additional 10,000 records. Dataset has 10 different label/classes of 28*28 grayscale images for Classification.\r\nModel attains around 94.13% accuracy on Training dataset, whereas succumbs to 89.87% accuracy on Testing dataset.\r\n\"\"\"\r\n\r\n# Importing external dependencies:\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.datasets import fashion_mnist\r\n\r\n\r\n# Loading Dataset:\r\n(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()\r\n\r\n\r\n# Pre-processing training data (Normalizing to [0,1] by dividing by max pixels[255] + Vector conversion by reshaping) for faster processing:\r\nX_train = X_train/255.0\r\nX_test = X_test/255.0\r\n\r\nX_train = X_train.reshape(-1, 28*28) #Grayscale images in our data are in 28*28 shape\r\nX_test = X_test.reshape(-1, 28*28)\r\n\r\n\r\n# Compiling fully-connected ANN Model:\r\nann = tf.keras.models.Sequential() #Initializing our model.\r\n\r\nann.add(tf.keras.layers.Dense(units=256, activation='relu', input_shape=(784,))) #First Layer\r\nann.add(tf.keras.layers.Dropout(0.25)) #First layer regularization to avoid overfitting during backpropagation\r\n\r\nann.add(tf.keras.layers.Dense(units=128, activation='relu')) #Second layer\r\nann.add(tf.keras.layers.Dropout(0.20)) #Second layer regularization\r\n\r\nann.add(tf.keras.layers.Dense(units=64, activation='relu')) #Third layer\r\n\r\nann.add(tf.keras.layers.Dense(units=10, activation='softmax')) #Final layer with units representing our num of label/classes to be predicted\r\n\r\nann.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) #Chosen 'loss' function suitable only for 2+ classes\r\n\r\n\r\n# Overview of model architecture:\r\nprint(ann.summary())\r\n\r\n\r\n# Training & Evaluation of our model:\r\nann.fit(X_train, y_train, epochs=80)\r\ntest_loss, test_accuracy = ann.evaluate(X_test, y_test)\r\nprint(f\"Test data Accuracy: {test_accuracy}\")\r\n\r\n\r\n# Saving our Model architecture & network weights:\r\nwith open('fashion_mnist_ann.json', 'w') as json_file:\r\n json_file.write(ann.to_json())\r\n\r\nann.save_weights('fashion_mnist_ann_weights.h5')\r\n"
]
| [
[
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.datasets.fashion_mnist.load_data",
"tensorflow.keras.layers.Dense"
]
]
|
nlp-greyfoss/metagrad | [
"0f32f177ced1478f0c75ad37bace9a9fc4044ba3"
]
| [
"tests/tensor/test_index_fill.py"
]
| [
"import numpy as np\n\nfrom metagrad.tensor import Tensor\n\n\ndef test_simple_index_fill():\n x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\n index = Tensor([0, 2])\n x.index_fill_(1, index, -1)\n\n assert x.data.tolist() == [[-1., 2., - 1.],\n [-1., 5., - 1.],\n [-1., 8., - 1.]]\n\n\ndef test_index_fill():\n x = Tensor.ones(10)\n # [1 3 5 7]\n index = Tensor(np.arange(1, 9, 2))\n # 将 1 3 5 7处的值置为-1\n x.index_fill_(0, index, -1)\n assert x.data.tolist() == [1., -1., 1., -1., 1., -1., 1., -1., 1., 1.]\n"
]
| [
[
"numpy.arange"
]
]
|
YP-Learning/streamlit-fcc | [
"e5c34d195f51a29d066cfdc9b7b2e08e4424ab36"
]
| [
"project8/model.py"
]
| [
"import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom joblib import dump\n\npenguins = pd.read_csv(\"./penguins_cleaned.csv\")\n\ndf = penguins.copy()\ntarget = 'species'\nencode = [ 'sex', 'island' ]\ntarget_mapper = { 'Adelie': 0, 'Chinstrap': 1, 'Gentoo': 2 }\n\nfor col in encode:\n dummy = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df,dummy], axis=1)\n del df[col]\n\ndef target_encode(val):\n return target_mapper[val]\n\ndf['species'] = df['species'].apply(target_encode)\n\nX = df.drop(\"species\", axis=1)\ny = df[\"species\"]\n\nclf = RandomForestClassifier()\nclf.fit(X, y)\n\ndump(clf, \"model.joblib\")\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.concat",
"pandas.read_csv",
"pandas.get_dummies"
]
]
|
kenblikylee/kcrawler | [
"1516cd74bc879f52c9cd29eb48f0763265fe4a02"
]
| [
"kcrawler/juejin/books.py"
]
| [
"#!/usr/local/bin/python\n\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport argparse\nimport time\n\nheaders = {\n 'Origin': 'https://juejin.im',\n 'Referer': 'https://juejin.im/books',\n 'Sec-Fetch-Mode': 'cors',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36'\n}\n\ndef decode_url(url):\n adr, query = url.split(\"?\")\n params = { kv.split(\"=\")[0]:kv.split(\"=\")[1] for kv in query.split(\"&\")}\n return adr, params\n\ndef encode_url(url, params):\n query = \"&\".join([\"{}={}\".format(k, v) for k, v in params.items()])\n return \"{}?{}\".format(url, query)\n\ndef get_url(alias, page, url=None):\n if url is None:\n url = 'https://xiaoce-timeline-api-ms.juejin.im/v1/getListByLastTime?uid=5bd2b8b25188252a784d19d7&client_id=1567748420039&token=eyJhY2Nlc3NfdG9rZW4iOiJTTHVPcVRGQ1BseWdTZHF4IiwicmVmcmVzaF90b2tlbiI6ImJHZkJDVDlWQkZiQUNMdTYiLCJ0b2tlbl90eXBlIjoibWFjIiwiZXhwaXJlX2luIjoyNTkyMDAwfQ%3D%3D&src=web&alias=&pageNum=1'\n base_url, query = decode_url(url)\n query['alias'] = alias\n query['pageNum'] = str(page)\n return encode_url(base_url, query)\n\ndef get_allbooks(alias='', url=None):\n page = 1\n allbooks = []\n while True:\n books = get_books(alias, page, url)\n if books is None or (len(books) == 0):\n print('\\ncrawled {} records for {} in total.'.format(len(allbooks), alias if alias != '' else 'all'))\n break\n page += 1\n allbooks += books\n return allbooks\n\ndef get_books(alias='', page=1, url=None):\n url = get_url(alias, page, url)\n res = requests.get(url, headers=headers)\n if res.status_code != 200:\n print('数据获取失败,请尝试提供 url 参数!')\n return None\n json_data = res.json()\n if not 'd' in json_data:\n return None\n book_list = json_data['d']\n return extract_data_from_book_list(book_list)\n\ndef extract_data_from_book_list(book_list):\n return [(book['userData']['username'], # 作者\n book['userData']['company'], # 作者所在公司\n book['userData']['jobTitle'], # 作者职业\n book['profile'], # 作者头衔\n book['title'], # 小册标题\n book['price'], # 小册价格\n book['buyCount'], # 购买数量\n book['pv'], # pv\n book['lastSectionCount'], # 最新章节数\n book['contentSize'], # 内容长度\n book['desc'], # 描述\n book['createdAt'], # 小册创建时间\n book['finishedAt'], # 小册完成时间\n book['updatedAt'], # 小册更新时间\n ) for book in book_list]\n\ndef get_allcates(url=None):\n allbooks = []\n insert_cate = lambda c, l: [(c, *t) for t in l]\n for c, t in [('frontend', '前端'),\n ('backend', '后端'),\n ('mobile', '移动开发'),\n ('blockchain', '区块链'),\n ('general', '通用')]:\n allbooks += insert_cate(t, get_allbooks(c, url=url))\n return allbooks\n\ndef analysis(allbooks):\n df = pd.DataFrame(allbooks, columns=['分类',\n '作者',\n '公司',\n '职业',\n '头衔',\n '小册',\n '价格',\n '购买数量',\n '访问量',\n '章节数',\n '字数',\n '介绍',\n '创建时间',\n '完成时间',\n '更新时间'])\n totalAmount = df['价格'] * df['购买数量']\n df.insert(8, '销售收入', totalAmount)\n\n dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n csv_file = 'juejin_books_{}.csv'.format(dt)\n df.to_csv(csv_file, index=False)\n print('\\nsaved to csvfile {}.\\n'.format(csv_file))\n\n excel_file = 'juejin_books_{}.xls'.format(dt)\n df.to_excel(excel_file, sheet_name=dt, index=False)\n print('\\nsaved to excel {}.\\n'.format(excel_file))\n\n try:\n df_cate_ave_price = df.groupby('分类').mean()[['价格']]\n df_cate_sum_buy = df.groupby('分类').sum()[['购买数量']]\n df_cate_sum_sales = df.groupby('分类').sum()[['销售收入']]\n \n price_average = df['价格'].mean()\n buy_total = df['购买数量'].sum()\n sales_total = df['销售收入'].sum()\n\n 
fig, axs = plt.subplots(1, 3, figsize=(12, 4))\n\n ax = df_cate_ave_price.plot(ax=axs[0], table=df_cate_ave_price.T.applymap(lambda x: format(round(x), ',')), kind='bar', grid=True)\n ax.set_title('小册平均价格 {}元'.format(round(price_average)))\n ax.get_xaxis().set_visible(False)\n \n ax = df_cate_sum_buy.plot(ax=axs[1], table=df_cate_sum_buy.T.applymap(lambda x: format(round(x), ',')), kind='bar', grid=True)\n ax.set_title('总购买数 {}'.format(format(buy_total, ',')))\n ax.get_xaxis().set_visible(False)\n \n ax = df_cate_sum_sales.plot(ax=axs[2], table=df_cate_sum_sales.T.applymap(lambda x: format(round(x), ',')), kind='bar', grid=True)\n ax.set_title('总销售收入 {}元'.format(format(int(round(sales_total)), ',')))\n ax.get_xaxis().set_visible(False)\n \n fig.tight_layout()\n plt.subplots_adjust(bottom=0.1)\n plt.show()\n except:\n pass\n return df\n\ndef get_cli_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--url', type=str, help='url.')\n args = parser.parse_args()\n return args\n\ndef main(args):\n url = None\n if 'url' in args:\n url = args['url']\n analysis(get_allcates(url))\n\nif __name__ == \"__main__\":\n args = get_cli_args()\n analysis(get_allcates(args.url))\n"
]
| [
[
"pandas.DataFrame",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
naturalis/FormicID | [
"68f257da56192e044deb192a3f26f25096617a0f"
]
| [
"formicID/AntWeb/AW2_to_json.py"
]
| [
"###############################################################################\n# __ _ ___ ____ #\n# / _| ___ _ __ _ __ ___ (_) ___|_ _| _ \\ #\n# | |_ / _ \\| '__| '_ ` _ \\| |/ __|| || | | | #\n# | _| (_) | | | | | | | | | (__ | || |_| | #\n# |_| \\___/|_| |_| |_| |_|_|\\___|___|____/ #\n# #\n# ANTWEB API v2 #\n# AntWeb to json #\n###############################################################################\n\"\"\"Description:\nThese functions are for accessing the AntWeb servers, exploiting the API\nversion 2. It is possible to download the most imaged species list and the all\nspecimen information for these species stored in json files.\n\"\"\"\n\n# Packages\n###############################################################################\n\n# Standard library imports\nimport logging\nimport os\nimport re\nimport sys\nimport time\nfrom itertools import islice\nfrom urllib.error import HTTPError\nfrom urllib.request import urlopen\n\n# Data tools imports\nimport json\nimport pandas as pd\nfrom csv import Sniffer\n\n# Additional project imports\nimport requests\nfrom tqdm import tqdm\n\n# Extract most imaged species from AntWeb\n###############################################################################\n\n\ndef _get_species_name_from_line(htmlline):\n \"\"\"Inside a htmlline, return the genus and species name of a specimen.\n\n Args:\n htmlline (str): A string of html, should be from AntWeb.\n\n Returns:\n str: Genus and species\n\n \"\"\"\n a = \"?genus=\"\n b = \"&species=\"\n genus = htmlline.split(a)[-1].split(b)[0]\n c = \"species=\"\n d = \"&rank=\"\n species = htmlline.split(c)[-1].split(d)[0]\n return genus, species\n\n\ndef _get_relevant_lines_from_html(url, min_images):\n \"\"\"From the AntWeb html code get lines with relevant code for most imaged\n species extraction. Relevant lines are those containing the string:\n \"list_extras images\" and with images over `min_images`.\n\n Args:\n url (str): An url, should be from AntWeb.org.\n min_images (int): Defines the minimum number of images per species to\n download.\n Returns:\n list: A list of strings; html lines.\n\n \"\"\"\n htmldata = requests.get(url)\n htmldata.text\n lines = []\n string = \"list_extras images\"\n for line in tqdm(\n htmldata.iter_lines(decode_unicode=\"utf-8\"),\n desc=\"Reading HTML lines\",\n unit=\" lines\",\n ):\n if line:\n if string in line:\n if re.findall(\"\\d+\", line):\n nb_images = int(re.search(r\"\\d+\", line).group())\n if nb_images >= min_images:\n lines.append(line)\n return lines\n\n\ndef most_imaged_species_to_csv(output, min_images=100):\n \"\"\"Create a list with the most imaged species. However there is a problem,\n as some specimens have lots of close-up pictures, e.g. for genetelia (see\n https://www.antweb.org/specimenImages.do?name=antweb1008499). These\n specimens show much more images than the standard 3 (dorsal, head,\n profile) and therefore the list will be wrong. I have yet to find a good\n function to get a most imaged species list.\n\n Args:\n output (str): the path or name the output csv file.\n min_images (int): Defines the minimum number of images per species to\n download. 
Defaults to 100.\n\n Returns:\n csv file with genus and species names in 2 columns\n\n \"\"\"\n url = \"https://www.antweb.org/taxonomicPage.do?rank=species&project=allantwebants&statusSetSize=max&statusSet=valid%20extant&statusSet=typed\"\n relevant_lines = _get_relevant_lines_from_html(url, min_images)\n # print(relevant_lines)\n rows = []\n for line in relevant_lines:\n nb_images = int(re.search(r\"\\d+\", line).group())\n genus, species = _get_species_name_from_line(line)\n row = [genus, species, nb_images]\n rows.append(row)\n df = pd.DataFrame(rows, columns=(\"genus\", \"species\", \"nb_images\"))\n df.to_csv(os.path.join(\"data\", output), sep=\",\", index=False)\n\n\n# Creating an URL\n###############################################################################\n\n\ndef _create_url(limit, offset, **kwargs):\n \"\"\"Creation of the url to access AntWebs API V2, using a base_url and\n arguments.\n\n Args:\n limit (int): sets the limit for accessing specimens.\n offset (int): sets the offset for accessing specimens.\n\n **Kwargs:\n genus (str): specifies the genus.\n species (str): specifies the species.\n country (str): specifies the country.\n caste (str): specifies the caste (does not work in API v2).\n\n Returns:\n URL object: Returns an URL as response object that can be opened by the\n function `request.get()`.\n\n Raises:\n TypeError: In case of an invalid kwarg.\n\n \"\"\"\n allowed_kwargs = {\"genus\", \"species\", \"country\", \"caste\"}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError(\n \"Unexpected keyword argument passed to \"\n \"_create_url: {}\".format(str(k))\n )\n\n genus = kwargs.get(\"genus\", None)\n species = kwargs.get(\"species\", None)\n country = kwargs.get(\"country\", None)\n caste = kwargs.get(\"caste\", None)\n base_url = \"http://www.antweb.org/api/v2/?\"\n arguments = {\n \"limit\": limit,\n \"offset\": offset,\n \"genus\": genus,\n \"species\": species,\n \"country\": country,\n \"caste\": caste, # not working\n }\n url = requests.get(url=base_url, params=arguments)\n return url\n\n\n# Download JSON files from URLs\n###############################################################################\n\n\ndef _get_json(input_url):\n \"\"\"Scrapes JSON files from AntWeb URLs.\n\n Args:\n input_url (URL object): an URL containing a JSON object.\n\n Returns:\n JSON: A JSON object.\n\n Raises:\n AssertionError: If the json object contains nothing.\n\n \"\"\"\n r = requests.get(url=input_url)\n data = r.json()\n if data != None:\n return data\n\n else:\n raise AssertionError(\n \"There is no JSON data in the url: {0}.\".format(input_url.url)\n )\n\n\ndef urls_to_json(\n csv_file, dataset_name, n_jsonfiles=None, offset_set=0, limit_set=9999\n):\n \"\"\"This function downloads JSON files for a list of species and places them\n in a drecitory. An limit_set higher than 10,000 will usually create\n problems if no species and genus is provided. If you get HTTP ERROR 500\n you will probably need to set the limit lower.\n\n Args:\n csv_file (str): The csv file with genus and species names.\n dataset_name (str): Name for the dataset, and also for naming the\n directory that will hold this dataset. The JSON files will be\n saved here.\n n_jsonfiles (int): Set the number of jsonfiles you want to process.\n Usually this will to be the same number as species you are\n processing. If `None`, all jsonfiles will be processed. Defaults\n to `None.`\n offset_set (int): The offset for downloading AntWeb records in\n batches. 
Defaults to `0`.\n limit_set (int): The limit for downloading a set of AntWeb records.\n Defaults to `9999`.\n\n Returns:\n A directory of JSON files for different species.\n\n Raises:\n ValueError: If `limit_set` is > 12,000.\n AssertionError: If `csv_file` is not a .csv file.\n AssertionError: If the csv file is not comma delimited.\n AssertionError: When the .csv does not have 2 columns.\n AssertionError: When the columns are not named correctly; `genus` and\n `species`.\n\n \"\"\"\n nb_indet = 0\n nb_invalid = 0\n attempts = 5\n # if limit_set > 12000:\n # raise ValueError('The `limit_set` should be lower than 12,000.')\n output_dir = os.path.join(\"data\", dataset_name, \"json_files\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if csv_file.endswith(\".csv\") == True:\n csv_file = os.path.join(\"data\", csv_file)\n else:\n raise AssertionError(\n \"{0} is not in the correct format of `.csv`.\".format(csvfile)\n )\n\n logging.info(\n \"Reading {0} and creating json_files folder.\".format(csv_file)\n )\n with open(csv_file, \"rt\") as csv_open:\n dialect = Sniffer().sniff(csv_open.readline(), [\",\", \";\"])\n csv_open.seek(0)\n if dialect.delimiter == \";\":\n raise AssertionError(\n \"Please us a comma (,) delimited csv file \",\n \"instead of {0}.\".format(dialect.delimiter),\n )\n\n csv_df = pd.read_csv(csv_open, sep=\",\")\n if len(csv_df.columns) != 2:\n raise AssertionError(\n \"The `.csv` should only have 2 column \",\n \"instead of {0} column(s).\".format(len(csv_df.columns)),\n )\n\n if csv_df.columns.tolist() != [\"genus\", \"species\"]:\n raise AssertionError(\n \"The columns are not correctly named: \"\n \"{} and {}. The column headers should be \"\n \"column 1: `genus` and column 2: \"\n \"`species`.\".format(csv_df.columns.tolist())\n )\n\n for index, row in csv_df.iterrows():\n if row[\"species\"] == \"indet\":\n nb_indet += 1\n logging.info(\n \"{0} indet species found and will be skipped from \"\n \"downloading.\".format(nb_indet)\n )\n nb_specimens = csv_df.shape[0] - nb_indet\n if n_jsonfiles is not None:\n nb_specimens = n_jsonfiles\n\n for index, row in tqdm(\n islice(csv_df.iterrows(), 0, n_jsonfiles),\n total=nb_specimens,\n desc=\"Downloading species JSON files\",\n unit=\"Species\",\n ):\n url = _create_url(\n limit=limit_set,\n offset=offset_set,\n genus=row[\"genus\"],\n species=row[\"species\"],\n )\n # Skip `indet` species:\n if row[\"species\"] == \"indet\":\n logging.info('Skipped: \"{}\".'.format(url.url))\n # Download `non-indet` species:\n else:\n logging.info(\"Downloading JSON from: {0}\".format(url.url))\n file_name = row[\"genus\"] + \"_\" + row[\"species\"] + \".json\"\n for attempt in range(attempts):\n try:\n species = _get_json(url.url)\n if species[\"count\"] > 0:\n # TODO: fix line below. 
Stops checking after 2\n # json files are present.\n if not os.path.isfile(\n os.path.join(output_dir, file_name)\n ):\n with open(\n os.path.join(output_dir, file_name), \"w\"\n ) as jsonfile:\n json.dump(species, jsonfile)\n else:\n logging.info(\n \"JSON file for {0} {1} already exists \"\n \"and will not be downloaded \"\n \"again.\".format(\n row[\"genus\"], row[\"species\"]\n )\n )\n return\n\n # If server returns species with 0 specimen count:\n if species[\"count\"] == 0:\n nb_invalid += 1\n logging.info(\n '\"{0} {1}\" has {2} records or does not '\n \"exist as a valid species\".format(\n row[\"genus\"],\n row[\"species\"],\n species[\"count\"],\n )\n )\n\n except HTTPError as e:\n print(e)\n else:\n break\n\n else:\n logging.debug(\n \"For {0} attempts the server did not respond for \"\n \"URL: {1}\".format(attempts, url.url)\n )\n nb_downloaded = n_jsonfiles - nb_invalid\n logging.info(\n \"Downloading is finished. {} JSON files have been \"\n \"downloaded. With {} invalid name(s).\".format(\n nb_downloaded, nb_invalid\n )\n )\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
jianzhnie/MultimodalTransformer | [
"6cd4ca8034a53da361149745aecead68fbe304a0"
]
| [
"mmt/data/preprocessor/tabular/numeric_encoder.py"
]
| [
"'''\nAuthor: jianzhnie\nDate: 2021-11-12 15:40:06\nLastEditTime: 2022-02-24 16:25:19\nLastEditors: jianzhnie\nDescription:\n\n'''\n\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler\n\nfrom .base_preprocessor import BasePreprocessor, check_is_fitted\n\n\nclass NumericalFeatureTransformer(BasePreprocessor):\n \"\"\"\n CONTINUOUS_TRANSFORMS = {\n \"quantile_uniform\": {\n \"callable\": QuantileTransformer,\n \"params\": dict(output_distribution=\"uniform\", random_state=42),\n },\n \"quantile_normal\": {\n \"callable\": QuantileTransformer,\n \"params\": dict(output_distribution=\"normal\", random_state=42),\n },\n \"box_cox\": {\n \"callable\": PowerTransformer,\n \"params\": dict(method=\"box-cox\", standardize=True),\n },\n \"yeo_johnson\": {\n \"callable\": PowerTransformer,\n \"params\": dict(method=\"yeo-johnson\", standardize=True),\n },\n \"nomalize\": {\n \"callable\": StandardScaler,\n \"params\": dict(with_mean=True, with_std=True),\n }\n }\n \"\"\"\n\n CONTINUOUS_TRANSFORMS = {\n 'quantile_uniform': {\n 'callable': QuantileTransformer,\n 'params': dict(output_distribution='uniform', random_state=42),\n },\n 'quantile_normal': {\n 'callable': QuantileTransformer,\n 'params': dict(output_distribution='normal', random_state=42),\n },\n 'box_cox': {\n 'callable': PowerTransformer,\n 'params': dict(method='box-cox', standardize=True),\n },\n 'yeo_johnson': {\n 'callable': PowerTransformer,\n 'params': dict(method='yeo-johnson', standardize=True),\n },\n 'nomalize': {\n 'callable': StandardScaler,\n 'params': dict(with_mean=True, with_std=True),\n }\n }\n\n def __init__(self,\n numerical_cols: List[str] = None,\n numerical_transformer_method: str = None,\n handle_na: bool = True):\n super(NumericalFeatureTransformer, self).__init__()\n\n self.numerical_cols = numerical_cols\n self.numerical_transformer_method = numerical_transformer_method\n self.handle_na = handle_na\n self.is_fitted = False\n\n def fit(self, df: pd.DataFrame) -> BasePreprocessor:\n if (self.numerical_transformer_method\n is not None) and (len(self.numerical_cols) > 0):\n transform = self.CONTINUOUS_TRANSFORMS[\n self.numerical_transformer_method]\n self.numerical_transformer = transform['callable'](\n **transform['params'])\n df_cont = self._prepare_continuous(df)\n self.transformer = self.numerical_transformer.fit(df_cont)\n self.is_fitted = True\n return self\n\n def transform(self, df: pd.DataFrame) -> np.ndarray:\n \"\"\"Returns the processed ``dataframe`` as a np.ndarray.\"\"\"\n check_is_fitted(self, condition=self.is_fitted)\n if self.numerical_cols is not None:\n df_cont = self._prepare_continuous(df)\n if self.numerical_transformer_method:\n df_cont[self.numerical_cols] = self.transformer.transform(\n df_cont)\n\n self.column_idx = {k: v for v, k in enumerate(df_cont.columns)}\n return df_cont.values\n\n def inverse_transform(self, encoded: np.ndarray) -> pd.DataFrame:\n r\"\"\"Takes as input the output from the ``transform`` method and it will\n return the original values.\n\n Parameters\n ----------\n encoded: np.ndarray\n array with the output of the ``transform`` method\n \"\"\"\n decoded = pd.DataFrame(encoded, columns=self.column_idx.keys())\n try:\n decoded[self.numerical_cols] = self.transformer.inverse_transform(\n decoded[self.numerical_cols])\n except AttributeError:\n pass\n return decoded\n\n def fit_transform(self, df: pd.DataFrame) -> np.ndarray:\n \"\"\"Combines ``fit`` and 
``transform``\"\"\"\n return self.fit(df).transform(df)\n\n def _prepare_continuous(self, df: pd.DataFrame) -> pd.DataFrame:\n df = df.copy()[self.numerical_cols]\n df[self.numerical_cols] = df[self.numerical_cols].astype(float)\n if self.handle_na:\n df[self.numerical_cols] = df[self.numerical_cols].fillna(\n dict(df[self.numerical_cols].median()), inplace=False)\n return df\n\n\nif __name__ == '__main__':\n import pandas as pd\n df = pd.read_csv(\n '/media/robin/DATA/datatsets/structure_data/titanic/Titanic.csv')\n cols = ['Fare', 'Age']\n print(df[cols])\n cat_feats = NumericalFeatureTransformer(\n numerical_cols=cols, numerical_transformer_method='quantile_uniform')\n full_data_transformed = cat_feats.fit_transform(df)\n print(full_data_transformed)\n df = cat_feats.inverse_transform(full_data_transformed)\n print(df)\n"
]
| [
[
"pandas.read_csv"
]
]
|
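The record above wraps scikit-learn's `QuantileTransformer` / `PowerTransformer` / `StandardScaler` behind a `fit` / `transform` / `inverse_transform` interface with median imputation. A minimal stand-alone sketch of that round trip, assuming scikit-learn and pandas are installed; the column names and data values below are illustrative only (not taken from the record), and `n_quantiles=5` is set just to suit the tiny example:

```python
# Sketch of the fit / transform / inverse_transform round trip used above.
import numpy as np
import pandas as pd
from sklearn.preprocessing import QuantileTransformer

df = pd.DataFrame({"Fare": [7.25, 71.83, 8.05, 53.1, np.nan],
                   "Age": [22.0, 38.0, 26.0, 35.0, np.nan]})
cols = ["Fare", "Age"]

# Same idea as handle_na=True: fill missing values with the column medians.
df[cols] = df[cols].fillna(df[cols].median())

qt = QuantileTransformer(output_distribution="uniform", random_state=42, n_quantiles=5)
encoded = qt.fit_transform(df[cols])            # np.ndarray mapped into [0, 1]
decoded = pd.DataFrame(qt.inverse_transform(encoded), columns=cols)
print(decoded)                                  # approximately recovers the imputed values
```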
pFernbach/crocoddyl | [
"cbf81a329e3abaf4ce1b4a8fab1431f93cd9a5c8"
]
| [
"benchmark/quadrupedal_gaits_optctrl.py"
]
| [
"import crocoddyl\nfrom crocoddyl.utils.quadruped import SimpleQuadrupedalGaitProblem\nimport pinocchio\nimport example_robot_data\nimport numpy as np\nimport os\nimport sys\nimport time\nimport subprocess\n\nT = int(sys.argv[1]) if (len(sys.argv) > 1) else int(5e3) # number of trials\nMAXITER = 1\nCALLBACKS = False\nWALKING = 'walk' in sys.argv\nTROTTING = 'trot' in sys.argv\nPACING = 'pace' in sys.argv\nBOUNDING = 'bound' in sys.argv\nJUMPING = 'jump' in sys.argv\n\nGAIT = \"walking\" # 104 nodes\nif WALKING:\n print('running walking benchmark ...')\n GAIT = \"walking\" # 104 nodes\nif TROTTING:\n print('running trotting benchmark ...')\n GAIT = \"trotting\" # 54 nodes\nif PACING:\n print('running pacing benchmark ...')\n GAIT = \"pacing\" # 54 nodes\nif BOUNDING:\n print('running bounding benchmark ...')\n GAIT = \"bounding\" # 54 nodes\nif JUMPING:\n print('running jumping benchmark ...')\n GAIT = \"jumping\" # 61 nodes\n\n\ndef createProblem(gait_phase):\n robot_model = example_robot_data.loadHyQ().model\n lfFoot, rfFoot, lhFoot, rhFoot = 'lf_foot', 'rf_foot', 'lh_foot', 'rh_foot'\n gait = SimpleQuadrupedalGaitProblem(robot_model, lfFoot, rfFoot, lhFoot, rhFoot)\n q0 = robot_model.referenceConfigurations['standing'].copy()\n v0 = pinocchio.utils.zero(robot_model.nv)\n x0 = np.concatenate([q0, v0])\n\n type_of_gait = list(gait_phase.keys())[0]\n value = gait_phase[type_of_gait]\n if type_of_gait == 'walking':\n # Creating a walking problem\n problem = gait.createWalkingProblem(x0, value['stepLength'], value['stepHeight'], value['timeStep'],\n value['stepKnots'], value['supportKnots'])\n elif type_of_gait == 'trotting':\n # Creating a trotting problem\n problem = gait.createTrottingProblem(x0, value['stepLength'], value['stepHeight'], value['timeStep'],\n value['stepKnots'], value['supportKnots'])\n elif type_of_gait == 'pacing':\n # Creating a pacing problem\n problem = gait.createPacingProblem(x0, value['stepLength'], value['stepHeight'], value['timeStep'],\n value['stepKnots'], value['supportKnots'])\n elif type_of_gait == 'bounding':\n # Creating a bounding problem\n problem = gait.createBoundingProblem(x0, value['stepLength'], value['stepHeight'], value['timeStep'],\n value['stepKnots'], value['supportKnots'])\n elif type_of_gait == 'jumping':\n # Creating a jumping problem\n problem = gait.createJumpingProblem(x0, value['jumpHeight'], value['jumpLength'], value['timeStep'],\n value['groundKnots'], value['flyingKnots'])\n\n xs = [robot_model.defaultState] * (len(problem.runningModels) + 1)\n us = problem.quasiStatic([robot_model.defaultState] * problem.T)\n return xs, us, problem\n\n\ndef runDDPSolveBenchmark(xs, us, problem):\n ddp = crocoddyl.SolverFDDP(problem)\n if CALLBACKS:\n ddp.setCallbacks([crocoddyl.CallbackVerbose()])\n duration = []\n for _ in range(T):\n c_start = time.time()\n ddp.solve(xs, us, MAXITER, False, 0.1)\n c_end = time.time()\n duration.append(1e3 * (c_end - c_start))\n\n avrg_duration = sum(duration) / len(duration)\n min_duration = min(duration)\n max_duration = max(duration)\n return avrg_duration, min_duration, max_duration\n\n\ndef runShootingProblemCalcBenchmark(xs, us, problem):\n duration = []\n for _ in range(T):\n c_start = time.time()\n problem.calc(xs, us)\n c_end = time.time()\n duration.append(1e3 * (c_end - c_start))\n\n avrg_duration = sum(duration) / len(duration)\n min_duration = min(duration)\n max_duration = max(duration)\n return avrg_duration, min_duration, max_duration\n\n\ndef runShootingProblemCalcDiffBenchmark(xs, us, problem):\n 
duration = []\n for i in range(T):\n c_start = time.time()\n problem.calcDiff(xs, us)\n c_end = time.time()\n duration.append(1e3 * (c_end - c_start))\n\n avrg_duration = sum(duration) / len(duration)\n min_duration = min(duration)\n max_duration = max(duration)\n return avrg_duration, min_duration, max_duration\n\n\n# Setting up all tasks\nif GAIT == 'walking':\n GAITPHASE = {\n 'walking': {\n 'stepLength': 0.25,\n 'stepHeight': 0.25,\n 'timeStep': 1e-2,\n 'stepKnots': 25,\n 'supportKnots': 2\n }\n }\nelif GAIT == 'trotting':\n GAITPHASE = {\n 'trotting': {\n 'stepLength': 0.15,\n 'stepHeight': 0.2,\n 'timeStep': 1e-2,\n 'stepKnots': 25,\n 'supportKnots': 2\n }\n }\nelif GAIT == 'pacing':\n GAITPHASE = {\n 'pacing': {\n 'stepLength': 0.15,\n 'stepHeight': 0.2,\n 'timeStep': 1e-2,\n 'stepKnots': 25,\n 'supportKnots': 2\n }\n }\nelif GAIT == 'bounding':\n GAITPHASE = {\n 'bounding': {\n 'stepLength': 0.007,\n 'stepHeight': 0.05,\n 'timeStep': 1e-2,\n 'stepKnots': 25,\n 'supportKnots': 12\n }\n }\nelif GAIT == 'jumping':\n GAITPHASE = {\n 'jumping': {\n 'jumpHeight': 0.15,\n 'jumpLength': [0.0, 0.3, 0.],\n 'timeStep': 1e-2,\n 'groundKnots': 10,\n 'flyingKnots': 20\n }\n }\n\nprint('\\033[1m')\nprint('C++:')\npopen = subprocess.check_call([os.path.dirname(os.path.abspath(__file__)) + \"/quadrupedal-gaits-optctrl\", str(T)])\n\nprint('Python bindings:')\nxs, us, problem = createProblem(GAITPHASE)\navrg_duration, min_duration, max_duration = runDDPSolveBenchmark(xs, us, problem)\nprint(' DDP.solve [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))\navrg_duration, min_duration, max_duration = runShootingProblemCalcBenchmark(xs, us, problem)\nprint(' ShootingProblem.calc [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))\navrg_duration, min_duration, max_duration = runShootingProblemCalcDiffBenchmark(xs, us, problem)\nprint(' ShootingProblem.calcDiff [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))\nprint('\\033[0m')\n"
]
| [
[
"numpy.concatenate"
]
]
|
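The benchmark file above repeats the same timing pattern for `ddp.solve`, `problem.calc`, and `problem.calcDiff`: run the call T times, collect per-call durations in milliseconds, and report average / min / max. A generic version of that pattern, with a stand-in workload instead of the crocoddyl calls (the `work` callable below is hypothetical, not part of crocoddyl; `time.perf_counter` is used instead of `time.time` for better resolution):

```python
# Generic timing loop mirroring runDDPSolveBenchmark / runShootingProblemCalcBenchmark.
import time

def run_benchmark(work, trials=100):
    """Call `work()` `trials` times and return (avg, min, max) duration in ms."""
    duration = []
    for _ in range(trials):
        start = time.perf_counter()
        work()
        duration.append(1e3 * (time.perf_counter() - start))
    return sum(duration) / len(duration), min(duration), max(duration)

if __name__ == "__main__":
    avrg, lo, hi = run_benchmark(lambda: sum(i * i for i in range(10000)))
    print("dummy workload [ms]: {0} ({1}, {2})".format(avrg, lo, hi))
```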
desihub/imaginglss | [
"09258d20015869fead9bad6020da2bc0d161f670"
]
| [
"imaginglss/utils/mpl_aea.py"
]
| [
"\"\"\" \n Native matplotlib support of frequently used 2d projections,\n for looking up to the sky.\n\n This file is initially developed as part of skymapper by Peter Melchior\n based on the example in matplotlib.\n\n It is later adopted by me (Yu Feng), and I will maintain a copy in\n imaginglss for easier access, also because I do plan to clean up\n the function signatures and variable naming (breaking compatibility with\n old skymapper code).\n\n The current version adds the ability to generate equal area histograms\n on HealPix pixels.\n\n It does not depend on healpy, there is a minimal python implementation of \n healpix at the end of the file; imported in the javascript/lua style.\n \n The intention is one day we will submit a PR of this to matplotlib.\n\n What does not work:\n \n 1. Panning.\n 2. Color bar is sometimes in the wrong place\n 3. Label locations are poorly calculated.\n\n What does work:\n Evertying else.\n\n Author: Yu Feng \n Peter Melchior\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport matplotlib\nfrom matplotlib.axes import Axes\nfrom matplotlib.patches import Rectangle, Polygon\nfrom matplotlib.path import Path\nfrom matplotlib.collections import PolyCollection, TriMesh\nfrom matplotlib.tri.triangulation import Triangulation\n\nfrom matplotlib.ticker import NullLocator, Formatter, FixedLocator, MaxNLocator\nfrom matplotlib.transforms import Affine2D, BboxTransformTo, Transform, blended_transform_factory, Bbox\nfrom matplotlib.projections import register_projection\nimport matplotlib.spines as mspines\nimport matplotlib.axis as maxis\n\nimport numpy as np\n\n__author__ = \"Yu Feng\"\n__email__ = \"[email protected]\"\n\nclass SkymapperAxes(Axes):\n \"\"\"\n A base class for a Skymapper axes that takes in ra0, dec0, dec1, dec2.\n\n The base class takes care of clipping and interpolating with matplotlib.\n\n Subclass and override class method get_projection_class.\n\n \"\"\"\n # The subclass projection must specify a name. This will be used be the\n # user to select the projection.\n\n name = None\n\n @classmethod\n def get_projection_class(kls):\n raise NotImplementedError('Must implement this in subclass')\n\n def __init__(self, *args, **kwargs):\n self.ra0 = None\n self.dec0 = None\n self.dec1 = None\n self.dec2 = None\n\n Axes.__init__(self, *args, **kwargs)\n\n self.cla()\n\n def _init_axis(self):\n # Axes._init_axis() -- until HammerAxes.xaxis.cla() works.\n self.xaxis = maxis.XAxis(self)\n self.spines['bottom'].register_axis(self.xaxis)\n self.spines['top'].register_axis(self.xaxis)\n self.yaxis = maxis.YAxis(self)\n self.spines['left'].register_axis(self.yaxis)\n self.spines['right'].register_axis(self.yaxis)\n self._update_transScale()\n\n def cla(self):\n \"\"\"\n Override to set up some reasonable defaults.\n \"\"\"\n # Don't forget to call the base class\n Axes.cla(self)\n\n # Turn off minor ticking altogether\n self.xaxis.set_minor_locator(NullLocator())\n self.yaxis.set_minor_locator(NullLocator())\n\n self.xaxis.set_major_locator(MaxNLocator(5, prune='both'))\n self.yaxis.set_major_locator(MaxNLocator(5, prune='both'))\n\n # Do not display ticks -- we only want gridlines and text\n self.xaxis.set_ticks_position('none')\n self.yaxis.set_ticks_position('none')\n\n self.set_center(None, None)\n\n # FIXME: probabaly want to override autoscale_view\n # to properly handle wrapping introduced by margin\n # and properlty wrap data. \n # It doesn't make sense to have xwidth > 360. 
\n self._tight = True\n\n def _set_lim_and_transforms(self):\n \"\"\"\n This is called once when the plot is created to set up all the\n transforms for the data, text and grids.\n \"\"\"\n # There are three important coordinate spaces going on here:\n #\n # 1. Data space: The space of the data itself\n #\n # 2. Axes space: The unit rectangle (0, 0) to (1, 1)\n # covering the entire plot area.\n #\n # 3. Display space: The coordinates of the resulting image,\n # often in pixels or dpi/inch.\n\n # This function makes heavy use of the Transform classes in\n # ``lib/matplotlib/transforms.py.`` For more information, see\n # the inline documentation there.\n\n # The goal of the first two transformations is to get from the\n # data space (in this case meridian and parallel) to axes\n # space. It is separated into a non-affine and affine part so\n # that the non-affine part does not have to be recomputed when\n # a simple affine change to the figure has been made (such as\n # resizing the window or changing the dpi).\n\n # 1) The core transformation from data space into\n # rectilinear space defined in the HammerTransform class.\n self.transProjection = self.get_projection_class()()\n self.transProjection.set_center((180, 0))\n self.transProjection.set_dec1(-65)\n self.transProjection.set_dec2(80)\n\n # 2) The above has an output range that is not in the unit\n # rectangle, so scale and translate it so it fits correctly\n # within the axes. The peculiar calculations of xscale and\n # yscale are specific to a Aitoff-Hammer projection, so don't\n # worry about them too much.\n\n # This will be updated after the xy limits are set.\n self.transAffine = Affine2D()\n\n # 3) This is the transformation from axes space to display\n # space.\n self.transAxes = BboxTransformTo(self.bbox)\n\n # Now put these 3 transforms together -- from data all the way\n # to display coordinates. Using the '+' operator, these\n # transforms will be applied \"in order\". The transforms are\n # automatically simplified, if possible, by the underlying\n # transformation framework.\n self.transData = \\\n self.transProjection + \\\n self.transAffine + \\\n self.transAxes\n\n self.transClip = \\\n self.transProjection + \\\n self.transAffine\n\n # The main data transformation is set up. Now deal with\n # gridlines and tick labels.\n\n # Longitude gridlines and ticklabels. The input to these\n # transforms are in display space in x and axes space in y.\n # Therefore, the input values will be in range (-xmin, 0),\n # (xmax, 1). The goal of these transforms is to go from that\n # space to display space. The tick labels will be offset 4\n # pixels from the equator.\n self._xaxis_pretransform = \\\n Affine2D() \\\n .scale(1.0, 180) \\\n .translate(0.0, -90)\n\n self._xaxis_transform = \\\n self._xaxis_pretransform + \\\n self.transData\n\n self._xaxis_text1_transform = \\\n self._xaxis_pretransform + \\\n self.transData + \\\n Affine2D().translate(0.0, -8.0)\n self._xaxis_text2_transform = \\\n self._xaxis_pretransform+ \\\n self.transData + \\\n Affine2D().translate(0.0, -8.0)\n\n # Now set up the transforms for the parallel ticks. The input to\n # these transforms are in axes space in x and display space in\n # y. Therefore, the input values will be in range (0, -ymin),\n # (1, ymax). The goal of these transforms is to go from that\n # space to display space. 
The tick labels will be offset 4\n # pixels from the edge of the axes ellipse.\n self._yaxis_stretch = Affine2D().scale(360, 1.0).translate(0.0, 0.0)\n self._yaxis_stretch1 = Affine2D().scale(360, 1.0).translate(0.0, 0.0)\n self._yaxis_stretch2 = Affine2D().scale(360, 1.0).translate(0.0, 0.0)\n\n self._yaxis_transform = \\\n self._yaxis_stretch + \\\n self.transData\n\n self._yaxis_text1_transform = \\\n self._yaxis_stretch1 + \\\n self.transData\n# Affine2D().translate(-8.0, 0.0)\n\n self._yaxis_text2_transform = \\\n self._yaxis_stretch2 + \\\n self.transData\n# Affine2D().translate(8.0, 0.0)\n\n def _update_affine(self):\n # update the transformations and clip paths\n # after new lims are set.\n if self.ra0 is None:\n x0, x1 = self.viewLim.intervalx\n ra0 = 0.5 * (x0 + x1)\n else:\n ra0 = self.ra0\n if self.dec0 is None:\n y0, y1 = self.viewLim.intervaly\n dec0 = 0.5 * (y0 + y1)\n else:\n dec0 = self.dec0\n if self.dec1 is None:\n y0, y1 = self.viewLim.intervaly\n dec1 = y0 + (y1 - y0) / 12.\n else:\n dec1 = self.dec1\n if self.dec2 is None:\n y0, y1 = self.viewLim.intervaly\n dec2 = y1 - (y1 - y0) / 12.\n else:\n dec2 = self.dec2\n\n self.transProjection.set_center((ra0, dec0))\n self.transProjection.set_dec1(dec1)\n self.transProjection.set_dec2(dec2)\n\n self._yaxis_stretch\\\n .clear() \\\n .scale(self.viewLim.width, 1.0) \\\n .translate(self.viewLim.x0, 0)\n\n self._yaxis_stretch1\\\n .clear() \\\n .scale(self.viewLim.width, 1.0) \\\n .translate(self.viewLim.x0 - 0.00 * self.viewLim.width, 0)\n\n self._yaxis_stretch2\\\n .clear() \\\n .scale(self.viewLim.width, 1.0) \\\n .translate(self.viewLim.x0 + 0.00 * self.viewLim.width, 0)\n\n self._xaxis_pretransform \\\n .clear() \\\n .scale(1.0, self.viewLim.height) \\\n .translate(0.0, self.viewLim.y0)\n\n corners_data = np.array([[self.viewLim.x0, self.viewLim.y0],\n [ra0, self.viewLim.y0],\n [self.viewLim.x1, self.viewLim.y0],\n [self.viewLim.x1, self.viewLim.y1],\n [self.viewLim.x0, self.viewLim.y1],])\n\n corners = self.transProjection.transform_non_affine(corners_data)\n\n x0 = corners[0][0]\n x1 = corners[2][0]\n\n # special case when x1 is wrapped back to x0\n # FIXME: I don't think we need it anymore.\n if x0 == x1: x1 = - x0\n\n y0 = corners[1][1]\n y1 = max([corners[3][1], corners[4][1]])\n\n xscale = x1 - x0\n yscale = y1 - y0\n\n self.transAffine.clear() \\\n .translate( - (x0 + x1) * 0.5, - (y0 + y1) * 0.5) \\\n .scale(0.95 / xscale, 0.95 / yscale) \\\n .translate(0.5, 0.5)\n\n # now update the clipping path\n path = Path(corners_data)\n path0 = self.transProjection.transform_path(path)\n path = self.transClip.transform_path(path)\n self.patch.set_xy(path.vertices)\n\n def get_xaxis_transform(self, which='grid'):\n \"\"\"\n Override this method to provide a transformation for the\n x-axis grid and ticks.\n \"\"\"\n assert which in ['tick1', 'tick2', 'grid']\n return self._xaxis_transform\n\n def get_xaxis_text1_transform(self, pixelPad):\n \"\"\"\n Override this method to provide a transformation for the\n x-axis tick labels.\n\n Returns a tuple of the form (transform, valign, halign)\n \"\"\"\n return self._xaxis_text1_transform, 'center', 'center'\n\n def get_xaxis_text2_transform(self, pixelPad):\n \"\"\"\n Override this method to provide a transformation for the\n secondary x-axis tick labels.\n\n Returns a tuple of the form (transform, valign, halign)\n \"\"\"\n return self._xaxis_text2_transform, 'center', 'center'\n\n def get_yaxis_transform(self, which='grid'):\n \"\"\"\n Override this method to provide a 
transformation for the\n y-axis grid and ticks.\n \"\"\"\n assert which in ['tick1', 'tick2', 'grid']\n return self._yaxis_transform\n\n def get_yaxis_text1_transform(self, pixelPad):\n \"\"\"\n Override this method to provide a transformation for the\n y-axis tick labels.\n\n Returns a tuple of the form (transform, valign, halign)\n \"\"\"\n return self._yaxis_text1_transform, 'center', 'center'\n\n def get_yaxis_text2_transform(self, pixelPad):\n \"\"\"\n Override this method to provide a transformation for the\n secondary y-axis tick labels.\n\n Returns a tuple of the form (transform, valign, halign)\n \"\"\"\n return self._yaxis_text2_transform, 'center', 'center'\n\n def _gen_axes_patch(self):\n \"\"\"\n ClipPath.\n\n Initially set to a size of 2 box in transAxes.\n\n After xlim and ylim are set, this will be changed to the actual\n region in transData.\n\n For unclear reason the very initial clip path is always applied\n to the grid. Therefore we set size to 2.0 to avoid bad clipping.\n \"\"\"\n return Polygon([(0, 0), (2, 0), (2, 2), (0, 2)], fill=False)\n\n def _gen_axes_spines(self):\n d = {\n 'left': mspines.Spine.linear_spine(self, spine_type='left'),\n 'right': mspines.Spine.linear_spine(self, spine_type='right'),\n 'top': mspines.Spine.linear_spine(self, spine_type='top'),\n 'bottom': mspines.Spine.linear_spine(self, spine_type='bottom'),\n }\n d['left'].set_position(('axes', 0))\n d['right'].set_position(('axes', 1))\n d['top'].set_position(('axes', 0))\n d['bottom'].set_position(('axes', 1))\n #FIXME: these spines can be moved wit set_position(('axes', ?)) but\n # 'data' fails. Because the transformation is non-separatable,\n # and because spines / data makes that assumption, we probably\n # do not have a easy way to support moving spines via native matplotlib\n # api on data axis.\n\n # also the labels currently do not follow the spines. Likely because\n # they are not registered?\n\n return d\n\n # Prevent the user from applying scales to one or both of the\n # axes. 
In this particular case, scaling the axes wouldn't make\n # sense, so we don't allow it.\n def set_xscale(self, *args, **kwargs):\n if args[0] != 'linear':\n raise NotImplementedError\n Axes.set_xscale(self, *args, **kwargs)\n\n def set_yscale(self, *args, **kwargs):\n if args[0] != 'linear':\n raise NotImplementedError\n Axes.set_yscale(self, *args, **kwargs)\n\n def set_center(self, ra0, dec0):\n \"\"\" Set the center of ra \"\"\"\n self.ra0 = ra0\n self.dec0 = dec0\n self._update_affine()\n\n def set_parallels(self, dec1, dec2):\n \"\"\" Set the parallels \"\"\"\n self.dec1 = dec1\n self.dec2 = dec2\n self._update_affine()\n\n # when xlim and ylim are updated, the transformation\n # needs to be updated too.\n def set_xlim(self, *args, **kwargs):\n Axes.set_xlim(self, *args, **kwargs)\n\n # FIXME: wrap x0 x1 to ensure they enclose ra0.\n x0, x1 = self.viewLim.intervalx\n if self.ra0 is not None:\n if not x0 <= self.transProjection.ra0 or \\\n not x1 > self.transProjection.ra0:\n raise ValueError(\"The given limit in RA does not enclose ra0\")\n\n self._update_affine()\n\n def set_ylim(self, *args, **kwargs):\n Axes.set_ylim(self, *args, **kwargs)\n self._update_affine()\n\n def _histmap(self, show, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):\n r = histogrammap(ra, dec, weights, nside, perarea=perarea, range=range)\n\n if weights is not None:\n w, N = r\n else:\n w = r\n if mean:\n mask = N != 0\n w[mask] /= N[mask]\n else:\n mask = w > 0\n return w, mask, show(w, mask, nest=False, **kwargs)\n\n def histmap(self, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):\n return self._histmap(self.mapshow, ra, dec, weights, nside, perarea, mean, range, **kwargs)\n\n def histcontour(self, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):\n return self._histmap(self.mapcontour, ra, dec, weights, nside, perarea, mean, range, **kwargs)\n\n def mapshow(self, map, mask=None, nest=False, shading='flat', **kwargs):\n \"\"\" Display a healpix map \"\"\"\n vmin = kwargs.pop('vmin', None)\n vmax = kwargs.pop('vmax', None)\n defaults = dict(rasterized=True,\n alpha=1.0,\n linewidth=0)\n defaults.update(kwargs)\n if mask is None:\n mask = map == map\n\n if shading == 'flat':\n coll = HealpixCollection(map, mask, \n transform=self.transData, **defaults)\n else:\n coll = HealpixTriCollection(map, mask, transform=self.transData, **defaults)\n \n coll.set_clim(vmin=vmin, vmax=vmax)\n self.add_collection(coll)\n self._sci(coll)\n self.autoscale_view(tight=True)\n\n return coll\n\n def mapcontour(self, map, mask=None, nest=False, **kwargs):\n \"\"\" Display a healpix map as coutours. This is approximate. 
\"\"\"\n if mask is None:\n mask = map == map\n\n ra, dec = pix2radec(healpix.npix2nside(len(map)), mask.nonzero()[0])\n im = self.tricontour(ra, dec, map[mask], **kwargs)\n self._sci(im)\n self.autoscale_view(tight=True)\n return im\n\n def format_coord(self, lon, lat):\n \"\"\"\n Override this method to change how the values are displayed in\n the status bar.\n\n In this case, we want them to be displayed in degrees N/S/E/W.\n \"\"\"\n lon = lon\n lat = lat\n if lat >= 0.0:\n ns = 'N'\n else:\n ns = 'S'\n if lon >= 0.0:\n ew = 'E'\n else:\n ew = 'W'\n # \\u00b0 : degree symbol\n return '%f\\u00b0%s, %f\\u00b0%s' % (abs(lat), ns, abs(lon), ew)\n\n class DegreeFormatter(Formatter):\n \"\"\"\n This is a custom formatter that converts the native unit of\n radians into (truncated) degrees and adds a degree symbol.\n \"\"\"\n\n def __init__(self, round_to=1.0):\n self._round_to = round_to\n\n def __call__(self, x, pos=None):\n degrees = round(x / self._round_to) * self._round_to\n # \\u00b0 : degree symbol\n return \"%d\\u00b0\" % degrees\n\n def set_meridian_grid(self, degrees):\n \"\"\"\n Set the number of degrees between each meridian grid.\n\n It provides a more convenient interface to set the ticking than set_xticks would.\n \"\"\"\n # Set up a FixedLocator at each of the points, evenly spaced\n # by degrees.\n x0, x1 = self.get_xlim()\n number = abs((x1 - x0) / degrees) + 1\n self.xaxis.set_major_locator(\n FixedLocator(\n np.linspace(x0, x1, number, True)[1:-1]))\n # Set the formatter to display the tick labels in degrees,\n # rather than radians.\n self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))\n\n def set_parallel_grid(self, degrees):\n \"\"\"\n Set the number of degrees between each meridian grid.\n\n It provides a more convenient interface than set_yticks would.\n \"\"\"\n # Set up a FixedLocator at each of the points, evenly spaced\n # by degrees.\n y0, y1 = self.get_ylim()\n number = ((y1 - y0) / degrees) + 1\n self.yaxis.set_major_locator(\n FixedLocator(\n np.linspace(y0, y1, number, True)[1:-1]))\n # Set the formatter to display the tick labels in degrees,\n # rather than radians.\n self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))\n\n # Interactive panning and zooming is not supported with this projection,\n # so we override all of the following methods to disable it.\n def _in_axes(self, mouseevent):\n if hasattr(self._pan_trans):\n return True\n else:\n return Axes._in_axes(self, mouseevent)\n\n def can_zoom(self):\n \"\"\"\n Return True if this axes support the zoom box\n \"\"\"\n return True\n\n def start_pan(self, x, y, button):\n self._pan_trans = self.transAxes.inverted() + \\\n blended_transform_factory(\n self._yaxis_stretch,\n self._xaxis_pretransform,)\n\n def end_pan(self):\n delattr(self, '_pan_trans')\n\n def drag_pan(self, button, key, x, y):\n pan1 = self._pan_trans.transform([(x, y)])[0]\n self.set_ra0(360 - pan1[0])\n self.set_dec0(pan1[1])\n self._update_affine()\n\n# now define the Albers equal area axes\n\nclass AlbersEqualAreaAxes(SkymapperAxes):\n \"\"\"\n A custom class for the Albers Equal Area projection.\n\n https://en.wikipedia.org/wiki/Albers_projection\n \"\"\"\n\n name = 'aea'\n\n @classmethod\n def get_projection_class(kls):\n return kls.AlbersEqualAreaTransform\n\n # Now, the transforms themselves.\n class AlbersEqualAreaTransform(Transform):\n \"\"\"\n The base Hammer transform.\n \"\"\"\n input_dims = 2\n output_dims = 2\n is_separable = False\n\n def __init__(self, **kwargs):\n Transform.__init__(self, **kwargs)\n 
self.dec0 = 0\n self.ra0 = 180\n self.dec1 = -60\n self.dec2 = 30\n self._update()\n\n def set_center(self, center):\n ra0, dec0 = center\n self.ra0 = ra0\n self.dec0 = dec0\n self._update()\n\n def set_dec1(self, dec1):\n self.dec1 = dec1\n self._update()\n\n def set_dec2(self, dec2):\n self.dec2 = dec2\n self._update()\n\n def _update(self):\n self.n = 0.5 * (np.sin(np.radians(self.dec1)) \n + np.sin(np.radians(self.dec2)))\n\n self.C = np.cos(np.radians(self.dec1))**2 + 2 * self.n * np.sin(np.radians(self.dec1))\n self.rho0 = self.__rho__(self.dec0)\n\n def __rho__(self, dec):\n if self.n == 0:\n return np.sqrt(self.C - 2 * self.n * np.sin(np.radians(dec)))\n else:\n return np.sqrt(self.C - 2 * self.n * np.sin(np.radians(dec))) / self.n\n\n def transform_non_affine(self, ll):\n \"\"\"\n Override the transform_non_affine method to implement the custom\n transform.\n\n The input and output are Nx2 numpy arrays.\n \"\"\"\n ra = ll[:,0]\n dec = ll[:,1]\n ra0 = self.ra0\n ra_ = np.radians(ra - ra0) # Do not inverse for RA\n\n # FIXME: problem with the slices sphere: outer parallel needs to be dubplicated at the expense of the central one\n if self.n == 0:\n rt = np.array([\n self.rho0 * (ra_),\n - self.rho0 * (np.sin(np.radians(self.dec0) - np.sin(np.radians(dec)))),\n ]).T\n else:\n theta = self.n * ra_\n rho = self.__rho__(dec)\n rt = np.array([\n rho*np.sin(theta),\n self.rho0 - rho*np.cos(theta)]).T\n #if np.isnan(rt).any(): \n # raise ValueError('nan occured : ll =%s' % (str(ll)))\n return rt\n\n # This is where things get interesting. With this projection,\n # straight lines in data space become curves in display space.\n # This is done by interpolating new values between the input\n # values of the data. Since ``transform`` must not return a\n # differently-sized array, any transform that requires\n # changing the length of the data array must happen within\n # ``transform_path``.\n def transform_path_non_affine(self, path):\n # Adaptive interpolation:\n # we keep adding control points, till all control points\n # have an error of less than 0.01 (about 1%)\n # or if the number of control points is > 80.\n ra0 = self.ra0\n path = path.cleaned(curves=False)\n v = path.vertices\n diff = v[:, 0] - v[0, 0]\n v00 = v[0][0] - ra0\n while v00 > 180: v00 -= 360\n while v00 < -180: v00 += 360\n v00 += ra0\n v[:, 0] = v00 + diff\n nonstop = path.codes > 0\n path = Path(v[nonstop], path.codes[nonstop])\n isteps = int(path._interpolation_steps * 1.5)\n if isteps < 10: isteps = 10\n while True:\n ipath = path.interpolated(isteps)\n tiv = self.transform(ipath.vertices)\n itv = Path(self.transform(path.vertices)).interpolated(isteps).vertices\n if np.mean(np.abs(tiv - itv)) < 0.01:\n break\n if isteps > 20:\n break\n isteps = int(isteps * 1.5)\n return Path(tiv, ipath.codes)\n\n transform_path_non_affine.__doc__ = \\\n Transform.transform_path_non_affine.__doc__\n\n if matplotlib.__version__ < '1.2':\n transform = transform_non_affine\n transform_path = transform_path_non_affine\n transform_path.__doc__ = Transform.transform_path.__doc__\n\n def inverted(self):\n return AlbersEqualAreaAxes.InvertedAlbersEqualAreaTransform(self)\n inverted.__doc__ = Transform.inverted.__doc__\n\n class InvertedAlbersEqualAreaTransform(Transform):\n \"\"\" Inverted transform.\n\n This will always only give values in the prime ra0-180 ~ ra0+180 range, I believe.\n So it is inherently broken. 
I wonder when matplotlib actually calls this function,\n given that interactive is disabled.\n \"\"\"\n input_dims = 2\n output_dims = 2\n is_separable = False\n\n def __init__(self, inverted, **kwargs):\n Transform.__init__(self, **kwargs)\n self.inverted = inverted\n\n def transform_non_affine(self, xy):\n x = xy[:,0]\n y = xy[:,1]\n inverted = self.inverted\n\n rho = np.sqrt(x**2 + (inverted.rho0 - y)**2)\n\n # make sure that the signs are correct\n if inverted.n == 0:\n rt = np.degrees(\n [\n np.radians(inverted.ra0) + x / inverted.rho0,\n np.arcsin(y / inverted.rho0 + np.sin(np.radians(inverted.dec0)))\n ]).T\n return rt\n elif inverted.n > 0:\n theta = np.degrees(np.arctan2(x, inverted.rho0 - y))\n else:\n theta = np.degrees(np.arctan2(-x, -(inverted.rho0 - y)))\n return np.degrees([np.radians(inverted.ra0) + theta/inverted.n,\n np.arcsin((inverted.C - (rho * inverted.n)**2)/(2*inverted.n))]).T\n\n transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__\n\n if matplotlib.__version__ < '1.2':\n transform = transform_non_affine\n\n def inverted(self):\n # The inverse of the inverse is the original transform... ;)\n return self.inverted\n\n inverted.__doc__ = Transform.inverted.__doc__\n\nclass HealpixCollection(PolyCollection):\n def __init__(self, map, mask, nest=False, **kwargs):\n nside = healpix.npix2nside(len(mask))\n self.v = pix2quad(nside, mask.nonzero()[0], nest)\n PolyCollection.__init__(self, self.v, array=map[mask], **kwargs)\n\n def get_datalim(self, transData):\n \"\"\" The data lim of a healpix collection.\n \"\"\" \n # FIXME: it is currently set to the full sky.\n # This could have been trimmed down. \n # We want to set xlim smartly such that the largest\n # empty region is chopped off. I think it is possible, by\n # doing a histogram in ra, for example. 
\n vmin = (0, -90)\n vmax = (360, 90)\n return Bbox((vmin, vmax))\n\nimport matplotlib.transforms as mtransforms\nimport warnings\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib as mpl\nimport matplotlib.cbook as cbook\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\nfrom matplotlib import docstring\nimport matplotlib.transforms as transforms\nimport matplotlib.artist as artist\nfrom matplotlib.artist import allow_rasterization\nimport matplotlib.backend_bases as backend_bases\nimport matplotlib.path as mpath\nfrom matplotlib import _path\nimport matplotlib.mlab as mlab\nimport matplotlib.lines as mlines\nfrom matplotlib.collections import Collection\n\nclass HealpixTriCollection(Collection):\n \"\"\"\n Class for the efficient drawing of a triangular mesh using\n Gouraud shading.\n\n A triangular mesh is a :class:`~matplotlib.tri.Triangulation`\n object.\n \"\"\"\n def __init__(self, map, mask, nest=False, **kwargs):\n Collection.__init__(self, **kwargs)\n nside = healpix.npix2nside(len(map))\n # remove the first axes\n verts = pix2tri(nside, mask.nonzero()[0]).reshape(-1, 3, 2)\n c = np.ones((verts.shape[0], verts.shape[1])) * np.repeat(map[mask][:, None], 2, axis=0)\n\n self._verts = verts\n self._shading = 'gouraud'\n self._is_filled = True\n self.set_array(c.reshape(-1))\n \n def get_paths(self):\n if self._paths is None:\n self.set_paths()\n return self._paths\n\n def set_paths(self):\n self._paths = self.convert_mesh_to_paths(self._verts)\n\n @staticmethod\n def convert_mesh_to_paths(verts):\n \"\"\"\n Converts a given mesh into a sequence of\n :class:`matplotlib.path.Path` objects for easier rendering by\n backends that do not directly support meshes.\n\n This function is primarily of use to backend implementers.\n \"\"\"\n Path = mpath.Path\n return [Path(x) for x in verts]\n\n @allow_rasterization\n def draw(self, renderer):\n if not self.get_visible():\n return\n renderer.open_group(self.__class__.__name__)\n transform = self.get_transform()\n\n # Get a list of triangles and the color at each vertex.\n \n verts = self._verts\n \n self.update_scalarmappable()\n colors = self._facecolors.reshape(-1, 3, 4)\n \n oldshape = list(verts.shape)\n \n verts = transform.transform(verts.reshape(-1, 2)).reshape(oldshape)\n\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n gc.set_linewidth(self.get_linewidth()[0])\n renderer.draw_gouraud_triangles(gc, verts, colors, mtransforms.IdentityTransform())\n gc.restore()\n renderer.close_group(self.__class__.__name__)\n\n def get_datalim(self, transData):\n \"\"\" The data lim of a healpix collection.\n \"\"\" \n # FIXME: it is currently set to the full sky.\n # This could have been trimmed down. \n # We want to set xlim smartly such that the largest\n # empty region is chopped off. I think it is possible, by\n # doing a histogram in ra, for example. 
\n vmin = (0, -90)\n vmax = (360, 90)\n return Bbox((vmin, vmax))\n\n\ndef _wrap360(phi, dir='left'):\n phi[np.abs(phi) < 1e-9] = 0\n if dir == 'left':\n ref = phi.min(axis=-1)\n else:\n ref = phi.max(axis=-1)\n# print('ref', ref, phi, ref % 360 - ref)\n diff = (ref % 360) - ref \n phi = phi + diff[:, None]\n \n #diff = phi - ref[:, None] \n #print('great', (diff > 180).sum())\n #diff[diff > 180] -= 360 \n #print('less', (diff < -180).sum())\n #diff[diff < -180] += 360\n #phi = ref[:, None] + diff\n return phi \n\n# a few helper functions talking to healpy/healpix.\ndef pix2quad(nside, pix, nest=False):\n \"\"\"Generate healpix quad vertices for pixels where mask is True\n\n Args:\n pix: list of pixel numbers\n nest: nested or not\n nside: HealPix nside\n\n Returns:\n vertices\n vertices: (N,4,2), RA/Dec coordinates of 4 boundary points of cell\n \"\"\"\n\n pix = np.asarray(pix)\n vertices = np.zeros((pix.size, 4, 2))\n\n theta, phi = healpix.vertices(nside, pix)\n theta = np.degrees(theta)\n phi = np.degrees(phi)\n\n vertices[:, :, 0] = phi\n vertices[:, :, 1] = 90.0 - theta\n\n # ensure objects are in the same image plane.\n vertices[:, :, 0] = _wrap360(phi, 'right')\n\n return vertices\n\ndef pix2tri(nside, pix, nest=False):\n \"\"\"Generate healpix quad vertices for pixels where mask is True\n\n Args:\n pix: list of pixel numbers\n nest: nested or not\n nside: HealPix nside\n\n Returns:\n vertices\n vertices: (N,3,2,2), RA/Dec coordinates of 3 boundary points of 2 triangles\n \"\"\"\n\n # each pixel contains 2 triangles.\n pix = np.asarray(pix)\n vertices = np.zeros((pix.size, 2, 3, 2))\n\n theta, phi = healpix.vertices(nside, pix)\n theta = np.degrees(theta)\n phi = np.degrees(phi)\n\n vertices[:, 0, :, 0] = _wrap360(phi[:, [0, 1, 3]], 'left')\n vertices[:, 0, :, 1] = 90.0 - theta[:, [0, 1, 3]]\n vertices[:, 1, :, 0] = _wrap360(phi[:, [1, 2, 3]], 'right')\n vertices[:, 1, :, 1] = 90.0 - theta[:, [1, 2, 3]]\n\n return vertices\n\ndef pix2radec(nside, pix):\n theta, phi = healpix.pix2ang(nside, pix)\n return np.degrees(phi), 90 - np.degrees(theta)\n\ndef radec2pix(nside, ra, dec):\n phi = np.radians(ra)\n theta = np.radians(90 - dec)\n return healpix.ang2pix(nside, theta, phi)\n \ndef histogrammap(ra, dec, weights=None, nside=32, perarea=False, range=None):\n if range is not None:\n (ra1, ra2), (dec1, dec2) = range\n m = (ra >= ra1)& (ra <= ra2)\n m &= (dec >= dec1)& (dec <= dec2)\n ra = ra[m]\n dec = dec[m]\n if weights is not None:\n weights = weights[m]\n\n ipix = healpix.ang2pix(nside, np.radians(90-dec), np.radians(ra))\n npix = healpix.nside2npix(nside)\n if perarea:\n npix = healpix.nside2npix(nside)\n sky = 360. ** 2 / np.pi\n area = 1. * (sky / npix)\n else:\n area = 1\n\n if weights is not None:\n w = np.bincount(ipix, weights=weights, minlength=npix)\n N = np.bincount(ipix, minlength=npix)\n w = w / area\n N = N / area\n return w, N\n else:\n w = 1.0 * np.bincount(ipix, minlength=npix)\n return w / area\n\n# Now register the projection with matplotlib so the user can select\n# it.\nregister_projection(AlbersEqualAreaAxes)\n\ndef create_healpix():\n \"\"\" A pure python (numpy-based) version of key healpix functions.\n\n The ring scheme is implemented. 
\n\n Depencency: numpy.\n\n It shall probably be self-hosted as an individual python package.\n\n Author: Yu Feng <[email protected]>\n \"\"\"\n\n import numpy\n\n def npix2nside(npix):\n # FIXME: this could be buggy for large npix\n nside2 = npix // 12\n nside = numpy.array(nside2 ** 0.5).astype('i8')\n return nside\n\n def nside2npix(nside):\n return nside * nside * 12\n\n def ang2pix(nside, theta, phi):\n r\"\"\"Convert angle :math:`\\theta` :math:`\\phi` to pixel.\n\n This is translated from chealpix.c; but refer to Section 4.1 of\n http://adsabs.harvard.edu/abs/2005ApJ...622..759G\n \"\"\"\n nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)\n \n def equatorial(nside, tt, z):\n t1 = nside * (0.5 + tt)\n t2 = nside * z * 0.75\n jp = (t1 - t2).astype('i8')\n jm = (t1 + t2).astype('i8')\n ir = nside + 1 + jp - jm # in {1, 2n + 1}\n kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd \n \n ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}\n \n ip = ip % (4 * nside)\n return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip\n \n def polecaps(nside, tt, z, s):\n tp = tt - numpy.floor(tt)\n za = numpy.abs(z)\n tmp = nside * s / ((1 + za) / 3) ** 0.5\n mp = za > 0.99\n tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5\n jp = (tp * tmp).astype('i8')\n jm = ((1 - tp) * tmp).astype('i8')\n ir = jp + jm + 1\n ip = (tt * ir).astype('i8')\n ip = ip % (4 * ir)\n\n r1 = 2 * ir * (ir - 1) \n r2 = 2 * ir * (ir + 1)\n \n r = numpy.empty_like(r1)\n \n r[z > 0] = r1[z > 0] + ip[z > 0]\n r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]\n return r\n \n z = numpy.cos(theta)\n s = numpy.sin(theta)\n \n tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]\n \n result = numpy.zeros(z.shape, dtype='i8')\n mask = (z < 2. / 3) & (z > -2. / 3)\n \n result[mask] = equatorial(nside[mask], tt[mask], z[mask])\n result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])\n return result\n \n def pix2ang(nside, pix):\n r\"\"\"Convert pixel to angle :math:`\\theta` :math:`\\phi`.\n\n nside and pix are broadcast with numpy rules.\n\n Returns: theta, phi\n\n This is translated from chealpix.c; but refer to Section 4.1 of\n http://adsabs.harvard.edu/abs/2005ApJ...622..759G\n \"\"\"\n nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)\n \n ncap = nside * (nside - 1) * 2\n npix = 12 * nside * nside\n \n def northpole(pix, npix):\n iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2\n iphi = (pix + 1) - 2 * iring * (iring - 1)\n z = 1.0 - (iring*iring) * 4. / npix\n phi = (iphi - 0.5) * 0.5 * numpy.pi / iring\n return z, phi\n \n def equatorial(pix, nside, npix, ncap):\n ip = pix - ncap\n iring = ip // (4 * nside) + nside\n iphi = ip % (4 * nside) + 1\n fodd = (((iring + nside) &1) + 1.) * 0.5\n z = (2 * nside - iring) * nside * 8.0 / npix\n phi = (iphi - fodd) * (0.5 * numpy.pi) / nside\n return z, phi\n \n def southpole(pix, npix):\n ip = npix - pix\n iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2\n iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))\n z = -1 + (iring * iring) * 4. 
/ npix\n phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring\n return z, phi\n \n mask1 = pix < ncap\n \n mask2 = (~mask1) & (pix < npix - ncap)\n mask3 = pix >= npix - ncap\n\n z = numpy.zeros(pix.shape, dtype='f8')\n phi = numpy.zeros(pix.shape, dtype='f8')\n z[mask1], phi[mask1] = northpole(pix[mask1], npix[mask1])\n z[mask2], phi[mask2] = equatorial(pix[mask2], nside[mask2], npix[mask2], ncap[mask2])\n z[mask3], phi[mask3] = southpole(pix[mask3], npix[mask3])\n return numpy.arccos(z), phi\n\n def ang2xy(theta, phi):\n r\"\"\"Convert :math:`\\theta` :math:`\\phi` to :math:`x_s` :math:`y_s`.\n\n Returns: x, y\n\n Refer to Section 4.4 of http://adsabs.harvard.edu/abs/2005ApJ...622..759G\n \"\"\"\n theta, phi = numpy.lib.stride_tricks.broadcast_arrays(theta, phi)\n z = numpy.cos(theta)\n x = numpy.empty(theta.shape, dtype='f8')\n y = numpy.empty(theta.shape, dtype='f8')\n def sigma(z):\n return numpy.sign(z) * (2 - (3 * (1- numpy.abs(z))) ** 0.5)\n \n def equatorial(z, phi):\n return phi, 3 * numpy.pi / 8 * z\n def polarcaps(z, phi):\n s = sigma(z)\n x = phi - (numpy.abs(s) - 1) * (phi % (0.5 * numpy.pi) - 0.25 * numpy.pi)\n y = 0.25 * numpy.pi * s\n return x, y\n \n mask = numpy.abs(z) < 2. / 3\n\n x[mask], y[mask] = equatorial(z[mask], phi[mask])\n x[~mask], y[~mask] = polarcaps(z[~mask], phi[~mask])\n return x, y\n\n def xy2ang(x, y):\n r\"\"\"Convert :math:`x_s` :math:`y_s` to :math:`\\theta` :math:`\\phi`.\n \n Returns: theta, phi\n\n Refer to Section 4.4 of http://adsabs.harvard.edu/abs/2005ApJ...622..759G\n \"\"\"\n x, y = numpy.lib.stride_tricks.broadcast_arrays(x, y)\n \n theta = numpy.empty(x.shape, dtype='f8')\n phi = numpy.empty(x.shape, dtype='f8')\n \n def equatorial(x, y):\n return numpy.arccos(8 * y / (3 * numpy.pi)), x\n \n def polarcaps(x, y):\n ya = numpy.abs(y)\n xt = x % (0.5 * numpy.pi)\n phi = x - (ya - numpy.pi * 0.25) / (ya - numpy.pi * 0.5) * (xt - 0.25 * numpy.pi)\n z = (1 - 1. 
/ 3 * (2 - 4 * ya / numpy.pi)**2) * y / ya\n return numpy.arccos(z), phi\n \n mask = numpy.abs(y) < numpy.pi * 0.25\n \n theta[mask], phi[mask] = equatorial(x[mask], y[mask])\n theta[~mask], phi[~mask] = polarcaps(x[~mask], y[~mask])\n return theta, phi\n\n def vertices(nside, pix):\n r\"\"\" Calculate the vertices for pixels \n\n Returns: theta, phi\n for each (nside, pix) pair, a four-vector of theta, and\n a four-vector of phi is returned, corresponding to\n the theta, phi of each vertex of the pixel boundary.\n (left, bottom, right, top)\n \"\"\"\n nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)\n x = numpy.zeros(nside.shape, dtype=('f8', 4))\n y = numpy.zeros(nside.shape, dtype=('f8', 4))\n theta, phi = pix2ang(nside, pix)\n xc, yc = ang2xy(theta, phi)\n xstep = numpy.pi / (2 * nside)\n ystep = numpy.pi / (2 * nside)\n x[..., 0] = xc - 0.5 * xstep\n y[..., 0] = yc\n x[..., 1] = xc\n y[..., 1] = yc + 0.5 * ystep\n x[..., 2] = xc + 0.5 * xstep\n y[..., 2] = yc\n x[..., 3] = xc\n y[..., 3] = yc - 0.5 * ystep\n \n theta, phi = xy2ang(x, y)\n return theta, phi\n return locals()\n\nclass Namespace(object):\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\nhealpix = Namespace(**create_healpix())\n\nif __name__ == '__main__':\n from matplotlib.figure import Figure\n from matplotlib.backends.backend_agg import FigureCanvasAgg\n\n # Now make a simple example using the custom projection.\n\n import numpy as np\n\n fig = Figure(figsize=(6, 6))\n\n# ra = np.random.uniform(size=100, low=0, high=360)\n# dec = np.random.uniform(size=100, low=-90, high=90)\n ra = np.linspace(0, 360, 100)\n dec = np.linspace(-90, 90, 100)\n ax = fig.add_subplot(111, projection=\"aea\")\n ax.set_xlim(359, 0)\n ax.set_ylim(-70, 70)\n ax.set_parallels(-20, 60)\n ax.set_center(180, 0)\n ax.plot(ra, dec, '*')\n ax.axhline(-20)\n ax.axvline(140)\n\n ax.plot(*pix2tri(8, [104, 105, 106]).reshape(-1, 2).T, color='k')\n\n ra = np.random.uniform(size=1000, low=30, high=60)\n dec = np.random.uniform(size=1000, low=-50, high=50)\n ax.histmap(ra, dec, nside=32, weights=ra * dec, mean=True)\n\n ra = np.random.uniform(size=1000, low=120, high=160)\n dec = np.random.uniform(size=1000, low=-50, high=50)\n ax.histcontour(ra, dec, weights=ra * dec, nside=32, mean=True)\n\n ax.tick_params(labelright=True, labeltop=True)\n\n ax.tripcolor(ra, dec, ra*dec)\n fig.colorbar(ax._gci())\n\n ra = np.random.uniform(size=1000, low=180, high=200)\n dec = np.random.uniform(size=1000, low=-50, high=50)\n\n ax.set_meridian_grid(30)\n ax.set_parallel_grid(30)\n ax.grid()\n canvas = FigureCanvasAgg(fig)\n fig.savefig('xxx.png')\n"
]
| [
[
"numpy.repeat",
"matplotlib.transforms.Transform.__init__",
"matplotlib.axes.Axes.set_ylim",
"numpy.arccos",
"matplotlib.axes.Axes.cla",
"matplotlib.axes.Axes.set_xscale",
"numpy.radians",
"numpy.sign",
"numpy.cos",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.bincount",
"numpy.sin",
"numpy.empty",
"matplotlib.ticker.MaxNLocator",
"matplotlib.axes.Axes._in_axes",
"numpy.arcsin",
"matplotlib.axes.Axes.__init__",
"matplotlib.axis.YAxis",
"matplotlib.axes.Axes.set_xlim",
"matplotlib.transforms.BboxTransformTo",
"matplotlib.transforms.IdentityTransform",
"matplotlib.collections.Collection.__init__",
"matplotlib.projections.register_projection",
"numpy.lib.stride_tricks.broadcast_arrays",
"matplotlib.axis.XAxis",
"matplotlib.transforms.blended_transform_factory",
"numpy.sqrt",
"matplotlib.transforms.Affine2D",
"numpy.empty_like",
"numpy.array",
"numpy.zeros",
"matplotlib.patches.Polygon",
"matplotlib.axes.Axes.set_yscale",
"matplotlib.collections.PolyCollection.__init__",
"matplotlib.spines.Spine.linear_spine",
"numpy.arctan2",
"matplotlib.transforms.Bbox",
"numpy.floor",
"matplotlib.path.Path",
"numpy.asarray",
"numpy.ones",
"numpy.degrees",
"numpy.random.uniform",
"numpy.abs",
"matplotlib.figure.Figure",
"matplotlib.ticker.NullLocator",
"numpy.linspace"
]
]
|
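The `mpl_aea.py` record above implements the Albers equal-area projection as a matplotlib `Transform` (`AlbersEqualAreaTransform.transform_non_affine`). A stand-alone numpy sketch of the same forward mapping for the non-degenerate case n ≠ 0, following the formulas in that class (https://en.wikipedia.org/wiki/Albers_projection); the center `ra0`, `dec0` and standard parallels `dec1`, `dec2` below are illustrative values, not defaults taken from the file:

```python
# Forward Albers equal-area mapping: (ra, dec) in degrees -> planar (x, y).
import numpy as np

def albers_forward(ra, dec, ra0=180.0, dec0=0.0, dec1=-20.0, dec2=60.0):
    n = 0.5 * (np.sin(np.radians(dec1)) + np.sin(np.radians(dec2)))
    C = np.cos(np.radians(dec1)) ** 2 + 2 * n * np.sin(np.radians(dec1))
    rho = np.sqrt(C - 2 * n * np.sin(np.radians(dec))) / n
    rho0 = np.sqrt(C - 2 * n * np.sin(np.radians(dec0))) / n
    theta = n * np.radians(ra - ra0)
    return rho * np.sin(theta), rho0 - rho * np.cos(theta)

x, y = albers_forward(np.array([150.0, 210.0]), np.array([-30.0, 45.0]))
print(x, y)
```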
toolmen-lab/R3Det_piglet-detection | [
"9e256570e157ee184eb9a4dc11ebafdc1b56f121"
]
| [
"libs/networks/build_whole_network_r3det.py"
]
| [
"# -*-coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\nfrom libs.networks import resnet, resnet_gluoncv_r3det, mobilenet_v2, xception\nfrom libs.box_utils import anchor_utils, generate_anchors, generate_rotate_anchors\nfrom libs.configs import cfgs\nfrom libs.losses import losses\nfrom libs.box_utils import show_box_in_tensor\nfrom libs.detection_oprations.refine_proposal_opr import postprocess_detctions\nfrom libs.detection_oprations.anchor_target_layer_without_boxweight import anchor_target_layer\nfrom libs.detection_oprations.refinebox_target_layer_without_boxweight import refinebox_target_layer\nfrom libs.box_utils import bbox_transform\n\n\nclass DetectionNetwork(object):\n\n def __init__(self, base_network_name, is_training):\n\n self.base_network_name = base_network_name\n self.is_training = is_training\n if cfgs.METHOD == 'H':\n self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS)\n else:\n self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS) * len(cfgs.ANCHOR_ANGLES)\n self.method = cfgs.METHOD\n self.losses_dict = {}\n\n def build_base_network(self, input_img_batch):\n\n if self.base_network_name.startswith('resnet_v1'):\n return resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)\n\n elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:\n\n return resnet_gluoncv_r3det.resnet_base(input_img_batch, scope_name=self.base_network_name,\n is_training=self.is_training)\n\n elif self.base_network_name.startswith('MobilenetV2'):\n return mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)\n\n elif self.base_network_name.startswith('xception'):\n return xception.xception_base(input_img_batch, is_training=self.is_training)\n\n else:\n raise ValueError('Sry, we only support resnet, mobilenet_v2 and xception')\n\n def rpn_cls_net(self, inputs, scope_list, reuse_flag, level):\n rpn_conv2d_3x3 = inputs\n for i in range(4):\n rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[3, 3],\n stride=1,\n activation_fn=tf.nn.relu,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope='{}_{}'.format(scope_list[0], i),\n reuse=reuse_flag)\n\n rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,\n num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER,\n scope=scope_list[2],\n activation_fn=None,\n reuse=reuse_flag)\n\n rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM],\n name='rpn_{}_classification_reshape'.format(level))\n rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))\n\n return rpn_box_scores, rpn_box_probs\n\n def refine_cls_net(self, inputs, scope_list, reuse_flag, level):\n rpn_conv2d_3x3 = inputs\n for i in range(4):\n rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[3, 3],\n stride=1,\n activation_fn=tf.nn.relu,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope='{}_{}'.format(scope_list[0], i),\n reuse=reuse_flag)\n\n rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,\n 
num_outputs=cfgs.CLASS_NUM,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER,\n scope=scope_list[2],\n activation_fn=None,\n reuse=reuse_flag)\n\n rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM],\n name='refine_{}_classification_reshape'.format(level))\n rpn_box_probs = tf.sigmoid(rpn_box_scores, name='refine_{}_classification_sigmoid'.format(level))\n\n return rpn_box_scores, rpn_box_probs\n\n def rpn_reg_net(self, inputs, scope_list, reuse_flag, level):\n rpn_delta_boxes = inputs\n for i in range(4):\n rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[3, 3],\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=tf.nn.relu,\n scope='{}_{}'.format(scope_list[1], i),\n reuse=reuse_flag)\n\n rpn_delta_boxes = slim.conv2d(rpn_delta_boxes,\n num_outputs=5 * self.num_anchors_per_location,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope=scope_list[3],\n activation_fn=None,\n reuse=reuse_flag)\n\n rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5],\n name='rpn_{}_regression_reshape'.format(level))\n return rpn_delta_boxes\n\n def refine_reg_net(self, inputs, scope_list, reuse_flag, level):\n rpn_delta_boxes = inputs\n for i in range(4):\n rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[3, 3],\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=tf.nn.relu,\n scope='{}_{}'.format(scope_list[1], i),\n reuse=reuse_flag)\n\n rpn_delta_boxes = slim.conv2d(rpn_delta_boxes,\n num_outputs=5,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope=scope_list[3],\n activation_fn=None,\n reuse=reuse_flag)\n\n rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5],\n name='refine_{}_regression_reshape'.format(level))\n return rpn_delta_boxes\n\n def rpn_net(self, feature_pyramid, name):\n\n rpn_delta_boxes_list = []\n rpn_scores_list = []\n rpn_probs_list = []\n with tf.variable_scope(name):\n with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):\n for level in cfgs.LEVEL:\n\n if cfgs.SHARE_NET:\n reuse_flag = None if level == cfgs.LEVEL[0] else True\n scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']\n else:\n reuse_flag = None\n scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,\n 'rpn_classification_' + level, 'rpn_regression_' + level]\n\n rpn_box_scores, rpn_box_probs = self.rpn_cls_net(feature_pyramid[level],\n scope_list, reuse_flag,\n level)\n rpn_delta_boxes = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)\n\n rpn_scores_list.append(rpn_box_scores)\n rpn_probs_list.append(rpn_box_probs)\n rpn_delta_boxes_list.append(rpn_delta_boxes)\n\n # rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)\n # rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)\n # rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)\n\n return rpn_delta_boxes_list, rpn_scores_list, rpn_probs_list\n\n def refine_net(self, feature_pyramid, name):\n\n refine_delta_boxes_list = []\n refine_scores_list = []\n refine_probs_list = 
[]\n with tf.variable_scope(name):\n with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):\n for level in cfgs.LEVEL:\n\n if cfgs.SHARE_NET:\n reuse_flag = None if level == cfgs.LEVEL[0] else True\n scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'refine_classification', 'refine_regression']\n else:\n reuse_flag = None\n scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,\n 'refine_classification_' + level, 'refine_regression_' + level]\n\n refine_box_scores, refine_box_probs = self.refine_cls_net(feature_pyramid[level],\n scope_list, reuse_flag,\n level)\n refine_delta_boxes = self.refine_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)\n\n refine_scores_list.append(refine_box_scores)\n refine_probs_list.append(refine_box_probs)\n refine_delta_boxes_list.append(refine_delta_boxes)\n\n # refine_all_delta_boxes = tf.concat(refine_delta_boxes_list, axis=0)\n # refine_all_boxes_scores = tf.concat(refine_scores_list, axis=0)\n # refine_all_boxes_probs = tf.concat(refine_probs_list, axis=0)\n\n return refine_delta_boxes_list, refine_scores_list, refine_probs_list\n\n def make_anchors(self, feature_pyramid):\n with tf.variable_scope('make_anchors'):\n anchor_list = []\n level_list = cfgs.LEVEL\n with tf.name_scope('make_anchors_all_level'):\n for level, base_anchor_size, stride in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):\n '''\n (level, base_anchor_size) tuple:\n (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)\n '''\n featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], \\\n tf.shape(feature_pyramid[level])[2]\n\n featuremap_height = tf.cast(featuremap_height, tf.float32)\n featuremap_width = tf.cast(featuremap_width, tf.float32)\n\n if self.method == 'H':\n tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre,\n inp=[featuremap_height, featuremap_width, stride,\n np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0],\n Tout=[tf.float32])\n\n tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])\n else:\n tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size,\n anchor_scales=cfgs.ANCHOR_SCALES,\n anchor_ratios=cfgs.ANCHOR_RATIOS,\n anchor_angles=cfgs.ANCHOR_ANGLES,\n featuremap_height=featuremap_height,\n featuremap_width=featuremap_width,\n stride=stride)\n tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])\n\n anchor_list.append(tmp_anchors)\n\n # all_level_anchors = tf.concat(anchor_list, axis=0)\n return anchor_list\n\n def add_anchor_img_smry(self, img, anchors, labels, method):\n\n positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])\n # negative_anchor_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])\n\n positive_anchor = tf.gather(anchors, positive_anchor_indices)\n # negative_anchor = tf.gather(anchors, negative_anchor_indices)\n\n pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,\n boxes=positive_anchor,\n method=method)\n # neg_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,\n # boxes=negative_anchor)\n\n tf.summary.image('positive_anchor', pos_in_img)\n # tf.summary.image('negative_anchors', neg_in_img)\n\n def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gpu_id=0):\n\n if self.is_training:\n gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])\n gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)\n\n gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])\n gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)\n\n 
img_shape = tf.shape(input_img_batch)\n\n # 1. build base network\n feature_pyramid = self.build_base_network(input_img_batch)\n\n # 2. build rpn\n rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list = self.rpn_net(feature_pyramid, 'rpn_net')\n\n # 3. generate_anchors\n anchor_list = self.make_anchors(feature_pyramid)\n\n rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)\n rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)\n # rpn_cls_prob = tf.concat(rpn_cls_prob_list, axis=0)\n anchors = tf.concat(anchor_list, axis=0)\n\n if self.is_training:\n with tf.variable_scope('build_loss'):\n labels, target_delta, anchor_states, target_boxes = tf.py_func(func=anchor_target_layer,\n inp=[gtboxes_batch_h, gtboxes_batch_r,\n anchors],\n Tout=[tf.float32, tf.float32,\n tf.float32, tf.float32])\n\n if self.method == 'H':\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)\n else:\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)\n\n cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)\n if cfgs.USE_IOU_FACTOR:\n reg_loss = losses.iou_smooth_l1_loss(target_delta, rpn_box_pred, anchor_states, target_boxes, anchors)\n else:\n reg_loss = losses.smooth_l1_loss_atan(target_delta, rpn_box_pred, anchor_states)\n\n self.losses_dict['cls_loss'] = cls_loss * cfgs.CLS_WEIGHT\n self.losses_dict['reg_loss'] = reg_loss * cfgs.REG_WEIGHT\n\n box_pred_list, cls_prob_list, proposal_list = rpn_box_pred_list, rpn_cls_prob_list, anchor_list\n\n all_box_pred_list, all_cls_prob_list, all_proposal_list = [], [], []\n\n for i in range(cfgs.NUM_REFINE_STAGE):\n box_pred_list, cls_prob_list, proposal_list = self.refine_stage(input_img_batch,\n gtboxes_batch_r,\n box_pred_list,\n cls_prob_list,\n proposal_list,\n feature_pyramid,\n gpu_id,\n pos_threshold=cfgs.REFINE_IOU_POSITIVE_THRESHOLD[i],\n neg_threshold=cfgs.REFINE_IOU_NEGATIVE_THRESHOLD[i],\n stage='' if i == 0 else '_stage{}'.format(i + 2),\n proposal_filter=True if i == 0 else False)\n\n if not self.is_training:\n all_box_pred_list.extend(box_pred_list)\n all_cls_prob_list.extend(cls_prob_list)\n all_proposal_list.extend(proposal_list)\n else:\n all_box_pred_list, all_cls_prob_list, all_proposal_list = box_pred_list, cls_prob_list, proposal_list\n\n with tf.variable_scope('postprocess_detctions'):\n box_pred = tf.concat(all_box_pred_list, axis=0)\n cls_prob = tf.concat(all_cls_prob_list, axis=0)\n proposal = tf.concat(all_proposal_list, axis=0)\n\n boxes, scores, category = postprocess_detctions(refine_bbox_pred=box_pred,\n refine_cls_prob=cls_prob,\n anchors=proposal,\n is_training=self.is_training)\n boxes = tf.stop_gradient(boxes)\n scores = tf.stop_gradient(scores)\n category = tf.stop_gradient(category)\n\n if self.is_training:\n return boxes, scores, category, self.losses_dict\n else:\n return boxes, scores, category\n\n def get_restorer(self, checkpoint_path = ''):\n if checkpoint_path is '':\n checkpoint_path = tf.train.latest_checkpoint(os.path.join(cfgs.TRAINED_CKPT, cfgs.VERSION))\n\n if checkpoint_path != None:\n if cfgs.RESTORE_FROM_RPN:\n print('___restore from rpn___')\n model_variables = slim.get_model_variables()\n restore_variables = [var for var in model_variables if not var.name.startswith('FastRCNN_Head')] + \\\n [slim.get_or_create_global_step()]\n for var in restore_variables:\n print(var.name)\n restorer = tf.train.Saver(restore_variables)\n else:\n restorer = tf.train.Saver()\n print(\"model restore from :\", checkpoint_path)\n else:\n checkpoint_path = cfgs.PRETRAINED_CKPT\n 
print(\"model restore from pretrained mode, path is :\", checkpoint_path)\n\n model_variables = slim.get_model_variables()\n\n # for var in model_variables:\n # print(var.name)\n # print(20*\"__++__++__\")\n\n def name_in_ckpt_rpn(var):\n return var.op.name\n\n def name_in_ckpt_fastrcnn_head(var):\n '''\n Fast-RCNN/resnet_v1_50/block4 -->resnet_v1_50/block4\n Fast-RCNN/MobilenetV2/** -- > MobilenetV2 **\n :param var:\n :return:\n '''\n return '/'.join(var.op.name.split('/')[1:])\n\n nameInCkpt_Var_dict = {}\n for var in model_variables:\n if var.name.startswith('Fast-RCNN/'+self.base_network_name): # +'/block4'\n var_name_in_ckpt = name_in_ckpt_fastrcnn_head(var)\n nameInCkpt_Var_dict[var_name_in_ckpt] = var\n else:\n if var.name.startswith(self.base_network_name):\n var_name_in_ckpt = name_in_ckpt_rpn(var)\n nameInCkpt_Var_dict[var_name_in_ckpt] = var\n else:\n continue\n restore_variables = nameInCkpt_Var_dict\n for key, item in restore_variables.items():\n print(\"var_in_graph: \", item.name)\n print(\"var_in_ckpt: \", key)\n print(20*\"___\")\n restorer = tf.train.Saver(restore_variables)\n print(20 * \"****\")\n print(\"restore from pretrained_weighs in IMAGE_NET\")\n return restorer, checkpoint_path\n\n def get_gradients(self, optimizer, loss):\n '''\n\n :param optimizer:\n :param loss:\n :return:\n\n return vars and grads that not be fixed\n '''\n\n # if cfgs.FIXED_BLOCKS > 0:\n # trainable_vars = tf.trainable_variables()\n # # trained_vars = slim.get_trainable_variables()\n # start_names = [cfgs.NET_NAME + '/block%d'%i for i in range(1, cfgs.FIXED_BLOCKS+1)] + \\\n # [cfgs.NET_NAME + '/conv1']\n # start_names = tuple(start_names)\n # trained_var_list = []\n # for var in trainable_vars:\n # if not var.name.startswith(start_names):\n # trained_var_list.append(var)\n # # slim.learning.train()\n # grads = optimizer.compute_gradients(loss, var_list=trained_var_list)\n # return grads\n # else:\n # return optimizer.compute_gradients(loss)\n return optimizer.compute_gradients(loss)\n\n def enlarge_gradients_for_bias(self, gradients):\n\n final_gradients = []\n with tf.variable_scope(\"Gradient_Mult\") as scope:\n for grad, var in gradients:\n scale = 1.0\n if cfgs.MUTILPY_BIAS_GRADIENT and './biases' in var.name:\n scale = scale * cfgs.MUTILPY_BIAS_GRADIENT\n if not np.allclose(scale, 1.0):\n grad = tf.multiply(grad, scale)\n final_gradients.append((grad, var))\n return final_gradients\n\n def refine_feature_op(self, points, feature_map, name):\n\n h, w = tf.cast(tf.shape(feature_map)[1], tf.int32), tf.cast(tf.shape(feature_map)[2], tf.int32)\n\n xmin = tf.maximum(0.0, tf.floor(points[:, 0]))\n ymin = tf.maximum(0.0, tf.floor(points[:, 1]))\n xmax = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(points[:, 0]))\n ymax = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(points[:, 1]))\n\n left_top = tf.cast(tf.transpose(tf.stack([xmin, ymin], axis=0)), tf.int32)\n right_bottom = tf.cast(tf.transpose(tf.stack([xmax, ymax], axis=0)), tf.int32)\n left_bottom = tf.cast(tf.transpose(tf.stack([xmin, ymax], axis=0)), tf.int32)\n right_top = tf.cast(tf.transpose(tf.stack([xmax, ymin], axis=0)), tf.int32)\n\n feature_1x5 = slim.conv2d(inputs=feature_map,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[1, 5],\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=None,\n scope='refine_1x5_{}'.format(name))\n\n feature5x1 = slim.conv2d(inputs=feature_1x5,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[5, 1],\n 
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=None,\n scope='refine_5x1_{}'.format(name))\n\n feature_1x1 = slim.conv2d(inputs=feature_map,\n num_outputs=cfgs.FPN_CHANNEL,\n kernel_size=[1, 1],\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=None,\n scope='refine_1x1_{}'.format(name))\n\n feature = feature5x1 + feature_1x1\n\n left_top_feature = tf.gather_nd(tf.squeeze(feature), left_top)\n right_bottom_feature = tf.gather_nd(tf.squeeze(feature), right_bottom)\n left_bottom_feature = tf.gather_nd(tf.squeeze(feature), left_bottom)\n right_top_feature = tf.gather_nd(tf.squeeze(feature), right_top)\n\n refine_feature = right_bottom_feature * tf.tile(\n tf.reshape((tf.abs((points[:, 0] - xmin) * (points[:, 1] - ymin))), [-1, 1]),\n [1, cfgs.FPN_CHANNEL]) \\\n + left_top_feature * tf.tile(\n tf.reshape((tf.abs((xmax - points[:, 0]) * (ymax - points[:, 1]))), [-1, 1]),\n [1, cfgs.FPN_CHANNEL]) \\\n + right_top_feature * tf.tile(\n tf.reshape((tf.abs((points[:, 0] - xmin) * (ymax - points[:, 1]))), [-1, 1]),\n [1, cfgs.FPN_CHANNEL]) \\\n + left_bottom_feature * tf.tile(\n tf.reshape((tf.abs((xmax - points[:, 0]) * (points[:, 1] - ymin))), [-1, 1]),\n [1, cfgs.FPN_CHANNEL])\n\n refine_feature = tf.reshape(refine_feature, [1, tf.cast(h, tf.int32), tf.cast(w, tf.int32), cfgs.FPN_CHANNEL])\n\n # refine_feature = tf.reshape(refine_feature, [1, tf.cast(feature_size[1], tf.int32),\n # tf.cast(feature_size[0], tf.int32), 256])\n\n return refine_feature + feature\n\n def refine_stage(self, input_img_batch, gtboxes_batch_r, box_pred_list, cls_prob_list, proposal_list,\n feature_pyramid, gpu_id, pos_threshold, neg_threshold,\n stage, proposal_filter=False):\n with tf.variable_scope('refine_feature_pyramid{}'.format(stage)):\n refine_feature_pyramid = {}\n refine_boxes_list = []\n\n for box_pred, cls_prob, proposal, stride, level in \\\n zip(box_pred_list, cls_prob_list, proposal_list,\n cfgs.ANCHOR_STRIDE, cfgs.LEVEL):\n\n if proposal_filter:\n box_pred = tf.reshape(box_pred, [-1, self.num_anchors_per_location, 5]) # shape: (W x H, A, 5)\n proposal = tf.reshape(proposal, [-1, self.num_anchors_per_location, 5 if self.method == 'R' else 4])\n cls_prob = tf.reshape(cls_prob, [-1, self.num_anchors_per_location, cfgs.CLASS_NUM]) # shape: (W x H, A, K)\n\n # classfication\n cls_max_prob = tf.reduce_max(cls_prob, axis=-1) # shape: (W x H, A)\n # select best anchor\n box_pred_argmax = tf.cast(tf.reshape(tf.argmax(cls_max_prob, axis=-1), [-1, 1]), tf.int32) # shape: (W x H, 1)\n indices = tf.cast(tf.cumsum(tf.ones_like(box_pred_argmax), axis=0), tf.int32) - tf.constant(1, tf.int32) # shape: (W x H, 1)\n # axis 1: feature map index, axis 2: best anchor index\n indices = tf.concat([indices, box_pred_argmax], axis=-1) # shape: (W x H, 2)\n\n # get predict box coordinate\n box_pred = tf.reshape(tf.gather_nd(box_pred, indices), [-1, 5]) # shape: (W x H, 5)\n proposal = tf.reshape(tf.gather_nd(proposal, indices), [-1, 5 if self.method == 'R' else 4])\n\n if cfgs.METHOD == 'H':\n x_c = (proposal[:, 2] + proposal[:, 0]) / 2\n y_c = (proposal[:, 3] + proposal[:, 1]) / 2\n h = proposal[:, 2] - proposal[:, 0] + 1\n w = proposal[:, 3] - proposal[:, 1] + 1\n theta = -90 * tf.ones_like(x_c)\n proposal = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))\n else:\n box_pred = tf.reshape(box_pred, [-1, 5])\n proposal = tf.reshape(proposal, [-1, 5])\n\n bboxes = 
bbox_transform.rbbox_transform_inv(boxes=proposal, deltas=box_pred)\n refine_boxes_list.append(bboxes)\n center_point = bboxes[:, :2] / stride\n\n refine_feature_pyramid[level] = self.refine_feature_op(points=center_point,\n feature_map=feature_pyramid[level],\n name=level)\n\n refine_box_pred_list, refine_cls_score_list, refine_cls_prob_list = self.refine_net(refine_feature_pyramid,\n 'refine_net{}'.format(stage))\n\n refine_box_pred = tf.concat(refine_box_pred_list, axis=0)\n refine_cls_score = tf.concat(refine_cls_score_list, axis=0)\n # refine_cls_prob = tf.concat(refine_cls_prob_list, axis=0)\n refine_boxes = tf.concat(refine_boxes_list, axis=0)\n\n if self.is_training:\n with tf.variable_scope('build_refine_loss{}'.format(stage)):\n refine_labels, refine_target_delta, refine_box_states, refine_target_boxes = tf.py_func(\n func=refinebox_target_layer,\n inp=[gtboxes_batch_r, refine_boxes, pos_threshold, neg_threshold, gpu_id],\n Tout=[tf.float32, tf.float32,\n tf.float32, tf.float32])\n\n self.add_anchor_img_smry(input_img_batch, refine_boxes, refine_box_states, 1)\n\n refine_cls_loss = losses.focal_loss(refine_labels, refine_cls_score, refine_box_states)\n if cfgs.USE_IOU_FACTOR:\n refine_reg_loss = losses.iou_smooth_l1_loss(refine_target_delta, refine_box_pred,\n refine_box_states, refine_target_boxes,\n refine_boxes, is_refine=True)\n else:\n refine_reg_loss = losses.smooth_l1_loss_atan(refine_target_delta, refine_box_pred, refine_box_states)\n\n self.losses_dict['refine_cls_loss{}'.format(stage)] = refine_cls_loss * cfgs.CLS_WEIGHT\n self.losses_dict['refine_reg_loss{}'.format(stage)] = refine_reg_loss * cfgs.REG_WEIGHT\n\n return refine_box_pred_list, refine_cls_prob_list, refine_boxes_list"
]
| [
[
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.stack",
"tensorflow.ceil",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.concat",
"tensorflow.argmax",
"tensorflow.train.Saver",
"tensorflow.contrib.slim.get_model_variables",
"tensorflow.constant",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.floor",
"tensorflow.abs",
"numpy.array",
"tensorflow.py_func",
"tensorflow.gather_nd",
"numpy.allclose",
"tensorflow.contrib.slim.get_or_create_global_step",
"tensorflow.name_scope",
"tensorflow.contrib.slim.conv2d",
"tensorflow.multiply",
"tensorflow.summary.image",
"tensorflow.greater_equal",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.stop_gradient"
]
]
|
dabingrosewood/MasterThesisProj | [
"7e40fa2395468a1bccef429362a61ed8515ecc11"
]
| [
"PonyGE2/src/fitness/supervised_learning/supervised_learning.py"
]
| [
"import numpy as np\nnp.seterr(all=\"raise\")\n\nfrom algorithm.parameters import params\nfrom utilities.fitness.get_data import get_data\nfrom utilities.fitness.math_functions import *\nfrom utilities.fitness.added_math import *\nfrom utilities.fitness.optimize_constants import optimize_constants\n\nfrom fitness.base_ff_classes.base_ff import base_ff\nfrom utilities.stats.eval_counter import eval_counter\n\n\nclass supervised_learning(base_ff):\n \"\"\"\n Fitness function for supervised learning, ie regression and\n classification problems. Given a set of training or test data,\n returns the error between y (true labels) and yhat (estimated\n labels).\n\n We can pass in the error metric and the dataset via the params\n dictionary. Of error metrics, eg RMSE is suitable for regression,\n while F1-score, hinge-loss and others are suitable for\n classification.\n\n This is an abstract class which exists just to be subclassed:\n should not be instantiated.\n \"\"\"\n\n def __init__(self):\n # Initialise base fitness function class.\n super().__init__()\n\n # Get training and test data\n self.training_in, self.training_exp, self.test_in, self.test_exp = \\\n get_data(params['DATASET_TRAIN'], params['DATASET_TEST'])\n\n # Find number of variables.\n self.n_vars = np.shape(self.training_in)[0]\n\n # Regression/classification-style problems use training and test data.\n if params['DATASET_TEST']:\n self.training_test = True\n\n @eval_counter\n def evaluate(self, ind, **kwargs):\n \"\"\"\n Note that math functions used in the solutions are imported from either\n utilities.fitness.math_functions or called from numpy.\n\n :param ind: An individual to be evaluated.\n :param kwargs: An optional parameter for problems with training/test\n data. Specifies the distribution (i.e. training or test) upon which\n evaluation is to be performed.\n :return: The fitness of the evaluated individual.\n \"\"\"\n\n dist = kwargs.get('dist', 'training')\n\n if dist == \"training\":\n # Set training datasets.\n x = self.training_in\n y = self.training_exp\n\n elif dist == \"test\":\n # Set test datasets.\n x = self.test_in\n y = self.test_exp\n\n else:\n raise ValueError(\"Unknown dist: \" + dist)\n\n if params['OPTIMIZE_CONSTANTS']:\n # if we are training, then optimize the constants by\n # gradient descent and save the resulting phenotype\n # string as ind.phenotype_with_c0123 (eg x[0] +\n # c[0] * x[1]**c[1]) and values for constants as\n # ind.opt_consts (eg (0.5, 0.7). 
Later, when testing,\n # use the saved string and constants to evaluate.\n if dist == \"training\":\n return optimize_constants(x, y, ind)\n\n else:\n # this string has been created during training\n phen = ind.phenotype_consec_consts\n c = ind.opt_consts\n # phen will refer to x (ie test_in), and possibly to c\n yhat = eval(phen)\n assert np.isrealobj(yhat)\n\n # let's always call the error function with the\n # true values first, the estimate second\n # print(\"shape of y, yhat=\",np.shape(y),np.shape(yhat))\n return params['ERROR_METRIC'](y, yhat)\n\n else:\n # phenotype won't refer to C\n # print(ind.phenotype)\n # print(eval(ind.phenotype))\n yhat = eval(ind.phenotype)\n assert np.isrealobj(yhat)\n\n # let's always call the error function with the true\n # values first, the estimate second\n\n\n if type(yhat) in [float,np.float64]:\n # this is the situation that the phenotype is one float.\n # Then to build an array\n\n # print(\"ind.phenotype==\",ind.phenotype)\n yhat=np.full(np.shape(y),yhat,dtype=float)\n\n elif yhat.size==1:\n # for the problem that yhat is ndarray but size is 1\n yhat=np.full(np.shape(y), yhat, dtype=float)\n\n # print(\"shape of y, yhat=\", np.shape(y), np.shape(yhat),\"type(y)=\",type(y),\"type(yha=)\",type(yhat),yhat,ind.phenotype)\n # for test\n\n y = y.copy(order='C')\n yhat = yhat.copy(order='C')\n\n # print(y,yhat)\n # print(\"fit=\",params['ERROR_METRIC'](y, yhat))\n\n return params['ERROR_METRIC'](y, yhat)\n"
]
| [
[
"numpy.isrealobj",
"numpy.seterr",
"numpy.shape"
]
]
|
AlistairChild/Percolate | [
"064ef1d16b6bc50f2ebab251c439b30386378235"
]
| [
"percolate/toolkit/Transpose_spectra.py"
]
| [
"\"\"\"Copyright (c) 2021 Alistair Child\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\"\"\"\n\n# standard\nimport numpy as np\n\n# created functions.\nfrom percolate.toolkit.find_array_equivalent import find_array_equivalent\n\n\ndef Transpose_spectra(energy, absorption, args):\n \"\"\"Transpose_spectra(energy, absorption, args) requires args.x_value_for_transpose and args.action\"\"\"\n loc_energy = np.array(energy)\n loc_absorption = np.array(absorption)\n\n if len(np.array(loc_absorption).shape) > 1:\n n_files = len(np.array(loc_absorption))\n else:\n n_files = 1\n\n transposed_spectra = []\n\n for i in range(n_files):\n\n if args.action == \"on\":\n transposed_spectra_i = (\n loc_absorption[i]\n - loc_absorption[i][\n find_array_equivalent(loc_energy[i], args.x_value_for_transpose)\n ]\n )\n\n else:\n transposed_spectra_i = loc_absorption[i]\n\n transposed_spectra.append(transposed_spectra_i)\n\n return transposed_spectra\n"
]
| [
[
"numpy.array"
]
]
|