repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
rahuls02/Image-Noise-Reduction | ["e2fecf2c84f83de812cc2b9cb391ccec1b4b3dc6"] | ["criteria/lpips/lpips.py"] | [
"import torch\nimport torch.nn as nn\n\nfrom criteria.lpips.networks import get_network, LinLayers\nfrom criteria.lpips.utils import get_state_dict\n\n\nclass LPIPS(nn.Module):\n r\"\"\"Creates a criterion that measures\n Learned Perceptual Image Patch Similarity (LPIPS).\n Arguments:\n net_type (str): the network type to compare the features:\n 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.\n version (str): the version of LPIPS. Default: 0.1.\n \"\"\"\n\n def __init__(self, net_type: str = \"alex\", version: str = \"0.1\"):\n\n assert version in [\"0.1\"], \"v0.1 is only supported now\"\n\n super(LPIPS, self).__init__()\n\n # pretrained network\n self.net = get_network(net_type).to(\"cuda\")\n\n # linear layers\n self.lin = LinLayers(self.net.n_channels_list).to(\"cuda\")\n self.lin.load_state_dict(get_state_dict(net_type, version))\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n feat_x, feat_y = self.net(x), self.net(y)\n\n diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]\n res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]\n\n return torch.sum(torch.cat(res, 0)) / x.shape[0]\n"
] | [["torch.cat"]] |
PloxKevin/Weak-Supervision | ["79bd8690309ec161c5e0d0a5715dfa61f7baf786"] | ["py_img_seg_eval/unit_tests.py"] | [
"#!/usr/bin/python\n\n'''\nMartin Kersner, [email protected]\n2015/11/30\n\nUnit tests for eval_segm.py.\n'''\n\nimport numpy as np\nimport eval_segm as es\nimport unittest\n\nclass pixel_accuracy_UnitTests(unittest.TestCase):\n '''\n Wrong inputs\n '''\n def test1dInput(self):\n mat = np.array([0])\n self.assertRaises(IndexError, es.pixel_accuracy, mat, mat)\n\n def testDiffDim(self):\n mat0 = np.array([[0,0], [0,0]])\n mat1 = np.array([[0,0,0], [0,0,0]])\n self.assertRaisesRegexp(es.EvalSegErr, \"DiffDim\", es.pixel_accuracy, mat0, mat1)\n\n '''\n Correct inputs\n '''\n def testOneClass(self):\n segm = np.array([[0,0], [0,0]])\n gt = np.array([[0,0], [0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, 1.0)\n\n def testTwoClasses0(self):\n segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, 0)\n\n def testTwoClasses1(self):\n segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (9.0)/(10.0))\n\n def testTwoClasses2(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (9.0+0.0)/(9.0+1.0))\n\n def testThreeClasses0(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (8.0+0.0+0.0)/(8.0+1.0+1.0))\n\n def testThreeClasses1(self):\n segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (8.0+0.0)/(9.0+1.0))\n\n def testFourClasses0(self):\n segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (7.0+0.0)/(9.0+1.0))\n\n def testFourClasses1(self):\n segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (7.0+1.0)/(9.0+1.0))\n\n def testFiveClasses0(self):\n segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])\n gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])\n\n res = es.pixel_accuracy(segm, gt)\n self.assertEqual(res, (5.0+1.0+1.0)/(8.0+1.0+1.0))\n\nclass mean_accuracy_UnitTests(unittest.TestCase):\n '''\n Wrong inputs\n '''\n def test1dInput(self):\n mat = np.array([0])\n self.assertRaises(IndexError, es.mean_accuracy, mat, mat)\n\n def testDiffDim(self):\n mat0 = np.array([[0,0], [0,0]])\n mat1 = np.array([[0,0,0], [0,0,0]])\n self.assertRaisesRegexp(es.EvalSegErr, \"DiffDim\", es.mean_accuracy, mat0, mat1)\n\n '''\n Correct inputs\n '''\n def testOneClass(self):\n segm = np.array([[0,0], [0,0]])\n gt = np.array([[0,0], [0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, 1.0)\n\n def testTwoClasses0(self):\n segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, 0)\n\n def testTwoClasses1(self):\n segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, 9.0/10.0)\n\n def testTwoClasses2(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([9.0/9.0, 0.0/1.0]))\n\n def testThreeClasses0(self):\n segm = 
np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([8.0/8.0, 0.0/1.0, 0.0/1.0]))\n\n def testThreeClasses1(self):\n segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([8.0/9.0, 0.0/1.0]))\n\n def testFourClasses0(self):\n segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([7.0/9.0, 0.0/1.0]))\n\n def testFourClasses1(self):\n segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([7.0/9.0, 1.0/1.0]))\n\n def testFiveClasses0(self):\n segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])\n gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])\n\n res = es.mean_accuracy(segm, gt)\n self.assertEqual(res, np.mean([5.0/8.0, 1.0, 1.0]))\n\nclass mean_IU_UnitTests(unittest.TestCase):\n '''\n Wrong inputs\n '''\n def test1dInput(self):\n mat = np.array([0])\n self.assertRaises(IndexError, es.mean_IU, mat, mat)\n\n def testDiffDim(self):\n mat0 = np.array([[0,0], [0,0]])\n mat1 = np.array([[0,0,0], [0,0,0]])\n self.assertRaisesRegexp(es.EvalSegErr, \"DiffDim\", es.mean_IU, mat0, mat1)\n\n '''\n Correct inputs\n '''\n def testOneClass(self):\n segm = np.array([[0,0], [0,0]])\n gt = np.array([[0,0], [0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, 1.0)\n\n def testTwoClasses0(self):\n segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, 0)\n\n def testTwoClasses1(self):\n segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([0.9]))\n\n def testTwoClasses2(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([0.9, 0]))\n\n def testThreeClasses0(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([8.0/10.0, 0, 0]))\n\n def testThreeClasses1(self):\n segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([8.0/10.0, 0]))\n\n def testFourClasses0(self):\n segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([7.0/10.0, 0]))\n\n def testFourClasses1(self):\n segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([7.0/9.0, 1]))\n\n def testFiveClasses0(self):\n segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])\n gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])\n\n res = es.mean_IU(segm, gt)\n self.assertEqual(res, np.mean([5.0/8.0, 1, 1.0/2.0]))\n\nclass frequency_weighted_IU_UnitTests(unittest.TestCase):\n '''\n Wrong inputs\n '''\n def test1dInput(self):\n mat = np.array([0])\n self.assertRaises(IndexError, es.frequency_weighted_IU, mat, mat)\n\n def testDiffDim(self):\n mat0 = np.array([[0,0], [0,0]])\n mat1 = np.array([[0,0,0], [0,0,0]])\n self.assertRaisesRegexp(es.EvalSegErr, \"DiffDim\", es.frequency_weighted_IU, mat0, mat1)\n\n '''\n 
Correct inputs\n '''\n def testOneClass(self):\n segm = np.array([[0,0], [0,0]])\n gt = np.array([[0,0], [0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, 1.0)\n\n def testTwoClasses0(self):\n segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, 0)\n\n def testTwoClasses1(self):\n segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, (1.0/10.0)*(10.0*9.0/10.0))\n\n def testTwoClasses2(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n # Almost equal!\n self.assertAlmostEqual(res, (1.0/10.0)*((9.0*9.0/10.0)+(1.0*0.0/1.0))) \n\n def testThreeClasses0(self):\n segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n # Almost equal!\n self.assertAlmostEqual(res, (1.0/10.0)*((8.0*8.0/10.0)+(1.0*0.0/1.0)+(1.0*0.0/1.0)))\n\n def testThreeClasses1(self):\n segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n # Almost equal!\n self.assertAlmostEqual(res, (1.0/10.0)*((9.0*8.0/10.0)+(1.0*0.0/1.0)))\n\n def testFourClasses0(self):\n segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, (1.0/10.0)*((9.0*7.0/10.0)+(1.0*0.0/1.0)))\n\n def testFourClasses1(self):\n segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])\n gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, (1.0/10.0)*((9.0*7.0/9.0)+(1.0*1.0/1.0)))\n\n def testFiveClasses0(self):\n segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])\n gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])\n\n res = es.frequency_weighted_IU(segm, gt)\n self.assertEqual(res, (1.0/10.0)*((8.0*5.0/8.0)+(1.0*1.0/1.0)+(1.0*1.0/2.0)))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [["numpy.array", "numpy.mean"]] |
saturnaxis/CBP_stability | ["435d5bdb3212d5696d5fbf7cc6737476a08c476c"] | ["plot_figures/plot_Fig1_2.py"] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom scipy.interpolate import griddata\nimport matplotlib.colors as colors\nfrom matplotlib import ticker\nimport matplotlib.cm as cm\nimport sys\nfrom matplotlib import rcParams\n\nrcParams.update({'font.size': 22})\n\nfig_num = int(sys.argv[1])\n\ncmap = cm.gnuplot_r\nvmin = -3.\nvmax = 0.\nmy_cmap=cm.get_cmap(cmap)\nnorm = colors.Normalize(vmin,vmax)\ncmmapable =cm.ScalarMappable(norm,my_cmap)\ncmmapable.set_array(range(0,1))\ncmap.set_under('gray')\ncmap.set_over('white')\n\nfs = 'x-large'\nwidth = 13.\naspect = 1.\ntscale = 1e5\n\nval = [0.0,0.1,0.3,0.5]\nhome = os.getcwd() + \"/\"\ndatalist = [f for f in os.listdir('Fig%i_data' % fig_num) if f.endswith('.txt')]\n\nf, axarr = plt.subplots(4,4)\nf.set_size_inches(aspect*width,width)\n\nfor dat in datalist:\n\n\tax_mu = val.index(float(dat[8:11]))\n\tax_ecc = val.index(float(dat[12:15]))\n\n\tax = axarr[ax_mu,ax_ecc]\n\n\tdata = np.genfromtxt(home+\"Fig1_data/\"+dat,delimiter = ',',comments = '#')\n\tunstab = np.where(data[:,-1]<tscale)[0]\n\tstab = np.where(data[:,-1]==tscale)[0]\n\n\tX = data[:,1]\n\tY = data[:,0]\n\tidx_cr = np.where(np.logical_and(X==90.,data[:,-1]==tscale))[0]\n\n\tZ = data[:,2]\n\tZ[unstab] = 1.5\n\ty_cr = []\n\tfor i in xrange(0,401):\n\t\tyi = 1.01 + i*0.01\n\t\trow_x = np.where(np.abs(Y-yi)<1e-6)[0]\n\t\tall_stab = np.where(data[row_x,-1]==tscale)[0]\n\t\tif len(all_stab)>180:\n\t\t\ty_cr.append(yi)\n\n\n\txi = np.linspace(0,360,181)\n\tyi = np.linspace(1.,5.,401)\n\tzi = griddata((X,Y),Z,(xi[None,:],yi[:,None]),method = 'nearest')\n\n\tbins =200 \n\n\tcs = ax.contourf(xi,yi,np.log10(zi),bins, cmap = cmap,vmin=vmin,vmax=vmax)#,locator=ticker.LogLocator() ,norm=colors.LogNorm())\n\n\tif len(y_cr)>0:\n\t\ta_crit = np.min(y_cr)\n\t\tax.axhline(a_crit,linestyle='-',color='c',lw=2)\t\n\t\n\tax.tick_params(axis='both', direction='out',length = 4.0, width = 4.0)\n\tax.set_xticks(np.arange(0,360+90,90))\n\tyticks = np.arange(1.,6.0,1.)\n\n\tax.set_yticks(yticks)\n\tax.set_ylim(1,5.)\n\tmu = val[ax_mu]\n\teb = val[ax_ecc]\n\tif mu == 0:\n\t\tax.text(1.0,0.9,\"(%1.3f,%1.1f)\" % (0.001,eb), color='k',fontsize='medium',horizontalalignment='right',weight='bold',transform=ax.transAxes)\n\telse:\n\t\tax.text(1.0,0.9,\"(%1.1f,%1.1f)\" % (mu,eb), color='k',fontsize='medium',horizontalalignment='right',weight='bold',transform=ax.transAxes)\n\tax.text(0.0,0.02,\"$a_{c}$ = %1.2f\" % a_crit, color='c',fontsize='small',horizontalalignment='left',weight='bold',transform=ax.transAxes)\n\tif ax_ecc > 0:\n\t\tax.set_yticklabels([])\n\tif ax_ecc == 0 and ax_mu>0:\n\t\tax.set_yticklabels([\"%i\" % i for i in xrange(1,5)])\n\tif ax_mu == 3 and ax_ecc<3:\n\t\tax.set_xticklabels([\"%i\" % (90*i) for i in xrange(0,4)])\n\n\n# Fine-tune figure; make subplots close to each other and hide x ticks for\n# all but bottom plot.\nf.subplots_adjust(hspace=0,wspace=0)\nplt.setp([a.get_xticklabels() for a in f.axes[:-4]], visible=False)\n\ncolor_label='$\\log_{10}[e_{max}]$'\ncax = f.add_axes([0.92,0.11,0.015,0.77])\ncbar = plt.colorbar(cmmapable,cax=cax,orientation='vertical')#,ticks=np.linspace(vmin,vmax,11)) #draw colorbar ,ticks =[0.0,0.2,0.4,0.6,0.8,1.0]\ncbar.set_label(color_label,fontsize=fs)\n\ncax.tick_params(axis='both', direction='out',length = 4.0, width = 4.0)\n\n\nf.text(0.5, 0.04, \"Mean Anomaly (deg.)\", ha='center',fontsize = fs)\nf.text(0.04, 0.5, \"$a_p/a_{bin}$\", va='center', rotation='vertical',fontsize = fs)\n\naxarr[0,3].text(1.0,1.02,\"$\\lambda_{bin}$ = 
%i$^\\circ$\" % (0), color='k',fontsize='small',horizontalalignment='right',weight='bold',transform=ax.transAxes)\n\nplt.savefig(\"Q18_Fig%i.png\" % fig_num, dpi = 300, bbox_inches = 'tight')\nplt.close()\n"
] | [["numpy.abs", "numpy.linspace", "numpy.min", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "matplotlib.pyplot.savefig", "numpy.genfromtxt", "matplotlib.pyplot.colorbar", "numpy.log10", "matplotlib.cm.ScalarMappable", "matplotlib.rcParams.update", "matplotlib.pyplot.close", "matplotlib.cm.get_cmap", "numpy.logical_and", "scipy.interpolate.griddata", "numpy.where"]] |
wengzehang/keypoint_humanoids | ["0630037b2d8063146728670643a96b73632643e8"] | ["models/kp_comp.py"] | [
"# Ref: https://github.com/hansen7/OcCo\n# TODO: Reimplement chamfer distance in Torch and add completion/topo loss back; decoder\n\nfrom models.pcn_util import PCNEncoder\nimport torch, torch.nn as nn, torch.nn.functional as F\n\nclass View(nn.Module):\n def __init__(self, shape):\n super(View, self).__init__()\n self.shape = shape\n\n def forward(self, x):\n return x.view(*self.shape)\n\n\n\nclass get_model(nn.Module):\n def __init__(self, **kwargs):\n super(get_model, self).__init__()\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.numKeypoint = kwargs['num_cp']\n # self.__dict__.update(kwargs) # to update args, num_coarse, grid_size, grid_scale\n\n ''' === encoder for extracting feature === '''\n self.feat = PCNEncoder(global_feat=True, channel=3)\n\n ''' === detection layer === '''\n self.fc1 = nn.Linear(1024, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, self.numKeypoint * 3)\n\n self.dp1 = nn.Dropout(p=0.3)\n self.bn1 = nn.BatchNorm1d(512)\n\n self.view = View((3, self.numKeypoint))\n\n ''' === completion layer === '''\n # batch normalisation will destroy limit the expression\n self.folding1 = nn.Sequential(\n nn.Linear(self.numKeypoint * 3, 1024),\n # nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Linear(1024, 1024),\n # nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Linear(1024, self.num_coarse * 3))\n\n self.folding2 = nn.Sequential(\n nn.Conv1d(1024+2+3, 512, 1),\n # nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Conv1d(512, 512, 1),\n # nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Conv1d(512, 3, 1))\n\n def forward(self, input_pc):\n x = self.feat(input_pc)\n x = F.relu(self.bn1(self.fc1(x)))\n x = self.dp1(x)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n x = x.view([-1, self.numKeypoint, 3])\n return x\n\n\n\n coarse = self.folding1(x)\n coarse = coarse.view(-1, self.num_coarse, 3)\n\n grid = self.build_grid(x.shape[0])\n grid_feat = grid.repeat(1, self.num_coarse, 1)\n\n point_feat = self.tile(self.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])\n point_feat = point_feat.view([-1, self.num_fine, 3])\n\n global_feat = self.tile(self.expand_dims(feature, 1), [1, self.num_fine, 1])\n feat = torch.cat([grid_feat, point_feat, global_feat], dim=2)\n\n center = self.tile(self.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])\n center = center.view([-1, self.num_fine, 3])\n\n fine = self.folding2(feat.transpose(2, 1)).transpose(2, 1) + center\n\n return coarse, fine\n\n\n\n return x_kp, x\n\n\nclass get_loss(nn.Module):\n def __init__(self):\n super(get_loss, self).__init__()\n\n def forward(self, pred, target):\n loss = F.mse_loss(pred, target)\n return loss\n\n\nif __name__ == '__main__':\n model = get_model().cuda()\n print(model)\n input_pc = torch.rand(7, 3, 1024).type(torch.float32).cuda()\n x = model(input_pc)"
] | [["torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.cat", "torch.nn.functional.mse_loss", "torch.nn.Linear", "torch.rand", "torch.cuda.is_available", "torch.nn.Conv1d", "torch.nn.ReLU"]] |
KentaKawamata/probreg | ["ab29c01353f5ca490653172523d351cce26017c8"] | ["probreg/transformation.py"] | [
"import abc\nimport six\nimport numpy as np\nimport open3d as o3\nfrom . import math_utils as mu\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Transformation():\n def __init__(self):\n pass\n\n def transform(self, points,\n array_type=o3.Vector3dVector):\n if isinstance(points, array_type):\n return array_type(self._transform(np.asarray(points)))\n return self._transform(points)\n\n @abc.abstractmethod\n def _transform(self, points):\n return points\n\n\nclass RigidTransformation(Transformation):\n def __init__(self, rot=np.identity(3),\n t=np.zeros(3), scale=1.0):\n super(RigidTransformation, self).__init__()\n self.rot = rot\n self.t = t\n self.scale = scale\n\n def _transform(self, points):\n return self.scale * np.dot(points, self.rot.T) + self.t\n\n def inverse(self):\n return RigidTransformation(self.rot.T, -np.dot(self.rot.T, self.t),\n 1.0 / self.scale)\n\n\nclass AffineTransformation(Transformation):\n def __init__(self, b=np.identity(3),\n t=np.zeros(3)):\n super(AffineTransformation, self).__init__()\n self.b = b\n self.t = t\n\n def _transform(self, points):\n return np.dot(points, self.b.T) + self.t\n\n\nclass NonRigidTransformation(Transformation):\n def __init__(self, w, points, beta=2.0):\n super(NonRigidTransformation, self).__init__()\n self.g = mu.rbf_kernel(points, points, beta)\n self.w = w\n\n def _transform(self, points):\n return points + np.dot(self.g, self.w)\n\n\nclass TPSTransformation(Transformation):\n \"\"\"Thin Plate Spline transformaion.\n \"\"\"\n def __init__(self, a, v, control_pts,\n kernel=mu.tps_kernel):\n super(TPSTransformation, self).__init__()\n self.a = a\n self.v = v\n self.control_pts = control_pts\n self._kernel = kernel\n\n def prepare(self, landmarks):\n control_pts = self.control_pts\n m, d = landmarks.shape\n n, _ = control_pts.shape\n pm = np.c_[np.ones((m, 1)), landmarks]\n pn = np.c_[np.ones((n, 1)), control_pts]\n u, _, _ = np.linalg.svd(pn)\n pp = u[:, d + 1:]\n kk = self._kernel(control_pts, control_pts)\n uu = self._kernel(landmarks, control_pts)\n basis = np.c_[pm, np.dot(uu, pp)]\n kernel = np.dot(pp.T, np.dot(kk, pp))\n return basis, kernel\n\n def transform_basis(self, basis):\n return np.dot(basis, np.r_[self.a, self.v])\n\n def _transform(self, points):\n basis, _ = self.prepare(points)\n return self.transform_basis(basis)"
] | [["numpy.dot", "numpy.linalg.svd", "numpy.asarray", "numpy.ones", "numpy.identity", "numpy.zeros"]] |
Ayush8120/Improved-MR-DFS-PX4 | ["1f64db7b801cb97a2ccb26a7de95d1c92b0666f4"] | ["ayush/src/uav_city_4.py"] | [
"#!/usr/bin/env python3\n\nimport rospy\nimport mavros\nfrom geometry_msgs.msg import PoseStamped\nfrom mavros_msgs.msg import *\nfrom mavros_msgs.srv import *\n\nimport pprint\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom ayush.second_step_on_vertex_visit import second_step_on_vertex_visit\nfrom ayush.initialize_graph import Robot,Vertex, build_graph, find_shortest_path\nfrom ayush.first_step_on_vertex_visit import Id,what_to_do_if_next_node_known,first_step_on_arriving_at_vertex\nfrom ayush.get_incidence_matrix import get_incidence_matrix\nfrom ayush.order_matrix import completed,out,unexplored\npp = pprint.PrettyPrinter(indent=8)\n\nspawn_location = [-24.1095,284.1490,8.63479,\n -22.5968,284.1330,8.63479,\n -21.0750,283.9410,8.63479,\n -24.1239,282.6820,8.63479,\n -23.0024,282.5940,8.63479,\n -22.1617,282.5870,8.63479,\n -21.0457,282.5580,8.63479,\n -24.1631,281.1710,8.63479,\n -22.5350,281.0980,8.63479,\n -20.9154,281.1750,8.63479\n ]\n# callback method for state sub\ncurrent_state = State() \noffb_set_mode = SetMode\nnew_pose = PoseStamped()\n\ndef state_cb(state):\n global current_state\n current_state = state\n\ndef position_cb(Pose):\n global new_pose\n new_pose = Pose\n\nlocal_pos_pub = rospy.Publisher('uav4/mavros/setpoint_position/local', PoseStamped, queue_size=1)\n\nrospy.Subscriber('uav4/mavros/state', State, state_cb)\nrospy.Subscriber('uav4/mavros/local_position/pose', PoseStamped, position_cb)\n\n#state_sub = rospy.Subscriber(mavros.get_topic('uav1','state'), State, state_cb)\narming_client = rospy.ServiceProxy('uav4/mavros/cmd/arming', mavros_msgs.srv.CommandBool)\nset_mode_client = rospy.ServiceProxy('uav4/mavros/set_mode', mavros_msgs.srv.SetMode)\n\npose = PoseStamped()\n#Leaf node cordinates with elevation\npose.pose.position.x = spawn_location[12] #CHANGE TODO # Hover over spawn location # personal\npose.pose.position.y = spawn_location[13]\npose.pose.position.z = spawn_location[14] + 5 \n\nthreshold = 0.1\n\n# ANIMATION FUNCTION\ndef func(num, dataSet, line, redDots):\n # NOTE: there is no .set_data() for 3 dim data...\n line.set_data(dataSet[0:2, :num]) \n line.set_3d_properties(dataSet[2, :num]) \n redDots.set_data(dataSet[0:2, :num]) \n redDots.set_3d_properties(dataSet[2, :num]) \n return line\n\ndef algorithm():\n #Topography \n K = 10\n J = 24\n count = 0\n vertex = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\"]\n edges = [\"AC\",\"BC\",\"CD\",\"DE\",\"EF\",\"EK\",\"FG\",\"GH\",\"HI\",\"HJ\",\"KL\",\"KO\",\"LM\",\"LN\",\"OP\",\"PQ\",\"PS\",\"QR\",\"ST\",\"SU\",\"UV\",\"VW\",\"VX\"]\n robo_vertex = [\"A\",\"B\",\"I\",\"J\",\"M\",\"N\",\"R\",\"T\",\"W\",\"X\"]\n XData = [2.50, 3.50,3,3,3,1.50,1.50,1.50,1,2,3.50,3,2.50,3.50,4.50,4.50,3.50,3.50,5,4.50,5.50,5.50,5,6]\n YData = [1,1,2,3,4,5,6,7,8,8,5,6,7,7,6,7,8,9,8,9,9,10,11,11] # Got from MATLAB\n [graph,edges_decomp] = build_graph(edges)\n global spawn_location\n G = nx.Graph()\n # --------------------------------------------------------------\n for i in range(J):\n G.add_node(chr(i+65))\n #print('LAMBA')\n\n for ed in edges_decomp:\n # print(*ed)\n G.add_edge(*ed)\n # nx.draw(G,with_labels = True, font_weight = 'bold')\n #--------------------------------------------------------------\n\n #plt.show\n #print(G.nodes)\n #print(G.edges)\n\n incidence_matrix = 
get_incidence_matrix(XData,YData,G)\n #pp.pprint(incidence_matrix)\n\n R = []\n for j in range(K):\n R.append(Robot(j,robo_vertex,incidence_matrix))\n\n #initializing of V : list of Vertex objects\n V = []\n for j in range(J):\n V.append(Vertex(vertex[j],edges,incidence_matrix))#asdf\n\n\n #print(\"The first mandatory push:\")\n #print('')\n\n\n for k in range(K):\n\n R[k].setpoint_list.append(spawn_location[3*k])\n R[k].setpoint_list.append(spawn_location[3*k + 1])\n R[k].setpoint_list.append(spawn_location[3*k +2] + k+1)\n #print(R[k].present_location)\n start = R[k].present_location\n #print(V[ord(R[k].present_location) - 65].neighbors[0])\n end = V[ord(R[k].present_location) - 65].neighbors[0]\n #print(-1*incidence_matrix[ord(start) - 65,ord(end)-65])\n top = np.array([-1*incidence_matrix[ord(start) - 65,ord(end)-65]])\n bottom = np.array([-1*incidence_matrix[ord(end) - 65,ord(start)-65]])\n col_vector = np.vstack((top,bottom))\n #Setpoint - 1 as the leaf node location while the height is kept constant\n R[k].setpoint_list.append(XData[ord(R[k].present_location) - 65]*25)\n R[k].setpoint_list.append(YData[ord(R[k].present_location) - 65]*25)\n R[k].setpoint_list.append(spawn_location[3*k +2] + k+1)\n \n #print(\"The {e} robot is currently at {f}\".format(e=k,f=R[k].present_location))\n #print(\"The next node chosen is {}\".format(V[ord(R[k].present_location) - 65].neighbors[0]))\n \n R[k].setpoint_list.append(XData[ord(V[ord(R[k].present_location) - 65].neighbors[0]) - 65]*25)\n R[k].setpoint_list.append(YData[ord(V[ord(R[k].present_location) - 65].neighbors[0]) - 65]*25)\n R[k].setpoint_list.append(spawn_location[3*k +2] + k+1)\n rospy.loginfo('Setpoint Addded!')\n id,R,V = what_to_do_if_next_node_known(R,k,V,1,R[k].present_location,V[ord(R[k].present_location) - 65].neighbors[0],incidence_matrix=incidence_matrix)\n #flying_function(XData[ord(V[ord(R[k].present_location) - 65].neighbors[0]) - 65],YData[ord(V[ord(R[k].present_location) - 65].neighbors[0]) - 65],k+1)\n R[k].next_edge_decided,count = second_step_on_vertex_visit(graph, V,R,k,count)\n\n #print('The next edge selected by - ' + str(k) + '- robot is' + str(R[k].next_edge_decided))\n R[k].setpoint_list.append(XData[ord(R[k].next_edge_decided.replace(R[k].present_location,'')) - 65]*25)\n R[k].setpoint_list.append(YData[ord(R[k].next_edge_decided.replace(R[k].present_location,'')) - 65]*25)\n R[k].setpoint_list.append(spawn_location[3*k +2] + k+1)\n rospy.loginfo('Setpoint Addded!')\n #print('')\n\n #rospy.sleep(6)\n\n #print(\"This is the loop part which continues till the declaration of completion\")\n while(count != K):\n for z in range(K):\n if R[z].count != 1:\n #print(\"{z} robot Travelling to the selected edge :{e}\".format(z = z , e = R[z].next_edge_decided) )\n if R[z].next_edge_decided !=0:\n id,R,V = what_to_do_if_next_node_known(R,z,V,2,R[z].present_location, R[z].next_edge_decided.replace(R[z].present_location,''),incidence_matrix = incidence_matrix)\n #flying_function(XData[ord(R[z].next_edge_decided.replace(R[z].present_location,'')) - 65], YData[ord(R[z].next_edge_decided.replace(R[z].present_location,'')) - 65], z+1) \n R[z].next_edge_decided,count = second_step_on_vertex_visit(graph, V,R,z,count)\n if R[z].next_edge_decided !=0:\n R[z].setpoint_list.append(XData[ord(R[z].next_edge_decided.replace(R[z].present_location,'')) - 65]*25)\n R[z].setpoint_list.append(YData[ord(R[z].next_edge_decided.replace(R[z].present_location,'')) - 65]*25)\n R[z].setpoint_list.append(spawn_location[3*z +2] + z+1)\n 
rospy.loginfo('Setpoint Addded!')\n #print('The next edge selected by : ' + str(z) + ' : robot is :' + str(R[z].next_edge_decided))\n #print('')\n for k in range(K):\n R[k].setpoint_list.append(spawn_location[3*k])\n R[k].setpoint_list.append(spawn_location[3*k + 1])\n R[k].setpoint_list.append(spawn_location[3*k + 2] +k+1)\n\n return R[0].setpoint_list,R[1].setpoint_list,R[2].setpoint_list, R[3].setpoint_list,R[4].setpoint_list,R[5].setpoint_list,R[6].setpoint_list,R[7].setpoint_list,R[8].setpoint_list,R[9].setpoint_list\n#still need to append the base station as last setpoint and spawn location as the first setpoint\ndef drone_reached(xdata, ydata, zdata):\n if abs(new_pose.pose.position.x - xdata) <= threshold and abs(new_pose.pose.position.y - ydata) <= threshold and abs(new_pose.pose.position.z - zdata) <= threshold:\n return True \n\ndef position_control():\n\n global spawn_location\n _,_,_,_, list_of_setpoints4,_,_,_,_,_ = algorithm()\n print(list_of_setpoints4)\n num_of_points = len(list_of_setpoints4)//3\n # print(list_of_setpoints1)\n rospy.init_node('offb_node4', anonymous=True)\n prev_state = current_state\n rate = rospy.Rate(20.0) # MUST be more then 2Hz\n\n dataSet = np.array(list_of_setpoints4)\n numDataPoints = len(list_of_setpoints4)//3\n dataSet = np.reshape(dataSet,(numDataPoints,3))\n dataSet = np.transpose(dataSet)\n print(dataSet)\n # GET SOME MATPLOTLIB OBJECTS\n fig = plt.figure()\n ax = Axes3D(fig)\n redDots = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='r', marker='o')[0] # For scatter plot\n # NOTE: Can't pass empty arrays into 3d version of plot()\n line = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='g')[0] # For line plot\n \n # AXES PROPERTIES]\n # ax.set_xlim3d([limit0, limit1])\n ax.set_xlabel('X(t)')\n ax.set_ylabel('Y(t)')\n ax.set_zlabel('Z(t)')\n ax.set_title('Trajectory of 4th UAV')\n \n # Creating the Animation object\n line_ani = animation.FuncAnimation(fig, func, frames=numDataPoints + 1, fargs=(dataSet,line,redDots), interval=150, blit=False)\n line_ani.save(r'4_th_UAV_animation.gif')\n # send a few setpoints before starting\n for i in range(100):\n local_pos_pub.publish(pose)\n rate.sleep()\n \n # wait for FCU connection\n while not current_state.connected:\n rate.sleep()\n # print(num_of_points)\n for i in range(num_of_points):\n print(i)\n last_request = rospy.get_rostime()\n if i ==2:\n ayushtrial = input()\n else:\n ayushtrial = 1\n #substract personal\n while not rospy.is_shutdown() and not drone_reached(list_of_setpoints4[3*i] - spawn_location[12],list_of_setpoints4[3*i+1] - spawn_location[13],list_of_setpoints4[3*i+2] - spawn_location[14]):\n now = rospy.get_rostime()\n if current_state.mode != \"OFFBOARD\" and (now - last_request > rospy.Duration(4.)):\n set_mode_client(base_mode=0, custom_mode=\"OFFBOARD\")\n last_request = now \n else:\n if not current_state.armed and (now - last_request > rospy.Duration(4.)):\n arming_client(True)\n last_request = now \n\n # older versions of PX4 always return success==True, so better to check Status instead\n if prev_state.armed != current_state.armed:\n rospy.loginfo(\"Vehicle armed: %r\" % current_state.armed)\n if prev_state.mode != current_state.mode: \n rospy.loginfo(\"Current mode: %s\" % current_state.mode)\n prev_state = current_state\n \n # new_x,new_y,new_z = \n # Update timestamp and publish pose \n #pose.header.stamp = rospy.Time.now()\n pose.pose.position.x = list_of_setpoints4[3*i] - spawn_location[12] #initial x #personal\n pose.pose.position.y = 
list_of_setpoints4[3*i+1] - spawn_location[13]#initial y\n pose.pose.position.z = list_of_setpoints4[3*i+2] - spawn_location[14]#initial_height\n local_pos_pub.publish(pose)\n #print('here!!')\n # plt.scatter(new_pose.pose.position.x/50 , new_pose.pose.position.y/50, marker = '.', color='orange')\n # plt.draw()\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n #print('D')\n position_control()\n # plt.savefig(\"4_city_uav_route_yo_final_run.png\")\n except rospy.ROSInterruptException:\n pass"
] | [["numpy.reshape", "matplotlib.pyplot.plot", "matplotlib.animation.FuncAnimation", "numpy.transpose", "numpy.array", "numpy.vstack", "matplotlib.pyplot.figure"]] |
tgolubev/Drift-Diffusion_Python | ["645650341c900410d59e61c150aab2516cabceba"] | ["photogeneration.py"] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 19, 2018\n\n@author: Timofey Golubev\n\nThis just contains the function for reading photogeneration rate from a generation rate data file.\n\"\"\"\n\nimport numpy as np\n\ndef get_photogeneration(params):\n '''\n Reads photogeneration rate from an input file.\n Inputs: \n params: Params object which contains several necessary parameters such as the name of generation\n rate file as well as the photogeneration scaling to use. The photogeneration scaling\n is determined by finding a value which will result in the correct short-circuit current.\n '''\n \n try:\n gen_file = open(params.gen_rate_file_name, \"r\")\n except:\n print(f\"Unable to open file{params.gen_rate_file_name}\")\n \n photogen_rate = np.loadtxt(params.gen_rate_file_name) \n #photogen_rate = np.ones(params.num_cell+1)\n \n photogen_rate = params.Photogen_scaling * photogen_rate/np.max(photogen_rate)\n \n gen_file.close()\n \n return photogen_rate\n \n \n \n "
] | [["numpy.max", "numpy.loadtxt"]] |
sry002/avocado | ["d89f4161e3236b65e57f0d8c7f4454ee44501fa3"] | ["avocado/model.py"] | [
"# models.py\n# Contact: Jacob Schreiber <[email protected]>\n# William Noble <[email protected]>\n\n\"\"\"\nAvocado is deep tensor factorization model for learning a latent representation\nof the human epigenome. This file has functions for building a deep tensor\nfactorization model.\n\"\"\"\n\nfrom .io import data_generator\nfrom .io import permuted_data_generator\nfrom .io import sequential_data_generator\n\n\nimport json\nimport numpy\nimport keras\n\nfrom keras.layers import Input, Embedding, Dense\nfrom keras.layers import Multiply, Dot, Flatten, concatenate\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\ndef build_model(n_celltypes, n_celltype_factors, n_assays, n_assay_factors,\n\tn_genomic_positions, n_25bp_factors, n_250bp_factors, n_5kbp_factors,\n\tn_layers, n_nodes, freeze_celltypes=False, freeze_assays=False,\n\tfreeze_genome_25bp=False, freeze_genome_250bp=False, \n\tfreeze_genome_5kbp=False, freeze_network=False):\n\t\"\"\"This function builds a multi-scale deep tensor factorization model.\"\"\"\n\n\tcelltype_input = Input(shape=(1,), name=\"celltype_input\")\n\tcelltype_embedding = Embedding(n_celltypes, n_celltype_factors, \n\t\tinput_length=1, name=\"celltype_embedding\")\n\tcelltype_embedding.trainable = not freeze_celltypes\n\tcelltype = Flatten()(celltype_embedding(celltype_input))\n\n\tassay_input = Input(shape=(1,), name=\"assay_input\")\n\tassay_embedding = Embedding(n_assays, n_assay_factors, \n\t\tinput_length=1, name=\"assay_embedding\")\n\tassay_embedding.trainable = not freeze_assays\n\tassay = Flatten()(assay_embedding(assay_input))\n\n\tgenome_25bp_input = Input(shape=(1,), name=\"genome_25bp_input\")\n\tgenome_25bp_embedding = Embedding(n_genomic_positions, n_25bp_factors, \n\t\tinput_length=1, name=\"genome_25bp_embedding\")\n\tgenome_25bp_embedding.trainable = not freeze_genome_25bp\n\tgenome_25bp = Flatten()(genome_25bp_embedding(genome_25bp_input))\n\n\tgenome_250bp_input = Input(shape=(1,), name=\"genome_250bp_input\")\n\tgenome_250bp_embedding = Embedding(int(n_genomic_positions / 10) + 1,\n\t\tn_250bp_factors, input_length=1, name=\"genome_250bp_embedding\")\n\tgenome_250bp_embedding.trainable = not freeze_genome_250bp\n\tgenome_250bp = Flatten()(genome_250bp_embedding(genome_250bp_input))\n\n\tgenome_5kbp_input = Input(shape=(1,), name=\"genome_5kbp_input\")\n\tgenome_5kbp_embedding = Embedding(int(n_genomic_positions / 200) + 1, \n\t\tn_5kbp_factors, input_length=1, name=\"genome_5kbp_embedding\")\n\tgenome_5kbp_embedding.trainable = not freeze_genome_5kbp\n\tgenome_5kbp = Flatten()(genome_5kbp_embedding(genome_5kbp_input))\n\n\tlayers = [celltype, assay, genome_25bp, genome_250bp, genome_5kbp]\n\tinputs = (celltype_input, assay_input, genome_25bp_input, \n\t\tgenome_250bp_input, genome_5kbp_input)\n\n\tx = concatenate(layers)\n\tfor i in range(n_layers):\n\t\tlayer = Dense(n_nodes, activation='relu', name=\"dense_{}\".format(i))\n\t\tlayer.trainable = not freeze_network\n\t\tx = layer(x)\n\n\tlayer = Dense(1, name=\"y_pred\")\n\tlayer.trainable = not freeze_network\n\ty = layer(x)\n\n\tmodel = Model(inputs=inputs, outputs=y)\n\tmodel.compile(optimizer='adam', loss='mse', metrics=['mse'])\n\treturn model\n\nclass Avocado(object):\n\t\"\"\"An Avocado multi-scale deep tensor factorization model.\n\n\tThe Avocado model is a multi-scale deep tensor factorization model. It is\n\tmulti-scale because it represents the genome axis using three different\n\tresolutions---25 bp, 250 bp and 5 kbp. 
It is deep because it replaces the\n\tdot product component of most linear factorization approaches with a deep\n\tneural network. The tensor factors and the neural network weights are\n\ttrained jointly to impute the values in the tensor that it is provided.\n\n\tIn this case Avocado is trained on epigenomic data whose dimensions are\n\thuman cell type, epigenomic assay, and genomic coordinate. The trained\n\tmodel can impute epigenomic assays that have not yet been performed, and\n\tthe learned factor values can themselves be used to represent genomic\n\tpositions more compactly than the full set of epigenomic measurements\n\tcould.\n\n\tThe default parameters are those used in the manuscript entitled \n\t\"Multi-scale deep tensor factorization learns a latent representation\n\tof the human epigenome\". \n\n\tParameters\n\t----------\n\tcelltypes : list\n\t\tThe list of cell type names that will be modeled\n\n\tassays : list\n\t\tThe list of assays that will be modeled\n\n\tn_celltype_factors : int, optional\n\t\tThe number of factors to use to represent each cell type. Default is 32.\n\n\tn_assay_factors : int, optional\n\t\tThe number of factors to use to represent each assay. Default is 256.\n\n\tn_genomic_positions : int, optional\n\t\tThe number of genomic positions to model. This is typically either\n\t\tthe size of the pilot regions when performing initial training or\n\t\tthe size of the chromosome when fitting the genomic latent factors.\n\t\tDefault is 1126469, the size of the pilot regions in chr1-22.\n\n\tn_25bp_factors : int, optional\n\t\tThe number of factors to use to represent the genome at 25 bp\n\t\tresolution. Default is 25.\n\n\tn_250bp_factors : int, optional\n\t\tThe number of factors to use to represent the genome at 250 bp\n\t\tresolution. Default is 40.\n\n\tn_5kbp_factors : int, optional\n\t\tThe number of factors to use to represent the genome at 5 kbp\n\t\tresolution. Default is 45.\n\n\tn_layers : int, optional\n\t\tThe number of hidden layers in the neural model. Default is 2.\n\n\tn_nodes : int, optional\n\t\tThe number of nodes per layer. Default is 2048.\n\n\tbatch_size : int, optional\n\t\tThe size of each batch to use in training. Defaut is 40000.\n\n\tfreeze_celltypes : bool, optional\n\t\tWhether to freeze the training of the cell type embedding. Default\n\t\tis False.\n\n\tfreeze_assays : bool, optional\n\t\tWhether to freeze the training of the assay embeddings. Default\n\t\tis False.\n\n\tfreeze_genome_25bp : bool, optional\n\t\tWhether to freeze the training of the 25 bp genome factors. Default\n\t\tis False.\n\n\tfreeze_genome_250bp : bool, optional\n\t\tWhether to freeze the training of the 250 bp genome factors. Default\n\t\tis False.\n\n\tfreeze_genome_5kbp : bool, optional\n\t\tWhether to freeze the training of the 5 kbp genome factors. Default\n\t\tis False.\n\n\tfreeze_network : bool, optional\n\t\tWhether to freeze the training of the neural network. 
Default\n\t\tis False.\n\n\tExample\n\t-------\n\t>>> import numpy, itertools\n\t>>> from avocado import Avocado\n\t>>>\n\t>>> celltypes = ['E003', 'E017', 'E065', 'E116', 'E117']\n\t>>> assays = ['H3K4me3', 'H3K27me3', 'H3K36me3', 'H3K9me3', 'H3K4me1']\n\t>>> \n\t>>> data = {}\n\t>>> for celltype, assay in itertools.product(celltypes, assays):\n\t>>> filename = 'data/{}.{}.pilot.arcsinh.npz'.format(celltype, assay)\n\t>>> data[(celltype, assay)] = numpy.load(filename)['arr_0']\n\t>>>\n\t>>> model = Avocado(celltypes, assays)\n\t>>> model.fit(data)\n\t>>>\n\t>>> track = model.predict(\"E065\", \"H3K27me3\")\n\t\"\"\"\n\n\tdef __init__(self, celltypes, assays, n_celltype_factors=32, \n\t\tn_assay_factors=256, n_genomic_positions=1126469,\n\t\tn_25bp_factors=25, n_250bp_factors=40, n_5kbp_factors=45, n_layers=2,\n\t\tn_nodes=2048, batch_size=40000, freeze_celltypes=False, \n\t\tfreeze_assays=False, freeze_genome_25bp=False, freeze_genome_250bp=False,\n\t\tfreeze_genome_5kbp=False, freeze_network=False):\n\n\t\tself.celltypes = list(celltypes)\n\t\tself.assays = list(assays)\n\t\tself.experiments = []\n\n\t\tself.n_celltypes = len(celltypes)\n\t\tself.n_assays = len(assays)\n\n\t\tself.batch_size = batch_size\n\n\t\tself.n_celltype_factors = n_celltype_factors\n\t\tself.n_celltype_factors = n_celltype_factors\n\t\tself.n_assay_factors = n_assay_factors\n\t\tself.n_genomic_positions = n_genomic_positions\n\t\tself.n_25bp_factors = n_25bp_factors\n\t\tself.n_250bp_factors = n_250bp_factors\n\t\tself.n_5kbp_factors = n_5kbp_factors\n\t\tself.n_layers = n_layers\n\t\tself.n_nodes = n_nodes\n\n\t\tself.freeze_celltypes = freeze_celltypes\n\t\tself.freeze_assays = freeze_assays\n\t\tself.freeze_genome_25bp = freeze_genome_25bp\n\t\tself.freeze_genome_250bp = freeze_genome_250bp\n\t\tself.freeze_genome_5kbp = freeze_genome_5kbp\n\t\tself.freeze_network = freeze_network\n\n\t\tself.model = build_model(n_celltypes=self.n_celltypes,\n\t\t\t\t\t\t\t\t n_celltype_factors=n_celltype_factors,\n\t\t\t\t\t\t\t\t n_assays=self.n_assays,\n\t\t\t\t\t\t\t\t n_assay_factors=n_assay_factors,\n\t\t\t\t\t\t\t\t n_genomic_positions=n_genomic_positions,\n\t\t\t\t\t\t\t\t n_25bp_factors=n_25bp_factors,\n\t\t\t\t\t\t\t\t n_250bp_factors=n_250bp_factors,\n\t\t\t\t\t\t\t\t n_5kbp_factors=n_5kbp_factors,\n\t\t\t\t\t\t\t\t n_layers=n_layers,\n\t\t\t\t\t\t\t\t n_nodes=n_nodes,\n\t\t\t\t\t\t\t\t freeze_celltypes=freeze_celltypes,\n\t\t\t\t\t\t\t\t freeze_assays=freeze_assays,\n\t\t\t\t\t\t\t\t freeze_genome_25bp=freeze_genome_25bp,\n\t\t\t\t\t\t\t\t freeze_genome_250bp=freeze_genome_250bp,\n\t\t\t\t\t\t\t\t freeze_genome_5kbp=freeze_genome_5kbp,\n\t\t\t\t\t\t\t\t freeze_network=freeze_network)\n\n\t@property\n\tdef celltype_embedding(self):\n\t\t\"\"\"Returns the learned cell type embedding as a numpy array.\n\n\t\tParameters\n\t\t----------\n\t\tNone\n\n\t\tReturns\n\t\t-------\n\t\tcelltype_embedding : numpy.ndarray, shape=(n_celltypes, n_factors)\n\t\t\tThe learned embedding corresponding to the input name \n\t\t\t'celltype_embedding'. 
The cell types are ordered according to the\n\t\t\torder defined in self.celltypes.\n\t\t\"\"\"\n\n\t\tfor layer in self.model.layers:\n\t\t\tif layer.name == 'celltype_embedding':\n\t\t\t\treturn layer.get_weights()[0]\n\n\t\traise ValueError(\"No layer in model named 'celltype_embedding'.\")\n\n\t@property\n\tdef assay_embedding(self):\n\t\t\"\"\"Returns the learned assay embedding as a numpy array.\n\n\t\tParameters\n\t\t----------\n\t\tNone\n\n\t\tReturns\n\t\t-------\n\t\tassay_embedding : numpy.ndarray, shape=(n_assays, n_factors)\n\t\t\tThe learned embedding corresponding to the input name\n\t\t\t'assay_embedding'. The assays are ordered according to the order \n\t\t\tdefined in self.assays.\n\t\t\"\"\"\n\n\t\tfor layer in self.model.layers:\n\t\t\tif layer.name == 'assay_embedding':\n\t\t\t\treturn layer.get_weights()[0]\n\n\t\traise ValueError(\"No layer in model named 'assay_embedding'.\")\n\n\n\t@property\n\tdef genome_embedding(self):\n\t\t\"\"\"Returns the learned genomic embedding as a numpy array.\n\n\t\tThis function will concatenate together the three resolutions of\n\t\tgenomic factors, such that the first columns correspond to the\n\t\t25 bp factors, the next columns correspond to the 250 bp factors,\n\t\tand the final columns correspond to the 5 kbp factors. The factors\n\t\tthat span more than 25 bp will be repeated across several successive\n\t\tpositions \n\n\t\tParameters\n\t\t----------\n\t\tNone\n\n\t\tReturns\n\t\t-------\n\t\tgenome_embedding : numpy.ndarray, shape=(n_genomic_positions, \n\t\t\tn_25bp_factors + n_250bp_factors + n_5kbp_factors)\n\t\t\tThe learned embedding corresponding to the input names\n\t\t\tgenome_25bp_embedding, genome_250bp_embedding, and \n\t\t\tgenome_5kbp_embedding.\n\t\t\"\"\"\n\n\t\tn_25bp = self.n_25bp_factors\n\t\tn_250bp = self.n_250bp_factors\n\t\tn_5kbp = self.n_5kbp_factors\n\n\t\tgenome_embedding = numpy.empty((self.n_genomic_positions, \n\t\t\tn_25bp + n_250bp + n_5kbp))\n\n\t\tfor layer in self.model.layers:\n\t\t\tif layer.name == 'genome_25bp_embedding':\n\t\t\t\tgenome_25bp_embedding = layer.get_weights()[0]\n\t\t\telif layer.name == 'genome_250bp_embedding':\n\t\t\t\tgenome_250bp_embedding = layer.get_weights()[0]\n\t\t\telif layer.name == 'genome_5kbp_embedding':\n\t\t\t\tgenome_5kbp_embedding = layer.get_weights()[0]\n\n\t\tn1 = n_25bp\n\t\tn2 = n_25bp + n_250bp\n\n\t\tfor i in range(self.n_genomic_positions):\n\t\t\tgenome_embedding[i, :n1] = genome_25bp_embedding[i]\n\t\t\tgenome_embedding[i, n1:n2] = genome_250bp_embedding[i // 10]\n\t\t\tgenome_embedding[i, n2:] = genome_5kbp_embedding[i // 200]\n\n\t\treturn genome_embedding\n\n\tdef summary(self):\n\t\t\"\"\"A wrapper method for the keras summary method.\"\"\"\n\n\t\tself.model.summary()\n\n\tdef fit(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,\n\t\tverbose=1, callbacks=None, sampling='sequential', input_generator=None, \n\t\t**kwargs):\n\t\t\"\"\"Fit the model to the given epigenomic tracks.\n\n\t\tPass in a dictionary of training data and an optional dictionary of\n\t\tvalidation data. The keys to this dictionary are a tuple of the format\n\t\t(celltype, assay) and the values are the corresponding track in the\n\t\tform of a numpy array. 
The tracks can either be in the form of an array\n\t\tthat is in memory or as a memory map.\n\n\t\tParameters\n\t\t----------\n\t\tX_train : dict\n\t\t\tA dictionary of training data values, where the keys are a tuple of\n\t\t\t(celltype, assay) and the values are a track.\n\n\t\tX_valid : dict or None, optional\n\t\t\tA dictionary of validation data values that are used to calculate\n\t\t\tvalidation set MSE during the training process. If None, validation\n\t\t\tset statistics are not calculated during the training process.\n\t\t\tDefault is None.\n\n\t\tn_epochs : int, optional\n\t\t\tThe number of epochs to train on before ending training. Default is 120.\n\n\t\tepoch_size : int, optional\n\t\t\tThe number of batches per epoch. Default is 200.\n\n\t\tverbose: int, optional\n\t\t\tThe verbosity level of training. Must be one of 0, 1, or 2, where 0\n\t\t\tmeans silent, 1 means progress bar, and 2 means use only one line\n\t\t\tper epoch. Default is 1.\n\n\t\tcallbacks : list or None, optional\n\t\t\tA list of keras callback instances to be called during training.\n\n\t\tsampling : str, optional\n\t\t\tThe sampling strategy to use for the generators. Must be one of the\n\t\t\tfollowing:\n\n\t\t\t\t'sequential' : Sequentially scans through the genome indexes,\n\t\t\t\t\tselecting a cell type and assay randomly at each position\n\t\t\t\t'permuted' : Sequentially scans through a permuted version\n\t\t\t\t\tof the genome indexes, such that each epoch sees every\n\t\t\t\t\tgenomic index once, but each batch sees nearly random \n\t\t\t\t\tindexes\n\t\t\t\t'random' : Randomly selects genomic positions. No guarantee\n\t\t\t\t\ton the number of times each position has been seen. \n\n\t\t\tDefault is 'sequential'.\n\n\t\tinput_generator : generator or None, optional\n\t\t\tA custom data generator object to be used in the place of the\n\t\t\tdefault generator. This will only change the training generator,\n\t\t\tnot the validation generator. 
Default is None.\n\n\t\t**kwargs : optional\n\t\t\tAny other keyword arguments to be passed into the `fit_generator`\n\t\t\tmethod.\n\n\t\tReturns\n\t\t-------\n\t\thistory : keras.History.history\n\t\t\tThe keras history object that records training loss values and\n\t\t\tmetric values.\n\t\t\"\"\"\n\n\t\tif not isinstance(X_train, dict):\n\t\t\traise ValueError(\"X_train must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\n\n\t\tif X_valid is not None and not isinstance(X_valid, dict):\n\t\t\traise ValueError(\"X_valid must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\t\n\n\t\tfor (celltype, assay), track in X_train.items():\n\t\t\tif celltype not in self.celltypes:\n\t\t\t\traise ValueError(\"Celltype {} appears in the training data \" \\\n\t\t\t\t\t\"but not in the list of cell types provided to the \" \\\n\t\t\t\t\t\"model.\".format(celltype))\n\n\t\t\tif assay not in self.assays:\n\t\t\t\traise ValueError(\"Assay {} appears in the training data \" \\\n\t\t\t\t\t\"but not in the list of assays provided to the \" \\\n\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tif X_valid is not None:\n\t\t\tfor (celltype, assay), track in X_valid.items():\n\t\t\t\tif celltype not in self.celltypes:\n\t\t\t\t\traise ValueError(\"Celltype {} appears in the validation \" \\\n\t\t\t\t\t\t\"data but not in the list of cell types provided to \" \\\n\t\t\t\t\t\t\"the model.\".format(celltype))\n\n\t\t\t\tif assay not in self.assays:\n\t\t\t\t\traise ValueError(\"Assay {} appears in the validation \" \\\n\t\t\t\t\t\t\"data but not in the list of assays provided to the \" \\\n\t\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tif input_generator is not None:\n\t\t\tX_train_gen = input_generator\n\t\telif sampling == 'sequential':\n\t\t\tX_train_gen = sequential_data_generator(self.celltypes, \n\t\t\t\tself.assays, X_train, self.n_genomic_positions, \n\t\t\t\tself.batch_size)\n\t\telif sampling == 'permuted':\n\t\t\tX_train_gen = permuted_data_generator(self.celltypes, \n\t\t\t\tself.assays, X_train, self.n_genomic_positions, \n\t\t\t\tself.batch_size)\n\t\telif sampling == 'random':\n\t\t\tX_train_gen = permuted_data_generator(self.celltypes, \n\t\t\t\tself.assays, X_train, self.n_genomic_positions, \n\t\t\t\tself.batch_size)\t\t\t\n\n\t\tif X_valid is not None:\n\t\t\tX_valid_gen = data_generator(self.celltypes, self.assays, \n\t\t\t\tX_valid, self.n_genomic_positions, self.batch_size)\n\n\t\t\thistory = self.model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, validation_data=X_valid_gen, \n\t\t\t\tvalidation_steps=30, verbose=verbose, callbacks=callbacks, \n\t\t\t\t**kwargs)\n\t\telse:\n\t\t\thistory = self.model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, verbose=verbose, 
\n\t\t\t\tcallbacks=callbacks, **kwargs)\n\n\t\tself.experiments = list(X_train.keys())\n\t\treturn history\n\n\tdef fit_celltypes(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,\n\t\tverbose=1, callbacks=None, **kwargs):\n\t\t\"\"\"Add a new cell type(s) to an otherwise frozen model.\n\n\t\tThis method will add a new cell type to the cell type embedding after\n\t\tfreezing all of the other parameters in the model, including weights\n\t\tand the other cell type positions. Functionally it will train a new\n\t\tcell type embedding and return a new model whose cell type embedding\n\t\tis the concatenation of the old cell type embedding and the new one.\n\n\t\tPass in a dictionary of training data and an optional dictionary of\n\t\tvalidation data. The keys to this dictionary are a tuple of the format\n\t\t(celltype, assay) and the values are the corresponding track in the\n\t\tform of a numpy array. The tracks can either be in the form of an array\n\t\tthat is in memory or as a memory map. The celltypes provided should not\n\t\tappear in the model.celltypes attribute but the assays should exclusively\n\t\tappear in the model.assays attribute.\n\n\t\tParameters\n\t\t----------\n\t\tX_train : dict\n\t\t\tA dictionary of training data values, where the keys are a tuple of\n\t\t\t(celltype, assay) and the values are a track.\n\n\t\tX_valid : dict or None, optional\n\t\t\tA dictionary of validation data values that are used to calculate\n\t\t\tvalidation set MSE during the training process. If None, validation\n\t\t\tset statistics are not calculated during the training process.\n\t\t\tDefault is None.\n\n\t\tn_epochs : int, optional\n\t\t\tThe number of epochs to train on before ending training. Default is 120.\n\n\t\tepoch_size : int, optional\n\t\t\tThe number of batches per epoch. Default is 200.\n\n\t\tverbose: int, optional\n\t\t\tThe verbosity level of training. Must be one of 0, 1, or 2, where 0\n\t\t\tmeans silent, 1 means progress bar, and 2 means use only one line\n\t\t\tper epoch.\n\n\t\tcallbacks : list or None, optional\n\t\t\tA list of keras callback instances to be called during training. 
\n\n\t\t**kwargs : optional\n\t\t\tAny other keyword arguments to be passed into the `fit_generator`\n\t\t\tmethod.\n\n\t\tReturns\n\t\t-------\n\t\thistory : keras.History.history\n\t\t\tThe keras history object that records training loss values and\n\t\t\tmetric values.\n\t\t\"\"\"\n\n\t\tif not isinstance(X_train, dict):\n\t\t\traise ValueError(\"X_train must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\n\n\t\tif X_valid is not None and not isinstance(X_valid, dict):\n\t\t\traise ValueError(\"X_valid must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\t\n\n\t\tfor (celltype, assay), track in X_train.items():\n\t\t\tif celltype in self.celltypes:\n\t\t\t\traise ValueError(\"Celltype {} appears in the training data \" \\\n\t\t\t\t\t\"and also in the list of cell types already in the \" \\\n\t\t\t\t\t\"model.\".format(celltype))\n\n\t\t\tif assay not in self.assays:\n\t\t\t\traise ValueError(\"Assay {} appears in the training data \" \\\n\t\t\t\t\t\"but not in the list of assays provided to the \" \\\n\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tif X_valid is not None:\n\t\t\tfor (celltype, assay), track in X_valid.items():\n\t\t\t\tif celltype in self.celltypes:\n\t\t\t\t\traise ValueError(\"Celltype {} appears in the validation \" \\\n\t\t\t\t\t\t\"data and also in the list of cell types already in \" \\\n\t\t\t\t\t\t\"the model.\".format(celltype))\n\n\t\t\t\tif assay not in self.assays:\n\t\t\t\t\traise ValueError(\"Assay {} appears in the training data \" \\\n\t\t\t\t\t\t\"but not in the list of assays provided to the \" \\\n\t\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tnew_celltypes = list(numpy.unique([ct for ct, _ in X_train.keys()]))\n\n\t\tmodel = build_model(n_celltypes=len(new_celltypes),\n\t\t\t\t\t\t\tn_celltype_factors=self.n_celltype_factors,\n\t\t\t\t\t\t\tn_assays=self.n_assays,\n\t\t\t\t\t\t\tn_assay_factors=self.n_assay_factors,\n\t\t\t\t\t\t\tn_genomic_positions=self.n_genomic_positions,\n\t\t\t\t\t\t\tn_25bp_factors=self.n_25bp_factors,\n\t\t\t\t\t\t\tn_250bp_factors=self.n_250bp_factors,\n\t\t\t\t\t\t\tn_5kbp_factors=self.n_5kbp_factors,\n\t\t\t\t\t\t\tn_layers=self.n_layers,\n\t\t\t\t\t\t\tn_nodes=self.n_nodes,\n\t\t\t\t\t\t\tfreeze_celltypes=False,\n\t\t\t\t\t\t\tfreeze_assays=True,\n\t\t\t\t\t\t\tfreeze_genome_25bp=True,\n\t\t\t\t\t\t\tfreeze_genome_250bp=True,\n\t\t\t\t\t\t\tfreeze_genome_5kbp=True,\n\t\t\t\t\t\t\tfreeze_network=True)\n\n\t\tfor old_layer, new_layer in zip(self.model.layers, model.layers):\n\t\t\tif 'input' in old_layer.name:\n\t\t\t\tcontinue\n\t\t\tif old_layer.name == 'celltype_embedding':\n\t\t\t\tcontinue\n\n\t\t\tnew_layer.set_weights(old_layer.get_weights())\n\n\n\t\tX_train_gen = sequential_data_generator(new_celltypes, self.assays, \n\t\t\tX_train, 
self.n_genomic_positions, self.batch_size)\n\n\t\tif X_valid is not None:\n\t\t\tX_valid_gen = data_generator(new_celltypes, self.assays, \n\t\t\t\tX_valid, self.n_genomic_positions, self.batch_size)\n\n\t\t\thistory = model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, validation_data=X_valid_gen, \n\t\t\t\tvalidation_steps=30, verbose=verbose, callbacks=callbacks, \n\t\t\t\t**kwargs)\n\t\telse:\n\t\t\thistory = model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, verbose=verbose, \n\t\t\t\tcallbacks=callbacks, **kwargs)\n\n\t\tfor layer in self.model.layers:\n\t\t\tif layer.name == 'celltype_embedding':\n\t\t\t\tcelltype_embedding = layer.get_weights()[0]\n\t\t\t\tbreak\n\n\t\tfor layer in model.layers:\n\t\t\tif layer.name == 'celltype_embedding':\n\t\t\t\tnew_celltype_embedding = layer.get_weights()[0]\n\t\t\t\tbreak\n\n\t\tcelltype_embedding = numpy.concatenate([celltype_embedding, \n\t\t\tnew_celltype_embedding]) \n\n\t\tself.celltypes.extend(new_celltypes)\n\t\tself.n_celltypes = len(self.celltypes)\n\n\t\tmodel = build_model(n_celltypes=self.n_celltypes,\n\t\t\t\t\t\t\tn_celltype_factors=self.n_celltype_factors,\n\t\t\t\t\t\t\tn_assays=self.n_assays,\n\t\t\t\t\t\t\tn_assay_factors=self.n_assay_factors,\n\t\t\t\t\t\t\tn_genomic_positions=self.n_genomic_positions,\n\t\t\t\t\t\t\tn_25bp_factors=self.n_25bp_factors,\n\t\t\t\t\t\t\tn_250bp_factors=self.n_250bp_factors,\n\t\t\t\t\t\t\tn_5kbp_factors=self.n_5kbp_factors,\n\t\t\t\t\t\t\tn_layers=self.n_layers,\n\t\t\t\t\t\t\tn_nodes=self.n_nodes,\n\t\t\t\t\t\t\tfreeze_celltypes=self.freeze_celltypes,\n\t\t\t\t\t\t\tfreeze_assays=self.freeze_assays,\n\t\t\t\t\t\t\tfreeze_genome_25bp=self.freeze_genome_25bp,\n\t\t\t\t\t\t\tfreeze_genome_250bp=self.freeze_genome_250bp,\n\t\t\t\t\t\t\tfreeze_genome_5kbp=self.freeze_genome_5kbp,\n\t\t\t\t\t\t\tfreeze_network=self.freeze_network)\n\n\t\tfor old_layer, new_layer in zip(self.model.layers, model.layers):\n\t\t\tif 'input' in old_layer.name:\n\t\t\t\tcontinue\n\t\t\tif old_layer.name == 'celltype_embedding':\n\t\t\t\tnew_layer.set_weights([celltype_embedding])\n\t\t\telse:\n\t\t\t\tnew_layer.set_weights(old_layer.get_weights())\n\n\t\tmodel.experiments = self.experiments + list(X_train.keys())\n\t\tself.model = model\n\t\treturn history\n\n\tdef fit_assays(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,\n\t\tverbose=1, callbacks=None, **kwargs):\n\t\t\"\"\"Add a new assay(s) to an otherwise frozen model.\n\n\t\tThis method will add a new assay to the assay embedding after\n\t\tfreezing all of the other parameters in the model, including weights\n\t\tand the other assay positions. Functionally it will train a new\n\t\tassay embedding and return a new model whose assay embedding\n\t\tis the concatenation of the old assay embedding and the new one.\n\n\t\tPass in a dictionary of training data and an optional dictionary of\n\t\tvalidation data. The keys to this dictionary are a tuple of the format\n\t\t(celltype, assay) and the values are the corresponding track in the\n\t\tform of a numpy array. The tracks can either be in the form of an array\n\t\tthat is in memory or as a memory map. 
The assays provided should not\n\t\tappear in the model.assays attribute, but the cell types should appear\n\t\tin the model.celltypes attribute.\n\n\t\tParameters\n\t\t----------\n\t\tX_train : dict\n\t\t\tA dictionary of training data values, where the keys are a tuple of\n\t\t\t(celltype, assay) and the values are a track.\n\n\t\tX_valid : dict or None, optional\n\t\t\tA dictionary of validation data values that are used to calculate\n\t\t\tvalidation set MSE during the training process. If None, validation\n\t\t\tset statistics are not calculated during the training process.\n\t\t\tDefault is None.\n\n\t\tn_epochs : int, optional\n\t\t\tThe number of epochs to train on before ending training. Default is 200.\n\n\t\tepoch_size : int, optional\n\t\t\tThe number of batches per epoch. Default is 120.\n\n\t\tverbose : int, optional\n\t\t\tThe verbosity level of training. Must be one of 0, 1, or 2, where 0\n\t\t\tmeans silent, 1 means progress bar, and 2 means use only one line\n\t\t\tper epoch.\n\n\t\tcallbacks : list or None, optional\n\t\t\tA list of keras callback instances to be called during training. \n\n\t\t**kwargs : optional\n\t\t\tAny other keyword arguments to be passed into the `fit_generator`\n\t\t\tmethod.\n\n\t\tReturns\n\t\t-------\n\t\thistory : keras.History.history\n\t\t\tThe keras history object that records training loss values and\n\t\t\tmetric values.\n\t\t\"\"\"\n\n\t\tif not isinstance(X_train, dict):\n\t\t\traise ValueError(\"X_train must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\n\n\t\tif X_valid is not None and not isinstance(X_valid, dict):\n\t\t\traise ValueError(\"X_valid must be a dictionary where the keys\" \\\n\t\t\t\t\" are (celltype, assay) tuples and the values are the track\" \\\n\t\t\t\t\" corresponding to that pair.\")\n\n\t\tfor (celltype, assay), track in X_train.items():\n\t\t\tif celltype not in self.celltypes:\n\t\t\t\traise ValueError(\"Celltype {} appears in the training data \" \\\n\t\t\t\t\t\"but not in the list of cell types already in the \" \\\n\t\t\t\t\t\"model.\".format(celltype))\n\n\t\t\tif assay in self.assays:\n\t\t\t\traise ValueError(\"Assay {} appears in the training data \" \\\n\t\t\t\t\t\"and also in the list of assays already in the \" \\\n\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tif X_valid is not None:\n\t\t\tfor (celltype, assay), track in X_valid.items():\n\t\t\t\tif celltype not in self.celltypes:\n\t\t\t\t\traise ValueError(\"Celltype {} appears in the validation \" \\\n\t\t\t\t\t\t\"data but not in the list of cell types already in \" \\\n\t\t\t\t\t\t\"the model.\".format(celltype))\n\n\t\t\t\tif assay in self.assays:\n\t\t\t\t\traise ValueError(\"Assay {} appears in the validation data \" \\\n\t\t\t\t\t\t\"and also in the list of assays already in the \" \\\n\t\t\t\t\t\t\"model.\".format(assay))\n\n\t\t\t\tif len(track) != self.n_genomic_positions:\n\t\t\t\t\traise ValueError(\"The track corresponding to {} {} is of \" \\\n\t\t\t\t\t\t\"size {} while the model encodes {} genomic \" \\\n\t\t\t\t\t\t\"positions\".format(celltype, assay, len(track), \n\t\t\t\t\t\t\tself.n_genomic_positions))\n\n\t\tnew_assays = 
list(numpy.unique([assay for _, assay in X_train.keys()]))\n\n\t\tmodel = build_model(n_celltypes=self.n_celltypes,\n\t\t\t\t\t\t\tn_celltype_factors=self.n_celltype_factors,\n\t\t\t\t\t\t\tn_assays=len(new_assays),\n\t\t\t\t\t\t\tn_assay_factors=self.n_assay_factors,\n\t\t\t\t\t\t\tn_genomic_positions=self.n_genomic_positions,\n\t\t\t\t\t\t\tn_25bp_factors=self.n_25bp_factors,\n\t\t\t\t\t\t\tn_250bp_factors=self.n_250bp_factors,\n\t\t\t\t\t\t\tn_5kbp_factors=self.n_5kbp_factors,\n\t\t\t\t\t\t\tn_layers=self.n_layers,\n\t\t\t\t\t\t\tn_nodes=self.n_nodes,\n\t\t\t\t\t\t\tfreeze_celltypes=True,\n\t\t\t\t\t\t\tfreeze_assays=False,\n\t\t\t\t\t\t\tfreeze_genome_25bp=True,\n\t\t\t\t\t\t\tfreeze_genome_250bp=True,\n\t\t\t\t\t\t\tfreeze_genome_5kbp=True,\n\t\t\t\t\t\t\tfreeze_network=True)\n\n\t\tfor old_layer, new_layer in zip(self.model.layers, model.layers):\n\t\t\tif 'input' in old_layer.name:\n\t\t\t\tcontinue\n\t\t\tif old_layer.name == 'assay_embedding':\n\t\t\t\tcontinue\n\n\t\t\tnew_layer.set_weights(old_layer.get_weights())\n\n\n\t\tX_train_gen = sequential_data_generator(self.celltypes, new_assays, \n\t\t\tX_train, self.n_genomic_positions, self.batch_size)\n\n\t\tif X_valid is not None:\n\t\t\tX_valid_gen = data_generator(self.celltypes, new_assays, \n\t\t\t\tX_valid, self.n_genomic_positions, self.batch_size)\n\n\t\t\thistory = model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, validation_data=X_valid_gen, \n\t\t\t\tvalidation_steps=30, verbose=verbose, callbacks=callbacks, \n\t\t\t\t**kwargs)\n\t\telse:\n\t\t\thistory = model.fit_generator(X_train_gen, epoch_size, n_epochs, \n\t\t\t\tworkers=1, verbose=verbose, \n\t\t\t\tcallbacks=callbacks, **kwargs)\n\n\t\tfor layer in self.model.layers:\n\t\t\tif layer.name == 'assay_embedding':\n\t\t\t\tassay_embedding = layer.get_weights()[0]\n\t\t\t\tbreak\n\n\t\tfor layer in model.layers:\n\t\t\tif layer.name == 'assay_embedding':\n\t\t\t\tnew_assay_embedding = layer.get_weights()[0]\n\t\t\t\tbreak\n\n\t\tassay_embedding = numpy.concatenate([assay_embedding, \n\t\t\tnew_assay_embedding]) \n\n\t\tself.assays.extend(new_assays)\n\t\tself.n_assays = len(self.assays)\n\n\t\tmodel = build_model(n_celltypes=self.n_celltypes,\n\t\t\t\t\t\t\tn_celltype_factors=self.n_celltype_factors,\n\t\t\t\t\t\t\tn_assays=self.n_assays,\n\t\t\t\t\t\t\tn_assay_factors=self.n_assay_factors,\n\t\t\t\t\t\t\tn_genomic_positions=self.n_genomic_positions,\n\t\t\t\t\t\t\tn_25bp_factors=self.n_25bp_factors,\n\t\t\t\t\t\t\tn_250bp_factors=self.n_250bp_factors,\n\t\t\t\t\t\t\tn_5kbp_factors=self.n_5kbp_factors,\n\t\t\t\t\t\t\tn_layers=self.n_layers,\n\t\t\t\t\t\t\tn_nodes=self.n_nodes,\n\t\t\t\t\t\t\tfreeze_celltypes=self.freeze_celltypes,\n\t\t\t\t\t\t\tfreeze_assays=self.freeze_assays,\n\t\t\t\t\t\t\tfreeze_genome_25bp=self.freeze_genome_25bp,\n\t\t\t\t\t\t\tfreeze_genome_250bp=self.freeze_genome_250bp,\n\t\t\t\t\t\t\tfreeze_genome_5kbp=self.freeze_genome_5kbp,\n\t\t\t\t\t\t\tfreeze_network=self.freeze_network)\n\n\t\tfor old_layer, new_layer in zip(self.model.layers, model.layers):\n\t\t\tif 'input' in old_layer.name:\n\t\t\t\tcontinue\n\t\t\tif old_layer.name == 'assay_embedding':\n\t\t\t\tnew_layer.set_weights([assay_embedding])\n\t\t\telse:\n\t\t\t\tnew_layer.set_weights(old_layer.get_weights())\n\n\t\tmodel.experiments = self.experiments + list(X_train.keys())\n\t\tself.model = model\n\t\treturn history\n\n\tdef predict(self, celltype, assay, start=0, end=None, verbose=0):\n\t\t\"\"\"Predict a track of epigenomic data.\n\n\t\tThis will predict a 
track of epigenomic data, resulting in one signal\n\t\tvalue per genomic position modeled. Users pass in the cell type and\n\t\tthe assay that they wish to impute and receive the track of data.\n\n\t\tParameters\n\t\t----------\n\t\tcelltype : str\n\t\t\tThe cell type (aka biosample) to be imputed. Must be one of the\n\t\t\telements from the list of cell types passed in upon model\n\t\t\tinitialization.\n\n\t\tassay : str\n\t\t\tThe assay to be imputed. Must be one of the elements from the list\n\t\t\tof assays passed in upon model initialization.\n\n\t\tstart : int, optional\n\t\t\tThe start position to begin the imputation at. By default this is 0,\n\t\t\tcorresponding to the start of the track. The value is which 25 bp\n\t\t\tbin to begin prediction at, not the raw genomic coordinate.\n\n\t\tend : int or None, optional\n\t\t\tThe end position to stop making imputations at, exclusive. By default\n\t\t\tthis is None, meaning to end at `self.n_genomic_positions`. \n\n\t\tverbose : int, optional\n\t\t\tThe verbosity level of the prediction. Must be 0 or 1.\n\n\t\tReturns\n\t\t-------\n\t\ttrack : numpy.ndarray\n\t\t\tA track of epigenomic signal value predictions for the specified\n\t\t\tcell type and assay for the considered genomic positions.\n\t\t\"\"\"\n\n\t\tif end is not None and end <= start:\n\t\t\traise ValueError(\"When given, the end coordinate must be greater\" \\\n\t\t\t\t\" than the start coordinate.\")\n\n\t\tif end is None:\n\t\t\tend = self.n_genomic_positions\n\n\t\tcelltype_idx = self.celltypes.index(celltype)\n\t\tassay_idx = self.assays.index(assay)\n\n\t\tcelltype_idxs = numpy.ones(end-start) * celltype_idx\n\t\tassay_idxs = numpy.ones(end-start) * assay_idx\n\n\t\tgenomic_25bp_idxs = numpy.arange(start, end)\n\t\tgenomic_250bp_idxs = numpy.arange(start, end) // 10\n\t\tgenomic_5kbp_idxs = numpy.arange(start, end) // 200\n\n\t\tX = {\n\t\t\t'celltype_input': celltype_idxs, \n\t\t\t'assay_input': assay_idxs, \n\t\t\t'genome_25bp_input': genomic_25bp_idxs, \n\t\t\t'genome_250bp_input': genomic_250bp_idxs,\n\t\t\t'genome_5kbp_input': genomic_5kbp_idxs\n\t\t}\n\t\t\n\t\ttrack = self.model.predict(X, batch_size=self.batch_size, \n\t\t\tverbose=verbose)[:,0]\n\t\t\n\t\treturn track\n\n\tdef get_params(self):\n\t\t\"\"\"Return the first weight matrix of each layer that has weights.\"\"\"\n\n\t\tparams = []\n\t\tfor layer in self.model.layers:\n\t\t\tweights = layer.get_weights()\n\t\t\tif len(weights) > 0:\n\t\t\t\tparams.append(weights[0])\n\t\treturn params\n\n\tdef save(self, name=\"avocado\", separators=(',', ' : '), indent=4):\n\t\t\"\"\"Serialize the model to disk.\n\n\t\tThis function produces two files. The first is a json file that has the\n\t\tmodel hyperparameters associated with it. 
The second is a h5 file that\n\t\tcontains the architecture of the neural network model, the weights, and\n\t\tthe optimizer.\n\n\t\tParameters\n\t\t----------\n\t\tname : str, optional\n\t\t\tThe name to use for the json and the h5 file that are stored.\n\n\t\tseparators : tuple, optional\n\t\t\tThe separators to use in the resulting JSON object.\n\n\t\tindent : int, optional\n\t\t\tThe number of spaces to use in the indent of the JSON.\n\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t\"\"\"\n\n\t\td = {\n\t\t\t'celltypes': self.celltypes,\n\t\t\t'assays': self.assays,\n\t\t\t'experiments': self.experiments,\n\t\t\t'n_celltype_factors': self.n_celltype_factors,\n\t\t\t'n_assay_factors': self.n_assay_factors,\n\t\t\t'n_genomic_positions': self.n_genomic_positions,\n\t\t\t'n_25bp_factors': self.n_25bp_factors,\n\t\t\t'n_250bp_factors': self.n_250bp_factors,\n\t\t\t'n_5kbp_factors': self.n_5kbp_factors,\n\t\t\t'n_layers': self.n_layers,\n\t\t\t'n_nodes': self.n_nodes,\n\t\t\t'batch_size': self.batch_size\n\t\t}\n\n\t\td = json.dumps(d, separators=separators, indent=indent)\n\n\t\twith open(\"{}.json\".format(name), \"w\") as outfile:\n\t\t\toutfile.write(d)\n\n\t\tself.model.save(\"{}.h5\".format(name))\n\n\tdef load_weights(self, name, verbose=0):\n\t\t\"\"\"Load serialized weights on a layer-by-layer basis.\n\n\t\tLoad the weights of a pre-saved model on a layer-by-layer basis. This\n\t\tmethod will iterate through the layers of the serialized model and\n\t\tthis model jointly and set the weights in this model to those of the\n\t\tserialized model should the weight matrices be of the same size. Should\n\t\tthey not be of the same size it will not modify the current weight\n\t\tmatrix. \n\n\t\tA primary use of this function should be after an initial model has been\n\t\ttrained on the Pilot regions and now one is fitting a model to each of\n\t\tthe chromosomes. The size of the genome factors will differ but the other\n\t\tcomponents will remain the same. Correspondingly, the identically sized\n\t\tweight matrices are loaded from the serialized model, while the\n\t\tdifferently sized ones keep their current values.\n\n\t\tParameters\n\t\t----------\n\t\tname : str\n\t\t\tThe suffix of the name of the weights file.\n\n\t\tverbose : int, optional\n\t\t\tThe verbosity level when loading weights. 
0 means silent, 1 means\n\t\t\tnotify when a weight matrix has been set, 2 means notify what\n\t\t\taction has been taken on each layer.\n\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t\"\"\"\n\n\t\tmodel = keras.models.load_model(\"{}.h5\".format(name))\n\n\t\tfor self_layer, layer in zip(self.model.layers, model.layers):\n\t\t\tw = layer.get_weights()\n\t\t\tw0 = self_layer.get_weights()\n\t\t\tlayer_name = self_layer.name\n\n\t\t\tif len(w) == 0:\n\t\t\t\tif verbose == 2:\n\t\t\t\t\tprint(\"{} has no weights to set\".format(layer_name))\n\t\t\t\t\n\t\t\t\tcontinue\n\n\t\t\tif w[0].shape != w0[0].shape:\n\t\t\t\tif verbose == 2:\n\t\t\t\t\tprint(\"{} is of different size and not set\".format(layer_name))\n\n\t\t\t\tcontinue\n\n\t\t\tself_layer.set_weights(w)\n\t\t\tif verbose > 0:\n\t\t\t\tprint(\"{} has been set from serialized model\".format(layer_name))\n\n\t@classmethod\n\tdef load(cls, name, freeze_celltypes=False, freeze_assays=False,\n\t\tfreeze_genome_25bp=False, freeze_genome_250bp=False, \n\t\tfreeze_genome_5kbp=False, freeze_network=False):\n\t\t\"\"\"Load a model that has been serialized to disk.\n\n\t\tThe keras model that is saved to disk does not contain any of the\n\t\twrapper information, so this method first rebuilds the Avocado object\n\t\tfrom the hyperparameters stored in the json file and then loads the\n\t\tkeras model from the h5 file.\n\n\t\tParameters\n\t\t----------\n\t\tname : str\n\t\t\tThe name of the file to load. There must be both a .json and a\n\t\t\t.h5 file with this suffix. For example, if \"Avocado\" is passed in,\n\t\t\tthere must be both a \"Avocado.json\" and a \"Avocado.h5\" file to\n\t\t\tbe loaded in.\n\n\t\tfreeze_celltypes : bool, optional\n\t\t\tWhether to freeze the training of the cell type embedding. Default\n\t\t\tis False.\n\n\t\tfreeze_assays : bool, optional\n\t\t\tWhether to freeze the training of the assay embeddings. Default\n\t\t\tis False.\n\n\t\tfreeze_genome_25bp : bool, optional\n\t\t\tWhether to freeze the training of the 25 bp genome factors. Default\n\t\t\tis False.\n\n\t\tfreeze_genome_250bp : bool, optional\n\t\t\tWhether to freeze the training of the 250 bp genome factors. Default\n\t\t\tis False.\n\n\t\tfreeze_genome_5kbp : bool, optional\n\t\t\tWhether to freeze the training of the 5 kbp genome factors. Default\n\t\t\tis False.\n\n\t\tfreeze_network : bool, optional\n\t\t\tWhether to freeze the training of the neural network. Default\n\t\t\tis False.\n\n\t\tReturns\n\t\t-------\n\t\tmodel : Avocado\n\t\t\tAn Avocado model.\n\t\t\"\"\"\n\n\t\twith open(\"{}.json\".format(name), \"r\") as infile:\n\t\t\td = json.load(infile)\n\n\t\tif 'experiments' in d:\n\t\t\texperiments = d['experiments']\n\t\t\tdel d['experiments']\n\t\telse:\n\t\t\texperiments = []\n\n\t\tmodel = Avocado(freeze_celltypes=freeze_celltypes,\n\t\t\t\t\t\tfreeze_assays=freeze_assays,\n\t\t\t\t\t\tfreeze_genome_25bp=freeze_genome_25bp,\n\t\t\t\t\t\tfreeze_genome_250bp=freeze_genome_250bp,\n\t\t\t\t\t\tfreeze_genome_5kbp=freeze_genome_5kbp,\n\t\t\t\t\t\tfreeze_network=freeze_network,\n\t\t\t\t\t\t**d)\n\n\t\tmodel.experiments = experiments\n\t\tmodel.model = keras.models.load_model(\"{}.h5\".format(name))\n\t\treturn model\n"
] |
[
[
"numpy.concatenate",
"numpy.arange",
"numpy.empty",
"numpy.ones"
]
] |
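A minimal usage sketch for the Avocado wrapper documented in the record above, based only on its own docstrings (fit_celltypes, predict, save, load). The import path, the file name "avocado", the cell type "new_celltype", and the assay "H3K4me3" are illustrative assumptions, not values taken from the dataset row:

from avocado import Avocado  # assumed import path
import numpy

model = Avocado.load("avocado")  # expects avocado.json and avocado.h5 on disk
# Tracks are keyed by (celltype, assay); the cell type must be new to the model,
# the assay must already be in model.assays, and each track must span exactly
# model.n_genomic_positions 25 bp bins.
X_train = {("new_celltype", "H3K4me3"): numpy.ones(model.n_genomic_positions)}
history = model.fit_celltypes(X_train, n_epochs=200, epoch_size=120)
track = model.predict("new_celltype", "H3K4me3", start=0, end=1000)
model.save("avocado_extended")  # writes avocado_extended.json and avocado_extended.h5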
matthewygf/incubator-tvm
|
[
"348144cb8b0485adca37aead0dfef9269cd2300d"
] |
[
"tests/python/frontend/onnx/test_forward.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport math\nimport onnx\nfrom onnx import helper, TensorProto, mapping\nimport torch\nimport torchvision\nimport tvm.topi.testing\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import graph_runtime\nimport scipy\nimport tvm.testing\n\n\ndef get_input_data_shape_dict(graph_def, input_data):\n if isinstance(input_data, list):\n input_names = {}\n shape_dict = {}\n for i, _ in enumerate(input_data):\n input_names[i] = graph_def.graph.input[i].name\n shape_dict[input_names[i]] = input_data[i].shape\n else:\n input_names = graph_def.graph.input[0].name\n shape_dict = {input_names: input_data.shape}\n\n return input_names, shape_dict\n\n\ndef get_tvm_output_with_vm(graph_def, input_data, target, ctx, opset=None):\n \"\"\" Generic function to execute and get tvm output with vm executor\"\"\"\n\n _, shape_dict = get_input_data_shape_dict(graph_def, input_data)\n\n mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)\n\n ex = relay.create_executor(\"vm\", mod=mod, ctx=ctx, target=target)\n result = ex.evaluate()(*input_data)\n if isinstance(result, tvm.runtime.NDArray):\n return result.asnumpy()\n return [r.asnumpy() for r in result]\n\n\ndef get_tvm_output(\n graph_def, input_data, target, ctx, output_shape=None, output_dtype=\"float32\", opset=None\n):\n \"\"\" Generic function to execute and get tvm output\"\"\"\n target = \"llvm\"\n\n input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)\n\n mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)\n\n with tvm.transform.PassContext(opt_level=1):\n graph, lib, params = relay.build(mod, target, params=params)\n\n ctx = tvm.cpu(0)\n m = graph_runtime.create(graph, lib, ctx)\n # set inputs\n if isinstance(input_data, list):\n for i, e in enumerate(input_names):\n # Its possible for some onnx inputs to not be needed in the tvm\n # module, confirm its present before setting.\n try:\n m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))\n except:\n continue\n else:\n m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))\n\n m.set_input(**params)\n # execute\n m.run()\n # get outputs\n if isinstance(output_shape, list) and isinstance(output_dtype, list):\n tvm_output_list = []\n for i, _ in enumerate(output_shape):\n tvm_output = m.get_output(i)\n tvm_output_list.append(tvm_output.asnumpy())\n return tvm_output_list\n else:\n tvm_output = m.get_output(0)\n return tvm_output.asnumpy()\n\n\ndef get_onnxruntime_output(model, inputs, dtype=\"float32\"):\n import onnxruntime.backend\n\n rep = onnxruntime.backend.prepare(model, \"CPU\")\n if isinstance(inputs, list) and len(inputs) > 1:\n return rep.run(inputs)\n elif 
isinstance(inputs, list) and len(inputs) == 1:\n inp = inputs[0]\n else:\n inp = inputs\n return rep.run(inp.astype(dtype))[0]\n\n\ndef verify_with_ort_with_inputs(\n model,\n inputs,\n out_shape=None,\n targets=None,\n use_vm=False,\n opset=None,\n dtype=\"float32\",\n rtol=1e-5,\n atol=1e-5,\n):\n def flatten(out):\n if isinstance(out, list) and len(out) == 1:\n out = out[0]\n if isinstance(out, np.ndarray):\n return out.flatten()\n return out\n\n ort_out = get_onnxruntime_output(model, inputs, dtype)\n\n if targets is None:\n targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]\n\n for target in targets:\n ctx = tvm.context(target, 0)\n\n if use_vm:\n tvm_out = get_tvm_output_with_vm(model, inputs, target, ctx, opset=opset)\n else:\n tvm_out = get_tvm_output(model, inputs, target, ctx, out_shape, dtype, opset=opset)\n\n tvm.testing.assert_allclose(flatten(ort_out), flatten(tvm_out), rtol=rtol, atol=atol)\n\n\ndef verify_with_ort(\n model,\n input_shapes,\n out_shape=None,\n targets=None,\n use_vm=False,\n opset=None,\n dtype=\"float32\",\n rtol=1e-5,\n atol=1e-5,\n):\n inputs = [np.random.uniform(size=ishape).astype(dtype) for ishape in input_shapes]\n verify_with_ort_with_inputs(\n model,\n inputs,\n out_shape=out_shape,\n targets=targets,\n use_vm=use_vm,\n opset=opset,\n dtype=dtype,\n rtol=rtol,\n atol=atol,\n )\n\n\ndef make_constant_node(name, data_type, dims, vals):\n return helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[name],\n value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),\n )\n\n\n@tvm.testing.uses_gpu\ndef test_reshape():\n in_shape = (4, 3, 3, 4)\n ref_shape = (6, 2, 4, 3)\n\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n\n graph = helper.make_graph(\n [ref_node, reshape_node],\n \"reshape_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"reshape_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, \"float32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out.shape)\n\n\n@tvm.testing.uses_gpu\ndef test_expand():\n def _test_expand(name, data, shape, ref_data):\n shape_array = np.array(shape)\n shape_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"shape\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=shape_array.shape,\n vals=shape_array.flatten().astype(\"int32\"),\n ),\n )\n expand_node = helper.make_node(\"Expand\", [\"in\", \"shape\"], [\"out\"])\n\n graph = helper.make_graph(\n [shape_node, expand_node],\n \"expand_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(data.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_data.shape))],\n )\n\n model = helper.make_model(graph, producer_name=name)\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, data, target, ctx, ref_data.shape, \"float32\")\n 
tvm.testing.assert_allclose(ref_data, tvm_out)\n\n in_shape = (3, 1)\n shape = (3, 4)\n data = np.random.uniform(size=in_shape).astype(np.float32)\n ref_data = np.tile(data, 4)\n _test_expand(\"expand_with_dim_unchanged_test\", data, shape, ref_data)\n\n in_shape = (3, 1)\n shape = (2, 1, 6)\n data = np.random.uniform(size=in_shape).astype(np.float32)\n ref_data = data * np.ones(shape, dtype=np.float32)\n _test_expand(\"expand_with_dim_changed_test\", data, shape, ref_data)\n\n\ndef verify_depth_to_space(inshape, outshape, mode, blockSize):\n node = onnx.helper.make_node(\"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=blockSize)\n\n graph = helper.make_graph(\n [node],\n \"depth_to_space_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=\"depth_to_space_test\")\n\n verify_with_ort(model, [inshape], outshape)\n\n\n@tvm.testing.uses_gpu\ndef test_depth_to_space():\n # current onnx.checker uses the OpSet-1 version of DepthToSpace, which doesn't have a mode argument.\n # TODO: we can add a mode argument to test CRD mode and DCR mode\n # in the future when we update to a newer onnx version.\n verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode=\"CRD\", blockSize=2)\n\n\ndef verify_space_to_depth(inshape, outshape, blockSize):\n node = onnx.helper.make_node(\"SpaceToDepth\", inputs=[\"x\"], outputs=[\"y\"], blocksize=blockSize)\n\n graph = helper.make_graph(\n [node],\n \"space_to_depth_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=\"space_to_depth_test\")\n\n verify_with_ort(model, [inshape], outshape)\n\n\n@tvm.testing.uses_gpu\ndef test_space_to_depth():\n verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)\n\n\n@tvm.testing.uses_gpu\ndef test_shape():\n in_shape = (4, 3, 3, 4)\n ref_shape = (6, 2, 4, 3)\n\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n\n shape_node = helper.make_node(\"Shape\", [\"out\"], [\"final_out\"])\n\n graph = helper.make_graph(\n [ref_node, reshape_node, shape_node],\n \"shape_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"final_out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"shape_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, \"int32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out)\n\n\ndef _test_power_iteration(x_shape, y_shape):\n if isinstance(y_shape, int):\n y_shape = [y_shape]\n\n x = np.random.uniform(size=x_shape).astype(np.float32)\n y = np.random.uniform(size=y_shape).astype(np.float32)\n\n np_res = np.power(x, y).astype(np.float32)\n\n res = helper.make_node(\"Pow\", [\"x\", \"y\"], [\"out\"])\n\n graph = helper.make_graph(\n [res],\n \"power_test\",\n inputs=[\n 
helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(np_res.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"power_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)\n tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_power():\n _test_power_iteration((1, 3), (1))\n _test_power_iteration((2, 3), (2, 3))\n _test_power_iteration((2, 3), (1, 3))\n\n\[email protected]_gpu\ndef test_squeeze():\n in_shape = (1, 3, 1, 3, 1, 1)\n out_shape = (3, 3)\n y = helper.make_node(\"Squeeze\", [\"in\"], [\"out\"], axes=[0, 2, 4, 5])\n\n graph = helper.make_graph(\n [y],\n \"squeeze_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"squeeze_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n tvm_out = get_tvm_output(model, x, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_shape, tvm_out.shape)\n\n\[email protected]_gpu\ndef test_flatten():\n\n in_shape = (1, 3, 4, 4)\n axis = 1\n ref_shape = (1, 48)\n\n flatten_node = helper.make_node(\"Flatten\", [\"in\"], [\"out\"], axis=axis)\n\n graph = helper.make_graph(\n [flatten_node],\n \"flatten_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"flatten_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, \"float32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out.shape)\n\n\[email protected]_gpu\ndef test_unsqueeze():\n in_shape = (3, 3)\n axis = (0, 3, 4)\n out_shape = (1, 3, 3, 1, 1)\n y = helper.make_node(\"Unsqueeze\", [\"in\"], [\"out\"], axes=list(axis))\n\n graph = helper.make_graph(\n [y],\n \"squeeze_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"squeeze_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n tvm_out = get_tvm_output(model, x, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_shape, tvm_out.shape)\n\n\ndef verify_gather(in_shape, indices, axis, dtype):\n x = np.random.uniform(size=in_shape).astype(dtype)\n indices = np.array(indices, dtype=\"int32\")\n out_np = np.take(x, indices, axis=axis)\n\n y = helper.make_node(\"Gather\", [\"in\", \"indices\"], [\"out\"], axis=axis)\n\n graph = helper.make_graph(\n [y],\n \"gather_test\",\n inputs=[\n helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_np.shape))],\n )\n model = helper.make_model(graph, 
producer_name=\"gather_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)\n tvm.testing.assert_allclose(out_np, tvm_out)\n\n\[email protected]_gpu\ndef test_gather():\n verify_gather((4,), [1], 0, \"int32\")\n verify_gather((1, 4), [0], 0, \"int32\")\n verify_gather((4,), [[[1, 0], [0, 1]]], 0, \"float32\")\n verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, \"int32\")\n verify_gather((3, 3, 3), [[[1, 0]]], -1, \"int32\")\n verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, \"float32\")\n\n\ndef verify_gatherelements(in_shape, indices, axis):\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n indices = np.array(indices, dtype=\"int32\")\n\n y = helper.make_node(\"GatherElements\", [\"data\", \"indices\"], [\"output\"], axis=axis)\n graph = helper.make_graph(\n [y],\n \"gather_elements_test\",\n inputs=[\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n )\n model = helper.make_model(graph, producer_name=\"gather_elements_test\")\n\n verify_with_ort_with_inputs(model, [x, indices])\n\n\[email protected]_gpu\ndef test_gatherelements():\n verify_gatherelements((4,), [3, 0, 2, 1], 0)\n verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)\n verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)\n verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)\n\n indices = [\n [[1, 0, 0], [1, 0, 1], [0, 1, 1]],\n [[1, 1, 1], [1, 2, 1], [1, 0, 1]],\n [[1, 2, 1], [1, 2, 1], [1, 2, 1]],\n ]\n\n verify_gatherelements((3, 3, 3), indices, 2)\n\n\ndef verify_scatter(in_shape, indices, axis):\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n indices = np.array(indices, dtype=\"int32\")\n updates = np.random.uniform(size=indices.shape).astype(\"float32\")\n\n y = helper.make_node(\"ScatterElements\", [\"data\", \"indices\", \"updates\"], [\"output\"], axis=axis)\n\n graph = helper.make_graph(\n [y],\n \"scatter_test\",\n inputs=[\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n helper.make_tensor_value_info(\"updates\", TensorProto.FLOAT, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n )\n model = helper.make_model(graph, producer_name=\"scatter_test\")\n verify_with_ort_with_inputs(model, [x, indices, updates])\n\n\[email protected]_gpu\ndef test_scatter():\n verify_scatter((4,), [1], 0)\n verify_scatter((1, 4), [[0]], 0)\n verify_scatter((4,), [2, 3], 0)\n verify_scatter((2, 2), [[1, 0], [0, 1]], 1)\n verify_scatter((3, 3, 3), [[[-1, -3]]], -1)\n verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)\n\n\ndef _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):\n if axes:\n y = helper.make_node(\"Slice\", [\"in\"], [\"out\"], axes=axes, starts=starts, ends=ends)\n else:\n y = helper.make_node(\"Slice\", [\"in\"], [\"out\"], starts=starts, ends=ends)\n\n graph = helper.make_graph(\n [y],\n \"slice_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"slice_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = 
get_tvm_output(model, indata, target, ctx, outdata.shape, \"float32\", opset=1)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\ndef _test_slice_iteration_v10(indata, outdata, **attrs):\n starts = attrs[\"starts\"]\n ends = attrs[\"ends\"]\n axes = None if \"axes\" not in attrs else attrs[\"axes\"]\n steps = None if \"steps\" not in attrs else attrs[\"steps\"]\n starts = np.asarray(starts)\n ends = np.asarray(ends)\n inputs = [\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"starts\", TensorProto.INT64, list(starts.shape)),\n helper.make_tensor_value_info(\"ends\", TensorProto.INT64, list(ends.shape)),\n ]\n initializer = [\n helper.make_tensor(\"starts\", TensorProto.INT64, list(starts.shape), starts),\n helper.make_tensor(\"ends\", TensorProto.INT64, list(ends.shape), ends),\n ]\n nodes = []\n\n if \"add_noop_to_input_attrs\" in attrs:\n\n def add_noop_to_input_attr(attr_name, attr):\n output_name = attr_name + \"_output\"\n\n ref_shape = list(np.array(attr).shape)\n ref_shape.insert(0, 1)\n ref_shape = tuple(ref_shape)\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in_\" + attr_name],\n value=onnx.helper.make_tensor(\n name=\"const_tensor__1_\" + attr_name,\n data_type=onnx.TensorProto.INT64,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n in_shape = np.array(attr).shape\n in_array = np.array(in_shape)\n ref_node2 = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"input_shape_\" + attr_name],\n value=onnx.helper.make_tensor(\n name=\"const_tensor__2_\" + attr_name,\n data_type=onnx.TensorProto.INT64,\n dims=in_array.shape,\n vals=in_array.flatten().astype(int),\n ),\n )\n\n reshape1_node = helper.make_node(\n \"Reshape\", [attr_name, \"ref_in_\" + attr_name], [\"reshape_\" + attr_name]\n )\n reshape2_node = helper.make_node(\n \"Reshape\", [\"reshape_\" + attr_name, \"input_shape_\" + attr_name], [output_name]\n )\n return [ref_node, ref_node2, reshape1_node, reshape2_node]\n\n slice_inputs = []\n for attr_name in [\"starts\", \"ends\", \"axes\", \"steps\"]:\n if attr_name not in attrs:\n continue\n if \"add_noop_to_input_attrs\" in attrs and attr_name in attrs[\"add_noop_to_input_attrs\"]:\n nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))\n slice_inputs.append(attr_name + \"_output\")\n else:\n slice_inputs.append(attr_name)\n\n if axes:\n axes = np.asarray(axes)\n inputs.append(helper.make_tensor_value_info(\"axes\", TensorProto.INT32, list(axes.shape)))\n initializer.append(helper.make_tensor(\"axes\", TensorProto.INT32, list(axes.shape), axes))\n\n if steps:\n assert axes is not None and len(axes) == len(steps)\n steps = np.asarray(steps)\n inputs.append(helper.make_tensor_value_info(\"steps\", TensorProto.INT32, list(axes.shape)))\n initializer.append(helper.make_tensor(\"steps\", TensorProto.INT32, list(steps.shape), steps))\n\n y = helper.make_node(\"Slice\", [\"data\", *slice_inputs], [\"out\"])\n\n nodes.append(y)\n graph = helper.make_graph(\n nodes,\n \"slice_test\",\n inputs=inputs,\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n initializer=initializer,\n )\n model = helper.make_model(graph, producer_name=\"slice_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, \"float32\", opset=10)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\n@tvm.testing.uses
_gpu\ndef test_slice():\n x = np.random.randn(20, 10, 5).astype(np.float32)\n _test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n _test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n _test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n _test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n _test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n _test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n _test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n _test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n _test_slice_iteration_v10(\n x,\n x[0:3, 0:10],\n starts=(0, 0),\n ends=(3, 10),\n axes=(0, 1),\n add_noop_to_input_attrs=[\"starts\"],\n )\n _test_slice_iteration_v10(\n x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=[\"ends\"]\n )\n _test_slice_iteration_v10(\n x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=[\"axes\"]\n )\n _test_slice_iteration_v10(\n x,\n x[:, 0:-1],\n starts=(0,),\n ends=(-1,),\n axes=(1,),\n add_noop_to_input_attrs=[\"starts\", \"ends\"],\n )\n _test_slice_iteration_v10(\n x,\n x[0:3, 0:10],\n starts=(0, 0),\n ends=(3, 10),\n axes=(0, 1),\n add_noop_to_input_attrs=[\"ends\", \"axes\"],\n )\n _test_slice_iteration_v10(\n x,\n x[:, :, 3:4],\n starts=(0, 0, 3),\n ends=(20, 10, 4),\n add_noop_to_input_attrs=[\"starts\", \"axes\"],\n )\n _test_slice_iteration_v10(\n x,\n x[:, 1:1000],\n starts=(1,),\n ends=(1000,),\n axes=(1,),\n add_noop_to_input_attrs=[\"starts\", \"ends\", \"axes\"],\n )\n x = np.random.randn(1, 1, 1, 128).astype(np.float32)\n _test_slice_iteration_v10(\n x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)\n )\n\n x = np.random.randn(4, 4).astype(np.float32)\n _test_slice_iteration_v10(\n x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)\n )\n _test_slice_iteration_v10(\n x,\n x[0::1, 1::2],\n starts=(0, 1),\n ends=(4, 4),\n axes=(0, 1),\n steps=(1, 2),\n )\n\n\ndef _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):\n indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)\n outdata = outfunc(indata, **npargs)\n\n y = helper.make_node(opname, [\"in\"], [\"out\"], **kwargs)\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=opname + \"_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\n@tvm.testing.uses_gpu\ndef test_floor():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, \"float32\", \"Floor\", {})\n\n\n@tvm.testing.uses_gpu\ndef test_ceil():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, \"float32\", \"Ceil\", {})\n\n\n@tvm.testing.uses_gpu\ndef test_clip():\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n np.clip,\n {\"a_min\": -1.0, \"a_max\": 1.0},\n \"float32\",\n \"Clip\",\n {\"min\": -1.0, \"max\": 1.0},\n )\n\n\n@tvm.testing.uses_gpu\ndef test_clip_min_max_as_inputs():\n input_shape = (2, 4, 5, 6)\n nodes = [\n make_constant_node(\"min\", onnx.TensorProto.FLOAT, (), [0.0]),\n 
make_constant_node(\"max\", onnx.TensorProto.FLOAT, (), [6.0]),\n ]\n input_names = [\"in\", \"min\", \"max\"]\n nodes.append(helper.make_node(\"Clip\", inputs=input_names, outputs=[\"out\"]))\n graph = helper.make_graph(\n nodes,\n \"clip_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(input_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_shape))],\n )\n model = helper.make_model(graph, producer_name=\"clip_test\")\n\n verify_with_ort(model, [input_shape], input_shape)\n\n\[email protected]_gpu\ndef test_round():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, \"float32\", \"Round\", {})\n\n\ndef _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):\n indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)\n\n outdata = outfunc(indata, **npargs)\n y = helper.make_node(opname, [\"in\"], [\"out\"], **kwargs)\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=opname + \"_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\[email protected]_gpu\ndef test_isinf():\n _test_finite_ops((2, 4, 5, 6), np.isinf, {}, \"float32\", \"IsInf\", {})\n\n\[email protected]_gpu\ndef test_isnan():\n _test_finite_ops((2, 4, 5, 6), np.isnan, {}, \"float32\", \"IsNaN\", {})\n\n\ndef verify_gather_nd(in_shape, indices, dtype):\n x = np.random.uniform(size=in_shape).astype(dtype)\n indices = np.array(indices, dtype=\"int32\")\n out_np = tvm.topi.testing.gather_nd_python(x, indices)\n\n y = helper.make_node(\"GatherND\", [\"in\", \"indices\"], [\"out\"])\n\n graph = helper.make_graph(\n [y],\n \"gather_test\",\n inputs=[\n helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_np.shape))],\n )\n model = helper.make_model(graph, producer_name=\"gather_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)\n tvm.testing.assert_allclose(out_np, tvm_out)\n\n\[email protected]_gpu\ndef test_gather_nd():\n verify_gather_nd((2, 2), [[0, 0], [1, 1]], \"int32\")\n verify_gather_nd((3, 3, 3), [[0, 1], [1, 0]], \"float32\")\n verify_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], \"float32\")\n\n\[email protected]_gpu\ndef test_onehot():\n indices_shape = [10]\n indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype=\"int32\")\n depth = 10\n values = np.asarray([0, 1])\n out_np = np.eye(depth)[indices_array.reshape(-1)]\n\n onehot_node = helper.make_node(\"OneHot\", [\"indices\", \"depth\", \"values\"], [\"out\"])\n\n graph = helper.make_graph(\n [onehot_node],\n \"onehot_test\",\n inputs=[\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, indices_shape),\n helper.make_tensor_value_info(\"depth\", TensorProto.INT32, [1]),\n helper.make_tensor_value_info(\"values\", TensorProto.INT32, values.shape),\n ],\n initializer=[\n helper.make_tensor(\"depth\", TensorProto.INT32, [1], [depth]),\n helper.make_tensor(\"values\", 
TensorProto.INT32, values.shape, values),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.INT32, out_np.shape)],\n )\n\n model = helper.make_model(graph, producer_name=\"onehot_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [indices_array], target, ctx, out_np.shape)\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_matmul():\n a_shape = (4, 3)\n b_shape = (3, 4)\n\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = np.random.uniform(size=b_shape).astype(\"float32\")\n out_np = np.matmul(a_array, b_array)\n\n mul_node = helper.make_node(\"MatMul\", [\"a\", \"b\"], [\"out\"])\n\n graph = helper.make_graph(\n [mul_node],\n \"matmul_test\",\n inputs=[\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"matmul_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx, out_np.shape)\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef verify_batch_matmul(a_shape, b_shape):\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = np.random.uniform(size=b_shape).astype(\"float32\")\n out_np = np.matmul(a_array, b_array)\n\n mul_node = helper.make_node(\"MatMul\", [\"a\", \"b\"], [\"out\"])\n\n graph = helper.make_graph(\n [mul_node],\n \"matmul_test\",\n inputs=[\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"matmul_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx, out_np.shape)\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_batch_matmul():\n verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4))\n verify_batch_matmul((2, 4, 3), (3, 4))\n verify_batch_matmul((2, 3, 4, 3), (3, 4))\n\n\ndef verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):\n in_array = np.random.uniform(size=shape).astype(dtype)\n\n if alpha is None and beta is None and bias is None:\n alpha = 0.0001\n beta = 0.75\n bias = 1.0\n node = onnx.helper.make_node(\"LRN\", inputs=[\"in\"], outputs=[\"out\"], size=nsize)\n else:\n node = onnx.helper.make_node(\n \"LRN\", inputs=[\"in\"], outputs=[\"out\"], alpha=alpha, beta=beta, bias=bias, size=nsize\n )\n\n graph = helper.make_graph(\n [node],\n \"lrn_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(shape))],\n )\n model = helper.make_model(graph, producer_name=\"lrn_test\")\n\n def _get_python_lrn():\n square_sum = np.zeros(shape).astype(dtype)\n for n, c, h, w in np.ndindex(in_array.shape):\n square_sum[n, c, h, w] = sum(\n in_array[\n n,\n max(0, c - int(math.floor((nsize - 1) / 2))) : min(\n 5, c + int(math.ceil((nsize - 1) / 2)) + 1\n ),\n h,\n w,\n ]\n ** 2\n )\n py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)\n return py_out\n\n for target, ctx in 
tvm.testing.enabled_targets():\n input_name = model.graph.input[0].name\n py_out = _get_python_lrn()\n tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, \"float32\")\n tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_lrn():\n verify_lrn((5, 5, 5, 5), 3, \"float32\")\n verify_lrn((5, 5, 5, 5), 3, \"float32\", alpha=0.0002, beta=0.5, bias=2.0)\n\n\ndef verify_instance_norm(shape, axis=1):\n def _get_python_instance_norm(x, gamma, beta, epsilon=1e-5):\n dims_x = len(x.shape)\n axis = tuple(range(2, dims_x))\n mean = np.mean(x, axis=axis, keepdims=True)\n var = np.var(x, axis=axis, keepdims=True)\n dim_ones = (1,) * (dims_x - 2)\n gamma = gamma.reshape(-1, *dim_ones)\n beta = beta.reshape(-1, *dim_ones)\n return gamma * (x - mean) / np.sqrt(var + epsilon) + beta\n\n x = np.random.randn(*shape).astype(np.float32)\n gamma = np.random.randn(shape[1]).astype(np.float32)\n beta = np.random.randn(shape[1]).astype(np.float32)\n epsilon = 1e-5\n y = _get_python_instance_norm(x, gamma, beta, epsilon).astype(np.float32)\n\n node = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"gamma\", \"beta\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n )\n graph = helper.make_graph(\n [node],\n \"instance_norm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(shape)),\n helper.make_tensor_value_info(\"gamma\", TensorProto.FLOAT, (shape[1],)),\n helper.make_tensor_value_info(\"beta\", TensorProto.FLOAT, (shape[1],)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(shape))],\n )\n model = helper.make_model(graph, producer_name=\"instance_norm_test\")\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, gamma, beta], target, ctx, shape, \"float32\")\n tvm.testing.assert_allclose(y, tvm_out, rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_instance_norm():\n verify_instance_norm((2, 3, 4, 5))\n verify_instance_norm((32, 64, 80, 64))\n verify_instance_norm((8, 6, 5))\n verify_instance_norm((8, 7, 6, 5, 4))\n\n\ndef _test_upsample_nearest():\n scale = 2\n in_shape = (1, 1, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\"], [\"out\"], mode=\"nearest\", scales=[1.0, 1.0, 2.0, 2.0])\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.upsampling_python(in_array, (scale, scale), \"NCHW\")\n\n graph = helper.make_graph(\n [y],\n \"upsample_nearest_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_nearest_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out)\n\n\ndef _test_upsample3d_nearest():\n scale = 2\n in_shape = (1, 1, 3, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)\n y = helper.make_node(\n \"Upsample\", [\"in\"], [\"out\"], mode=\"nearest\", scales=[1.0, 1.0, 2.0, 2.0, 2.0]\n )\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.upsampling3d_python(in_array, (scale, scale, scale), \"NCDHW\")\n\n graph = helper.make_graph(\n [y],\n \"upsample_nearest_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n 
outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_nearest_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out)\n\n\ndef _test_upsample_bilinear():\n scale = 2\n in_shape = (1, 1, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\"], [\"out\"], mode=\"linear\", scales=[1.0, 1.0, 2.0, 2.0])\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.bilinear_resize_python(in_array, (3 * scale, 3 * scale), \"NCHW\")\n\n graph = helper.make_graph(\n [y],\n \"upsample_bilinear_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_bilinear_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef _test_upsample_bilinear_opset9():\n scale = 2\n in_shape = (1, 1, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\", \"scales\"], [\"out\"], mode=\"linear\")\n scales = [1, 1, 2, 2]\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.bilinear_resize_python(in_array, (3 * scale, 3 * scale), \"NCHW\")\n\n ref_node = helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"const\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=TensorProto.FLOAT,\n dims=scales,\n vals=np.random.random(scales).flatten().astype(float),\n ),\n )\n\n shape_node = helper.make_node(\"Shape\", [\"const\"], [\"scales\"])\n\n graph = helper.make_graph(\n [ref_node, shape_node, y],\n \"upsample_bilinear_opset9_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_bilinear_opset9_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef _test_upsample3d_trilinear():\n scale = 2\n in_shape = (1, 1, 3, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\", \"scales\"], [\"out\"], mode=\"linear\")\n scales = [1.0, 1.0, 2.0, 2.0, 2.0]\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.trilinear_resize3d_python(\n in_array,\n (3 * scale, 3 * scale, 3 * scale),\n \"NCDHW\",\n coordinate_transformation_mode=\"half_pixel\",\n )\n\n ref_array = np.array(scales)\n ref_node = helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"scales\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=TensorProto.FLOAT,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(float),\n ),\n )\n\n graph = helper.make_graph(\n [ref_node, y],\n \"upsample_trilinear_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", 
TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_trilinear_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_upsample():\n _test_upsample_nearest()\n _test_upsample_bilinear()\n _test_upsample_bilinear_opset9()\n _test_upsample3d_nearest()\n _test_upsample3d_trilinear()\n\n\ndef _test_softmax(inshape, axis):\n opname = \"Softmax\"\n indata = np.random.uniform(size=inshape).astype(np.float32)\n outshape = inshape\n outdata = tvm.topi.testing.softmax_python(indata)\n if isinstance(axis, int):\n y = helper.make_node(opname, [\"in\"], [\"out\"], axis=axis)\n elif axis is None:\n y = helper.make_node(opname, [\"in\"], [\"out\"])\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=opname + \"_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, indata, target, ctx, outshape, \"float32\")\n tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_softmax():\n _test_softmax((1, 10), None)\n _test_softmax((1, 10), 1)\n\n\ndef verify_min(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n b_np = np.min((a_np1, a_np2, a_np3), axis=0)\n\n min_node = helper.make_node(\"Min\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [min_node],\n \"Min_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"Min_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_forward_min():\n verify_min((1, 3, 20, 20))\n verify_min((20, 20))\n\n\ndef verify_max(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n b_np = np.max((a_np1, a_np2, a_np3), axis=0)\n\n max_node = helper.make_node(\"Max\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [max_node],\n \"Max_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"Max_test\")\n\n for target, ctx in 
tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_forward_max():\n verify_max((1, 3, 20, 20))\n verify_max((20, 20))\n\n\ndef verify_mean(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n b_np = np.mean((a_np1, a_np2, a_np3), axis=0)\n\n mean_node = helper.make_node(\"Mean\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [mean_node],\n \"Mean_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"Mean_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_forward_mean():\n verify_mean((1, 3, 20, 20))\n verify_mean((20, 20))\n\n\ndef verify_hardsigmoid(input_dim, alpha, beta):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n\n b_np = np.clip(a_np1 * alpha + beta, 0, 1)\n\n hardsigmoid_node = helper.make_node(\"HardSigmoid\", [\"a_np1\"], [\"out\"], alpha=alpha, beta=beta)\n\n graph = helper.make_graph(\n [hardsigmoid_node],\n \"HardSigmoid_test\",\n inputs=[helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"HardSigmoid_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_forward_hardsigmoid():\n verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)\n verify_hardsigmoid((20, 20), 0.3, 0.4)\n\n\ndef verify_argmin(input_dim, axis=None, keepdims=None):\n def _argmin_numpy(data, axis=0, keepdims=True):\n result = np.argmin(data, axis=axis)\n if keepdims == 1:\n result = np.expand_dims(result, axis)\n return result.astype(data.dtype)\n\n a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)\n if keepdims is None and axis is None:\n b_np = _argmin_numpy(a_np1)\n node = onnx.helper.make_node(\"ArgMin\", inputs=[\"a_np1\"], outputs=[\"out\"])\n elif axis is None:\n b_np = _argmin_numpy(a_np1, keepdims=keepdims)\n node = onnx.helper.make_node(\"ArgMin\", inputs=[\"a_np1\"], outputs=[\"out\"], keepdims=keepdims)\n elif keepdims is None:\n b_np = _argmin_numpy(a_np1, axis=axis)\n node = onnx.helper.make_node(\"ArgMin\", inputs=[\"a_np1\"], outputs=[\"out\"], axis=axis)\n else:\n b_np = _argmin_numpy(a_np1, axis=axis, keepdims=keepdims)\n node = onnx.helper.make_node(\n \"ArgMin\", inputs=[\"a_np1\"], outputs=[\"out\"], axis=axis, keepdims=keepdims\n )\n graph = helper.make_graph(\n [node],\n \"argmin_test\",\n inputs=[helper.make_tensor_value_info(\"a_np1\", TensorProto.INT32, list(a_np1.shape))],\n 
outputs=[helper.make_tensor_value_info(\"out\", TensorProto.INT32, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"argmin_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef verify_argmax(input_dim, axis=None, keepdims=None):\n def _argmax_numpy(data, axis=0, keepdims=True):\n result = np.argmax(data, axis=axis)\n if keepdims == 1:\n result = np.expand_dims(result, axis)\n return result.astype(data.dtype)\n\n a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)\n if keepdims is None and axis is None:\n b_np = _argmax_numpy(a_np1)\n node = onnx.helper.make_node(\"ArgMax\", inputs=[\"a_np1\"], outputs=[\"out\"])\n elif axis is None:\n b_np = _argmax_numpy(a_np1, keepdims=keepdims)\n node = onnx.helper.make_node(\"ArgMax\", inputs=[\"a_np1\"], outputs=[\"out\"], keepdims=keepdims)\n elif keepdims is None:\n b_np = _argmax_numpy(a_np1, axis=axis)\n node = onnx.helper.make_node(\"ArgMax\", inputs=[\"a_np1\"], outputs=[\"out\"], axis=axis)\n else:\n b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims)\n node = onnx.helper.make_node(\n \"ArgMax\", inputs=[\"a_np1\"], outputs=[\"out\"], axis=axis, keepdims=keepdims\n )\n\n graph = helper.make_graph(\n [node],\n \"argmax_test\",\n inputs=[helper.make_tensor_value_info(\"a_np1\", TensorProto.INT32, list(a_np1.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.INT32, list(b_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"argmax_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)\n tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_forward_arg_min_max():\n \"\"\"Verify argmin and argmax\"\"\"\n verify_argmin([3, 4, 4])\n verify_argmax([3, 4, 4])\n verify_argmin([3, 4, 4], axis=1)\n verify_argmax([3, 4, 4], axis=0)\n verify_argmin([3, 4, 4], keepdims=0)\n verify_argmax([3, 4, 4], keepdims=1)\n for axis in [None, 0, 1, 2]:\n for keepdims in [None, True, False]:\n verify_argmin([3, 4, 4], axis, keepdims)\n verify_argmax([3, 4, 4], axis, keepdims)\n\n\ndef verify_constantofshape(input_dim, value, dtype):\n out = np.empty(shape=input_dim, dtype=dtype)\n out.fill(value)\n\n fill_node = helper.make_node(\n \"ConstantOfShape\",\n [\"input\"],\n [\"output\"],\n value=helper.make_tensor(\n \"value\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1,), (value,)\n ),\n )\n\n inputs = [helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, input_dim)]\n\n graph = helper.make_graph(\n [fill_node],\n \"fill_test\",\n inputs,\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(out.shape))],\n initializer=[helper.make_tensor(\"input\", TensorProto.INT32, (len(input_dim),), input_dim)],\n )\n\n model = helper.make_model(graph, producer_name=\"fill_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [], target, ctx, out.shape)\n\n tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_constantofshape():\n verify_constantofshape((2, 3, 4, 5), 10, \"float32\")\n verify_constantofshape((3, 3), 0, \"int32\")\n verify_constantofshape((1, 2, 3), -1, \"float32\")\n\n\ndef verify_pad(indata, pads, mode=\"constant\", value=0.0):\n indata = 
np.array(indata).astype(np.float32)\n # numpy expected result\n len_dim = len(pads) // 2\n np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]\n # onnx graph\n if mode in [\"edge\", \"reflect\"]:\n outdata = np.pad(indata, pad_width=np_pads, mode=mode)\n node = helper.make_node(\n \"Pad\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n mode=mode,\n pads=pads,\n )\n else:\n outdata = np.pad(indata, pad_width=np_pads, mode=\"constant\", constant_values=value)\n node = helper.make_node(\n \"Pad\", inputs=[\"input\"], outputs=[\"output\"], mode=\"constant\", pads=pads, value=value\n )\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"pad_test\")\n # tvm result\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, \"float32\", opset=2)\n tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef verify_pad_v11(indata, pads, mode=\"constant\", value=0.0):\n indata = np.array(indata).astype(np.float32)\n # numpy expected result\n len_dim = len(pads) // 2\n np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]\n pads = np.array(pads)\n # onnx graph\n if mode in [\"edge\", \"reflect\"]:\n inputs = [indata, pads]\n outdata = np.pad(indata, pad_width=np_pads, mode=mode)\n node = helper.make_node(\"Pad\", inputs=[\"input\", \"pads\"], outputs=[\"output\"], mode=mode)\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"pads\", TensorProto.INT64, (len(pads),)),\n ],\n initializer=[helper.make_tensor(\"pads\", TensorProto.INT64, (len(pads),), pads)],\n outputs=[\n helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))\n ],\n )\n else:\n inputs = [indata, pads, np.array([value])]\n outdata = np.pad(indata, pad_width=np_pads, mode=\"constant\", constant_values=value)\n node = helper.make_node(\n \"Pad\", inputs=[\"input\", \"pads\", \"constant_value\"], outputs=[\"output\"], mode=\"constant\"\n )\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"pads\", TensorProto.INT64, (len(pads),)),\n helper.make_tensor_value_info(\"constant_value\", TensorProto.FLOAT, (1,)),\n ],\n initializer=[\n helper.make_tensor(\"pads\", TensorProto.INT64, (len(pads),), pads),\n helper.make_tensor(\"constant_value\", TensorProto.FLOAT, (1,), [value]),\n ],\n outputs=[\n helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))\n ],\n )\n model = helper.make_model(graph, producer_name=\"pad_test\")\n # tvm result\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, inputs, target, ctx, outdata.shape, \"float32\", opset=11)\n tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_pad():\n verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], \"constant\", 0.0)\n verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], \"constant\", 0.0)\n verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], \"constant\", 5.0)\n verify_pad(np.random.randn(1, 3, 4, 
5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"edge\")\n verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"reflect\")\n\n verify_pad_v11(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], \"constant\", 0.0)\n verify_pad_v11(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], \"constant\", 0.0)\n verify_pad_v11(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], \"constant\", 5.0)\n verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"edge\")\n verify_pad_v11(\n np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"reflect\"\n )\n\n\ndef verify_reduce_func(func, data, axis, keepdims):\n inshape = data.shape\n outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape\n\n if axis:\n node = onnx.helper.make_node(\n func, inputs=[\"x\"], outputs=[\"y\"], axes=axis, keepdims=keepdims\n )\n else:\n node = onnx.helper.make_node(func, inputs=[\"x\"], outputs=[\"y\"], keepdims=keepdims)\n\n graph = helper.make_graph(\n [node],\n \"reduce_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=\"reduce_test\")\n\n verify_with_ort_with_inputs(model, [data], outshape)\n\n\[email protected]_gpu\ndef test_all_reduce_funcs():\n funcs = [\n \"ReduceMax\",\n \"ReduceMean\",\n \"ReduceMin\",\n \"ReduceProd\",\n \"ReduceSum\",\n \"ReduceSumSquare\",\n \"ReduceLogSum\",\n \"ReduceLogSumExp\",\n \"ReduceL1\",\n \"ReduceL2\",\n ]\n\n for func in funcs:\n for keepdims in [True, False]:\n verify_reduce_func(\n func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n\ndef verify_split(indata, outdatas, split, axis=0):\n indata = np.array(indata).astype(np.float32)\n outdatas = [np.array(o).astype(np.float32) for o in outdatas]\n if split:\n split_index = range(len(split))\n else:\n split_index = range(len(outdatas))\n node = helper.make_node(\n \"Split\",\n inputs=[\"input\"],\n outputs=[\"output_{}\".format(i) for i in range(len(split_index))],\n axis=axis,\n split=split,\n )\n graph = helper.make_graph(\n [node],\n \"split_test\",\n inputs=[helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[\n helper.make_tensor_value_info(\n \"output_{}\".format(i), TensorProto.FLOAT, list(outdatas[i].shape)\n )\n for i in range(len(split_index))\n ],\n )\n model = helper.make_model(graph, producer_name=\"split_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n output_shape = [o.shape for o in outdatas]\n output_type = [\"float32\", \"float32\", \"float32\"]\n tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)\n for o, t in zip(outdatas, tvm_out):\n tvm.testing.assert_allclose(o, t)\n\n\[email protected]_gpu\ndef test_split():\n # 1D\n verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 
6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)\n verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)\n # 2D\n verify_split(\n [[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],\n [[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],\n [2, 2],\n 1,\n )\n # Split evenly (unstack)\n verify_split([1, 2, 3], [[1], [2], [3]], False)\n\n\n@tvm.testing.uses_gpu\ndef test_binary_ops():\n in_shape = (1, 2, 3, 3)\n dtype = \"float32\"\n out_shape = in_shape\n\n def verify_binary_ops(op, x, y, out_np, x_name=\"in1\", y_name=\"in2\", broadcast=None):\n if broadcast is None:\n z = helper.make_node(op, [x_name, y_name], [\"out\"])\n else:\n z = helper.make_node(op, [x_name, y_name], [\"out\"], broadcast=1)\n graph = helper.make_graph(\n [z],\n \"_test\",\n inputs=[\n helper.make_tensor_value_info(x_name, TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(y_name, TensorProto.FLOAT, list(in_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"_test\")\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, y], target, ctx)\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n x = np.random.uniform(size=in_shape).astype(dtype)\n y = np.random.uniform(size=in_shape).astype(dtype)\n z = np.random.uniform(size=(3,)).astype(dtype)\n verify_binary_ops(\"Add\", x, y, x + y, broadcast=None)\n verify_binary_ops(\"Add\", x, z, x + z, broadcast=True)\n verify_binary_ops(\"Sub\", x, y, x - y, broadcast=None)\n verify_binary_ops(\"Sub\", x, z, x - z, broadcast=True)\n verify_binary_ops(\"Mul\", x, y, x * y, broadcast=None)\n verify_binary_ops(\"Mul\", x, z, x * z, broadcast=True)\n verify_binary_ops(\"Mul\", x, x, x * x, x_name=\"in1\", y_name=\"in1\", broadcast=None)\n verify_binary_ops(\"Div\", x, y, x / y, broadcast=None)\n verify_binary_ops(\"Div\", x, z, x / z, broadcast=True)\n verify_binary_ops(\"Sum\", x, y, x + y, broadcast=None)\n verify_binary_ops(\"Greater\", x, y, x > y, broadcast=True)\n verify_binary_ops(\"Less\", x, y, x < y, broadcast=True)\n verify_binary_ops(\"Equal\", x, y, x == y, broadcast=True)\n\n\n@tvm.testing.uses_gpu\ndef test_single_ops():\n in_shape = (1, 2, 3, 3)\n dtype = \"float32\"\n out_shape = in_shape\n\n def verify_single_ops(op, x, out_np, rtol=1e-5, atol=1e-5):\n z = helper.make_node(op, [\"in1\"], [\"out\"])\n graph = helper.make_graph(\n [z],\n \"_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.FLOAT, list(in_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"_test\")\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x], target, ctx)\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)\n\n x = np.random.uniform(size=in_shape).astype(dtype)\n verify_single_ops(\"Neg\", x, -x)\n verify_single_ops(\"Abs\", x, np.abs(x))\n verify_single_ops(\"Reciprocal\", x, 1 / x)\n verify_single_ops(\"Sqrt\", x, np.sqrt(x))\n verify_single_ops(\"Relu\", x, np.maximum(x, 0))\n verify_single_ops(\"Exp\", x, np.exp(x))\n verify_single_ops(\"Log\", x, np.log(x))\n verify_single_ops(\"ACos\", x, np.arccos(x))\n verify_single_ops(\"ACosh\", x, np.arccosh(x))\n verify_single_ops(\"ASin\", x, np.arcsin(x))\n verify_single_ops(\"ASinh\", x, np.arcsinh(x))\n 
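# Domain note: x comes from np.random.uniform, i.e. the half-open interval\n # [0, 1), so the ACos/ASin/ATanh references stay inside their real domains,\n # but np.arccosh is only real-valued for inputs >= 1, so the ACosh reference\n # above is NaN everywhere; assert_allclose (NumPy's, which tvm.testing wraps)\n # treats NaN == NaN as equal by default, which is why that case can still\n # pass. A valid-domain variant would look like this (illustrative, not part\n # of the original suite):\n # x_acosh = x + 1.0  # shift inputs into [1, 2)\n # verify_single_ops(\"ACosh\", x_acosh, np.arccosh(x_acosh))\n 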
verify_single_ops(\"ATan\", x, np.arctan(x))\n verify_single_ops(\"ATanh\", x, np.arctanh(x))\n verify_single_ops(\"Cos\", x, np.cos(x))\n verify_single_ops(\"Cosh\", x, np.cosh(x))\n verify_single_ops(\"Sin\", x, np.sin(x))\n verify_single_ops(\"Sinh\", x, np.sinh(x))\n verify_single_ops(\"Tan\", x, np.tan(x))\n verify_single_ops(\"Tanh\", x, np.tanh(x))\n verify_single_ops(\"Sigmoid\", x, 1 / (1 + np.exp(-x)))\n verify_single_ops(\"Softsign\", x, x / (1 + np.abs(x)))\n verify_single_ops(\"SoftPlus\", x, np.log(1 + np.exp(x)))\n\n\[email protected]_gpu\ndef test_leaky_relu():\n def leaky_relu_x(x, alpha):\n return np.where(x >= 0, x, x * alpha)\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6), leaky_relu_x, {\"alpha\": 0.25}, \"float32\", \"LeakyRelu\", {\"alpha\": 0.25}\n )\n\n\[email protected]_gpu\ndef test_elu():\n def elu_x(x, alpha):\n return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6), elu_x, {\"alpha\": 0.25}, \"float32\", \"Elu\", {\"alpha\": 0.25}\n )\n\n\[email protected]_gpu\ndef test_selu():\n def selu_x(x, alpha, gamma):\n return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n selu_x,\n {\"alpha\": 0.25, \"gamma\": 0.3},\n \"float32\",\n \"Selu\",\n {\"alpha\": 0.25, \"gamma\": 0.3},\n )\n\n\[email protected]_gpu\ndef test_prelu():\n def verify_prelu(x_shape, a_shape):\n node = helper.make_node(\"PRelu\", inputs=[\"X\", \"slope\"], outputs=[\"Y\"])\n\n graph = helper.make_graph(\n [node],\n \"prelu_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"slope\", TensorProto.FLOAT, list(a_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(x_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"prelu_test\")\n\n verify_with_ort(model, [x_shape, a_shape], list(x_shape))\n\n verify_prelu([3, 4, 5, 6], [1, 4, 1, 1])\n verify_prelu([1, 8, 5, 6], [1, 8, 1, 1])\n verify_prelu([2, 12, 16, 16], [1, 12, 1, 1])\n verify_prelu([2, 12, 16, 16], [1]) # Test alpha broadcasting.\n\n\[email protected]_gpu\ndef test_ThresholdedRelu():\n def ThresholdedRelu_x(x, alpha):\n out_np = np.clip(x, alpha, np.inf)\n out_np[out_np == alpha] = 0\n return out_np\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n ThresholdedRelu_x,\n {\"alpha\": 0.25},\n \"float32\",\n \"ThresholdedRelu\",\n {\"alpha\": 0.25},\n )\n\n\[email protected]_gpu\ndef test_ScaledTanh():\n def ScaledTanh_x(x, alpha, beta):\n return alpha * np.tanh(beta * x)\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n ScaledTanh_x,\n {\"alpha\": 0.25, \"beta\": 0.3},\n \"float32\",\n \"ScaledTanh\",\n {\"alpha\": 0.25, \"beta\": 0.3},\n )\n\n\[email protected]_gpu\ndef test_ParametricSoftplus():\n def ParametricSoftplus_x(x, alpha, beta):\n return alpha * np.log(np.exp(beta * x) + 1)\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n ParametricSoftplus_x,\n {\"alpha\": 0.25, \"beta\": 0.3},\n \"float32\",\n \"ParametricSoftplus\",\n {\"alpha\": 0.25, \"beta\": 0.3},\n )\n\n\[email protected]_gpu\ndef test_Scale():\n def Scale_x(x, scale):\n return scale * x\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6), Scale_x, {\"scale\": 0.25}, \"float32\", \"Scale\", {\"scale\": 0.25}\n )\n\n\[email protected]_gpu\ndef test_LogSoftmax():\n _test_onnx_op_elementwise(\n (1, 4), tvm.topi.testing.log_softmax_python, {}, \"float32\", \"LogSoftmax\", {\"axis\": 1}\n )\n\n\ndef check_torch_conversion(model, input_size):\n dummy_input = 
torch.randn(*input_size)\n file_name = \"{}.onnx\".format(model.__name__)\n # Set verbose=True for more output\n torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)\n onnx_model = onnx.load(file_name)\n input_data = np.random.uniform(size=input_size).astype(\"float32\")\n verify_with_ort_with_inputs(onnx_model, [input_data])\n\n\n@tvm.testing.uses_gpu\ndef test_resnet():\n check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224))\n # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))\n\n\n# def test_alexnet():\n# Torch's ONNX export does not support the adaptive pooling used by AlexNet?\n# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))\n\n# Torch's ONNX export does not support the adaptive pooling used by vgg16?\n# def test_vgg16():\n# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_squeezenet():\n# # Torch's ONNX export does not support the max pooling used by Squeezenet\n# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))\n\n\n@tvm.testing.uses_gpu\ndef test_densenet():\n check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224))\n\n\n@tvm.testing.uses_gpu\ndef test_inception():\n check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224))\n\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_googlenet():\n# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_shufflenetv2():\n# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))\n\n\n@tvm.testing.uses_gpu\ndef test_sign():\n def Sign_x(x):\n return np.sign(x)\n\n _test_onnx_op_elementwise((3, 4, 5, 6), Sign_x, {}, \"float32\", \"Sign\", {})\n\n\ndef verify_not(indata, dtype):\n x = indata.astype(dtype)\n outdata = np.logical_not(x)\n\n node = helper.make_node(\n \"Not\",\n inputs=[\"in\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"not_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.BOOL, list(x.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"not_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x], target, ctx, outdata.shape)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\n@tvm.testing.uses_gpu\ndef test_not():\n # 2d\n verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool)\n # 3d\n verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool)\n # 4d\n verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool)\n\n\ndef verify_and(indata, dtype):\n x = indata[0].astype(dtype)\n y = indata[1].astype(dtype)\n outdata = np.logical_and(x, y)\n\n node = helper.make_node(\n \"And\",\n inputs=[\"in1\", \"in2\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"and_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.BOOL, list(x.shape)),\n helper.make_tensor_value_info(\"in2\", TensorProto.BOOL, list(y.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"and_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)\n tvm.testing.assert_allclose(outdata, 
tvm_out)\n\n\[email protected]_gpu\ndef test_and():\n # 2d\n x = np.random.randn(3, 4) > 0\n y = np.random.randn(3, 4) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(3, 4, 5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 4d\n x = np.random.randn(3, 4, 5, 6) > 0\n y = np.random.randn(3, 4, 5, 6) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d vs 1d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d vs 2d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(4, 5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n\ndef verify_tile_v1(indata, outdata, **kwargs):\n node = helper.make_node(\"Tile\", inputs=[\"in\"], outputs=[\"out\"], **kwargs)\n graph = helper.make_graph(\n [node],\n \"tile_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"tile_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=1)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\ndef verify_tile_v6(indata, repeats, outdata):\n node = helper.make_node(\"Tile\", inputs=[\"input\", \"repeats\"], outputs=[\"out\"])\n graph = helper.make_graph(\n [node],\n \"tile_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"repeats\", TensorProto.INT64, list(repeats.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n initializer=[\n helper.make_tensor(\"repeats\", TensorProto.INT64, list(repeats.shape), repeats)\n ],\n )\n\n model = helper.make_model(graph, producer_name=\"tile_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=6)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\[email protected]_gpu\ndef test_tile():\n x = np.random.rand(2, 3, 4, 5).astype(np.float32)\n repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n z = np.tile(x, repeats)\n verify_tile_v1(x, z, repeats=repeats)\n verify_tile_v6(x, repeats, z)\n\n\ndef verify_erf(indata, outdata):\n node = helper.make_node(\"Erf\", inputs=[\"in\"], outputs=[\"out\"])\n graph = helper.make_graph(\n [node],\n \"erf_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"erf_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\[email protected]_gpu\ndef test_erf():\n x = np.random.rand(2, 3, 4, 6).astype(np.float32)\n z = scipy.special.erf(x)\n verify_erf(x, z)\n\n\ndef verify_where(condition, x, y, dtype, outdata):\n node = helper.make_node(\"Where\", inputs=[\"condition\", \"x\", \"y\"], outputs=[\"out\"])\n graph = helper.make_graph(\n [node],\n \"where_test\",\n inputs=[\n helper.make_tensor_value_info(\"condition\", TensorProto.BOOL, list(condition.shape)),\n helper.make_tensor_value_info(\"x\", dtype, list(x.shape)),\n helper.make_tensor_value_info(\"y\", dtype, list(y.shape)),\n ],\n 
outputs=[helper.make_tensor_value_info(\"out\", dtype, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"where_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [condition, x, y], target, ctx, outdata.shape)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\n@tvm.testing.uses_gpu\ndef test_where():\n condition = np.array([[1, 0], [1, 1]], dtype=bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.int64)\n y = np.array([[9, 8], [7, 6]], dtype=np.int64)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.INT64, outdata)\n\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[9, 8], [7, 6]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array(1, dtype=np.float32)\n y = np.array([2], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array([2], dtype=np.float32)\n y = np.array(1, dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n condition = np.array(1, dtype=bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[5, 6], [7, 8]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[1], [7]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n\ndef verify_or(indata, dtype):\n x = indata[0].astype(dtype)\n y = indata[1].astype(dtype)\n outdata = np.logical_or(x, y)\n\n node = helper.make_node(\n \"Or\",\n inputs=[\"in1\", \"in2\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"or_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.BOOL, list(x.shape)),\n helper.make_tensor_value_info(\"in2\", TensorProto.BOOL, list(y.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"or_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)\n tvm.testing.assert_allclose(outdata, tvm_out)\n\n\n@tvm.testing.uses_gpu\ndef test_or():\n # 2d\n x = np.random.randn(3, 4) > 0\n y = np.random.randn(3, 4) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(3, 4, 5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 4d\n x = np.random.randn(3, 4, 5, 6) > 0\n y = np.random.randn(3, 4, 5, 6) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d vs 1d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d vs 2d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(4, 5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n\n@tvm.testing.uses_gpu\ndef test_batch_norm():\n def verify_batch_norm(in_shape):\n batchnorm = onnx.helper.make_node(\n \"BatchNormalization\", inputs=[\"x\", \"scale\", \"B\", \"mean\", \"var\"], outputs=[\"Y\"]\n )\n\n graph = helper.make_graph(\n [batchnorm],\n \"batchnorm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"scale\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"B\", TensorProto.FLOAT, [in_shape[1]]),\n 
helper.make_tensor_value_info(\"mean\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"var\", TensorProto.FLOAT, [in_shape[1]]),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(in_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"batchnorm_test\")\n # X, scale, b, mean, var\n inshapes = [in_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]\n verify_with_ort(model, inshapes, in_shape)\n\n verify_batch_norm([1, 3, 224, 224])\n verify_batch_norm([1, 3, 24, 24])\n verify_batch_norm([16, 3, 24, 24])\n verify_batch_norm([16, 16, 24, 24])\n verify_batch_norm([16, 16, 10, 10])\n\n\[email protected]_gpu\ndef test_batch_norm_dynamic_subgraph():\n def verify_batch_norm_dynamic_subgraph(in_shape, o_shape):\n batchnorm = onnx.helper.make_node(\n \"BatchNormalization\", inputs=[\"x\", \"scale\", \"B\", \"mean\", \"var\"], outputs=[\"Y\"]\n )\n\n shape_node = helper.make_node(\"Shape\", [\"Y\"], [\"shape\"])\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"shape\"], [\"out\"])\n graph = helper.make_graph(\n [batchnorm, shape_node, reshape_node],\n \"batchnorm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(o_shape)),\n helper.make_tensor_value_info(\"scale\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"B\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"mean\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"var\", TensorProto.FLOAT, [in_shape[1]]),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(in_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"batchnorm_test\")\n # X, inp, scale, b, mean, var\n inshapes = [in_shape, o_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]\n verify_with_ort(model, inshapes, in_shape, use_vm=False)\n\n verify_batch_norm_dynamic_subgraph([16, 16, 10, 10], [160, 160])\n\n\ndef verify_conv(\n x_shape,\n w_shape,\n y_shape,\n padding,\n kernel_shape,\n strides,\n dilations,\n auto_pad=\"NOTSET\",\n unset_pad=False,\n):\n if unset_pad:\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n # groups=1\n )\n elif padding is None:\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n # groups=1\n auto_pad=auto_pad,\n )\n else:\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n # groups=1\n pads=padding,\n )\n\n graph = helper.make_graph(\n [node],\n \"conv_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"conv_test\")\n\n verify_with_ort(model, [x_shape, w_shape], y_shape)\n\n\[email protected]_gpu\ndef test_conv():\n def repeat(N, D):\n return tuple([N for _ in range(D)])\n\n for D in [1, 2, 3]:\n # Convolution with padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, 
D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution without padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with autopadding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with valid autopadding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"VALID\",\n )\n # Convolution with unset padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n unset_pad=True,\n )\n # Convolution with non-uniform stride\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(2, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with dilation\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(2, D),\n repeat(3, D),\n repeat(1, D),\n repeat(2, D),\n )\n\n\ndef verify_convtranspose(x_shape, w_shape, y_shape, p):\n node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n strides=[3, 2],\n group=1,\n kernel_shape=[3, 3],\n pads=p,\n )\n\n graph = helper.make_graph(\n [node],\n \"verify_convtranspose_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"convtranspose_test\")\n verify_with_ort(model, [x_shape, w_shape], y_shape)\n\n\n@tvm.testing.uses_gpu\ndef test_convtranspose():\n # Convolution Transpose with padding\n # (1, 1, 3, 3) input tensor\n # (1, 2, 3, 3) tensor for convolution weights\n # (1, 2, 7, 3) output tensor\n # [1, 2, 1, 2] list for pads\n verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])\n\n\n@tvm.testing.uses_gpu\ndef test_unsqueeze_constant():\n from torch.nn import Linear, Sequential, Module\n\n class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n import tempfile\n\n with tempfile.NamedTemporaryFile() as fp:\n file_name = fp.name\n input_size = (1, 16, 32, 32)\n dummy_input = torch.randn(*input_size)\n layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))\n torch.onnx.export(layer, dummy_input, file_name, export_params=True)\n\n onnx_model = onnx.load(file_name)\n relay.frontend.from_onnx(onnx_model, {\"0\": input_size})\n\n\ndef verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad=\"NOTSET\"):\n x_np = np.random.uniform(size=x_shape).astype(\"float32\")\n\n if mode == \"max\":\n node_type = \"MaxPool\"\n elif mode == \"average\":\n node_type = \"AveragePool\"\n else:\n raise ValueError(\"Pool method {} is not supported.\".format(mode))\n\n pool_node = helper.make_node(\n node_type, inputs=[\"x\"], outputs=[\"y\"], kernel_shape=kernel_shape, strides=strides\n )\n\n if pads is None:\n pad_attr = helper.make_attribute(\"auto_pad\", auto_pad)\n else:\n pad_attr = helper.make_attribute(\"pads\", pads)\n 
pool_node.attribute.append(pad_attr)\n\n if mode == \"max\":\n storage_attr = helper.make_attribute(\"storage_order\", 0)\n pool_node.attribute.append(storage_attr)\n\n graph = helper.make_graph(\n [pool_node],\n \"pooling_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"pooling_test\")\n verify_with_ort(model, [x_shape], out_shape)\n\n\[email protected]_gpu\ndef test_pooling():\n for mode in [\"max\", \"average\"]:\n # Pool1D\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[1],\n pads=[1, 1],\n out_shape=[1, 1, 32],\n mode=mode,\n )\n # Pool2D\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[1, 1],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 32, 32],\n mode=mode,\n )\n\n # Pool1D with stride\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[2],\n pads=[1, 1],\n out_shape=[1, 1, 16],\n mode=mode,\n )\n # Pool2D with stride\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[2, 2],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 16, 16],\n mode=mode,\n )\n\n # Pool1D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[2],\n pads=None,\n out_shape=[1, 1, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n # Pool2D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool3D with stride\n verify_pooling(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n strides=[2, 2, 2],\n pads=[1, 1, 1, 1, 1, 1],\n out_shape=[1, 1, 16, 16, 16],\n mode=mode,\n )\n\n # Pool3D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n strides=[2, 2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n\n\ndef verify_mod(x_shape, y_shape, fmod, out_shape, dtype=\"float32\"):\n x_np = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)\n y_np = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)\n y_np = np.where(y_np == 0, 1, y_np) # remove 0's to avoid division by zero error\n\n mod_node = helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=fmod)\n\n onnx_dtype = TensorProto.FLOAT if dtype == \"float32\" else TensorProto.INT32\n graph = helper.make_graph(\n [mod_node],\n \"mod_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", onnx_dtype, list(x_shape)),\n helper.make_tensor_value_info(\"y\", onnx_dtype, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"z\", onnx_dtype, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"mod_test\")\n verify_with_ort_with_inputs(model, [x_np, y_np], out_shape)\n\n\[email protected]_gpu\ndef test_mod():\n # Mod\n verify_mod(\n x_shape=[1, 32, 32], y_shape=[1, 1, 32], fmod=0, out_shape=(1, 32, 32), dtype=\"int32\"\n )\n verify_mod(\n x_shape=[1, 32, 32, 32],\n y_shape=[1, 32, 32, 32],\n fmod=0,\n out_shape=(1, 32, 32, 32),\n dtype=\"int32\",\n )\n\n # fmod\n verify_mod(\n x_shape=[1, 32, 32], y_shape=[1, 32, 32], fmod=1, out_shape=(1, 32, 32), dtype=\"int32\"\n )\n verify_mod(x_shape=[1, 1, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 1, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n 
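# ONNX Mod semantics, for reference: with fmod=0 the result takes the sign of\n # the divisor (Python % / np.mod) and the spec restricts fmod=0 to integer\n # inputs, while fmod=1 follows C fmod / np.fmod (sign of the dividend) and is\n # required once the inputs are floating point -- hence the float cases in\n # this test all set fmod=1. Quick NumPy check: np.mod(-5, 3) == 1 while\n # np.fmod(-5, 3) == -2.\n 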
verify_mod(\n x_shape=[1, 32, 32, 32],\n y_shape=[1, 32, 32, 32],\n fmod=1,\n out_shape=(1, 32, 32, 32),\n dtype=\"int32\",\n )\n verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n\n\ndef verify_xor(x_shape, y_shape):\n x_np = np.random.choice(a=[False, True], size=x_shape).astype(\"bool\")\n y_np = np.random.choice(a=[False, True], size=y_shape).astype(\"bool\")\n\n np_out = np.logical_xor(x_np, y_np)\n out_shape = np_out.shape\n\n xor_node = helper.make_node(\"Xor\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\n onnx_dtype = TensorProto.BOOL\n graph = helper.make_graph(\n [xor_node],\n \"xor_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", onnx_dtype, list(x_shape)),\n helper.make_tensor_value_info(\"y\", onnx_dtype, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"z\", onnx_dtype, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"xor_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x_np, y_np], target, ctx, out_shape)\n tvm.testing.assert_allclose(np_out, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_xor():\n # XOR\n verify_xor(x_shape=[1, 32, 32], y_shape=[1, 32, 32])\n\n # Xor broadcast\n verify_xor(x_shape=[1, 32, 32], y_shape=[1, 1, 32])\n\n\ndef verify_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):\n if spatial_scale is None:\n pool_node = helper.make_node(\n \"MaxRoiPool\", inputs=[\"x\", \"rois\"], outputs=[\"y\"], pooled_shape=pooled_shape\n )\n else:\n pool_node = helper.make_node(\n \"MaxRoiPool\",\n inputs=[\"x\", \"rois\"],\n outputs=[\"y\"],\n pooled_shape=pooled_shape,\n spatial_scale=spatial_scale,\n )\n\n graph = helper.make_graph(\n [pool_node],\n \"pool_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"rois\", TensorProto.FLOAT, list(rois_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"pool_test\")\n verify_with_ort(model, [x_shape, rois_shape], out_shape)\n\n\[email protected]_gpu\ndef test_max_roi_pool():\n verify_max_roi_pool(\n x_shape=[1, 3, 6, 6],\n rois_shape=[3, 5],\n pooled_shape=[1, 1],\n spatial_scale=None,\n out_shape=[3, 3, 1, 1],\n )\n\n verify_max_roi_pool(\n x_shape=[1, 3, 10, 10],\n rois_shape=[4, 5],\n pooled_shape=[2, 2],\n spatial_scale=2.0,\n out_shape=[4, 3, 2, 2],\n )\n\n\ndef verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad=\"NOTSET\"):\n if pads is None:\n pool_node = helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n p=p,\n auto_pad=auto_pad,\n strides=strides,\n )\n else:\n pool_node = helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n p=p,\n pads=pads,\n strides=strides,\n )\n\n graph = helper.make_graph(\n [pool_node],\n \"lppool_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"lppool_test\")\n verify_with_ort(model, [x_shape], out_shape)\n\n\[email protected]_gpu\ndef test_lppool():\n # Pool1D\n verify_lppool(\n x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[1], pads=[1, 1], out_shape=[1, 1, 32]\n )\n\n # Pool2D\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n 
kernel_shape=[3, 3],\n p=2,\n strides=[1, 1],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 32, 32],\n )\n\n # Pool1D with stride\n verify_lppool(\n x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[2], pads=[1, 1], out_shape=[1, 1, 16]\n )\n\n # Pool2D with stride\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n p=2,\n strides=[2, 2],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 16, 16],\n )\n\n # Pool1D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n p=2,\n strides=[2],\n pads=None,\n out_shape=[1, 1, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool2D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n p=2,\n strides=[2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool3D with stride\n verify_lppool(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n p=2,\n strides=[2, 2, 2],\n pads=[1, 1, 1, 1, 1, 1],\n out_shape=[1, 1, 16, 16, 16],\n )\n\n # Pool3D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n p=2,\n strides=[2, 2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n\ndef verify_rnn(\n seq_length,\n batch_size,\n input_size,\n hidden_size,\n rnn_type=\"LSTM\",\n use_bias=False,\n activations=None,\n alphas=None,\n betas=None,\n use_initial_state=False,\n use_peep=False,\n linear_before_reset=False,\n):\n if rnn_type == \"LSTM\":\n multiplier = 4\n elif rnn_type == \"GRU\":\n multiplier = 3\n else:\n raise NotImplementedError(\"%s RNNs not yet supported.\" % rnn_type)\n x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype(\"float32\")\n w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype(\"float32\")\n r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype(\"float32\")\n input_names = [\"X\", \"W\", \"R\"]\n input_tensors = [\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(x_np.shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_np.shape)),\n helper.make_tensor_value_info(\"R\", TensorProto.FLOAT, list(r_np.shape)),\n ]\n input_values = [x_np, w_np, r_np]\n\n if use_bias:\n b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype(\"float32\")\n input_names.append(\"B\")\n input_tensors.append(\n helper.make_tensor_value_info(\"B\", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size])\n )\n input_values.append(b_np)\n\n if use_initial_state:\n assert use_bias == True, \"Initial states must have bias specified.\"\n sequence_np = np.repeat(seq_length, batch_size).astype(\"int32\")\n input_names.append(\"sequence_lens\")\n input_tensors.append(\n helper.make_tensor_value_info(\"sequence_lens\", TensorProto.INT32, [batch_size])\n )\n input_values.append(sequence_np)\n\n initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(\"float32\")\n input_names.append(\"initial_h\")\n input_tensors.append(\n helper.make_tensor_value_info(\n \"initial_h\", TensorProto.FLOAT, [1, batch_size, hidden_size]\n )\n )\n input_values.append(initial_h_np)\n\n if rnn_type == \"LSTM\":\n initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(\"float32\")\n input_names.append(\"initial_c\")\n input_tensors.append(\n helper.make_tensor_value_info(\n \"initial_c\", TensorProto.FLOAT, [1, batch_size, hidden_size]\n )\n )\n input_values.append(initial_c_np)\n\n if use_peep and rnn_type == \"LSTM\":\n assert use_initial_state == 
True, \"Peepholes require initial state to be specified.\"\n p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype(\"float32\")\n input_names.append(\"P\")\n input_tensors.append(\n helper.make_tensor_value_info(\"P\", TensorProto.FLOAT, [1, 3 * hidden_size])\n )\n input_values.append(p_np)\n\n Y_shape = [seq_length, 1, batch_size, hidden_size]\n Y_h_shape = [1, batch_size, hidden_size]\n outputs = [\"Y\", \"Y_h\"]\n graph_outputs = [\n helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(Y_shape)),\n helper.make_tensor_value_info(\"Y_h\", TensorProto.FLOAT, list(Y_h_shape)),\n ]\n output_shapes = [Y_shape, Y_h_shape]\n\n if rnn_type == \"LSTM\":\n Y_c_shape = [1, batch_size, hidden_size]\n outputs.append(\"Y_c\")\n graph_outputs.append(\n helper.make_tensor_value_info(\"Y_c\", TensorProto.FLOAT, list(Y_c_shape))\n )\n output_shapes.append(Y_c_shape)\n\n rnn_node = helper.make_node(\n rnn_type, inputs=input_names, outputs=outputs, hidden_size=hidden_size\n )\n if activations is not None:\n activations_attr = helper.make_attribute(\"activations\", activations)\n rnn_node.attribute.append(activations_attr)\n if alphas is not None:\n alphas_attr = helper.make_attribute(\"activation_alpha\", alphas)\n rnn_node.attribute.append(alphas_attr)\n if betas is not None:\n betas_attr = helper.make_attribute(\"activation_beta\", betas)\n rnn_node.attribute.append(betas_attr)\n if linear_before_reset and rnn_type == \"GRU\":\n lbr_attr = helper.make_attribute(\"linear_before_reset\", 1)\n rnn_node.attribute.append(lbr_attr)\n\n graph = helper.make_graph([rnn_node], \"rnn_test\", inputs=input_tensors, outputs=graph_outputs)\n\n model = helper.make_model(graph, producer_name=\"rnn_test\")\n\n for target, ctx in tvm.testing.enabled_targets():\n onnx_out = get_onnxruntime_output(model, input_values, \"float32\")\n tvm_out = get_tvm_output(\n model,\n input_values,\n target,\n ctx,\n output_shapes,\n output_dtype=[\"float32\"] * len(output_shapes),\n )\n for o_out, t_out in zip(onnx_out, tvm_out):\n tvm.testing.assert_allclose(o_out, t_out, rtol=5e-3, atol=5e-3)\n\n\[email protected]_gpu\ndef test_lstm():\n # No bias.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type=\"LSTM\"\n )\n # large batch.\n verify_rnn(\n seq_length=4, batch_size=8, input_size=16, hidden_size=32, use_bias=True, rnn_type=\"LSTM\"\n )\n # Non power of two.\n verify_rnn(\n seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type=\"LSTM\"\n )\n # Long sequence.\n verify_rnn(\n seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type=\"LSTM\"\n )\n # Large hidden.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type=\"LSTM\"\n )\n # Large input.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type=\"LSTM\"\n )\n\n # Different activation testing.\n # Default value hardsigmoid.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Tanh\", \"Tanh\"],\n rnn_type=\"LSTM\",\n )\n # Multiple parameterized activations.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\", \"Tanh\"],\n alphas=[2.0, 0.5],\n betas=[0.3],\n rnn_type=\"LSTM\",\n )\n # All parameterized with new Affine activation.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n 
use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\", \"Affine\"],\n alphas=[2.0, 0.5, 0.8],\n betas=[0.3, 0.1],\n rnn_type=\"LSTM\",\n )\n\n # Testing with initial state and peepholes\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n rnn_type=\"LSTM\",\n )\n\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n use_peep=True,\n rnn_type=\"LSTM\",\n )\n\n\[email protected]_gpu\ndef test_gru():\n # No bias.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type=\"GRU\"\n )\n # large batch.\n verify_rnn(\n seq_length=4,\n batch_size=8,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"GRU\",\n linear_before_reset=True,\n )\n # Non power of two.\n verify_rnn(\n seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type=\"GRU\"\n )\n # Long sequence.\n verify_rnn(\n seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type=\"GRU\"\n )\n # Large hidden.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type=\"GRU\"\n )\n # Large input.\n verify_rnn(\n seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type=\"GRU\"\n )\n\n # Different activation testing.\n # Default value hardsigmoid.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Softsign\"],\n rnn_type=\"GRU\",\n )\n # Multiple parameterized activations.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\"],\n alphas=[2.0, 0.5],\n betas=[0.3],\n rnn_type=\"GRU\",\n )\n # All parameterized with new Affine activation.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Affine\"],\n alphas=[2.0, 0.8],\n betas=[0.3, 0.1],\n rnn_type=\"GRU\",\n )\n\n # Testing with initial state\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n rnn_type=\"GRU\",\n )\n\n\[email protected]_gpu\ndef test_resize():\n def verify(ishape, oshape, scales, mode, coord_trans):\n nodes = [\n make_constant_node(\"roi\", onnx.TensorProto.FLOAT, (0,), []),\n make_constant_node(\"scales\", onnx.TensorProto.FLOAT, (len(scales),), scales),\n ]\n input_names = [\"X\", \"roi\", \"scales\"]\n if oshape != []:\n nodes.append(\n make_constant_node(\"sizes\", onnx.TensorProto.INT64, (len(oshape),), oshape)\n )\n input_names.append(\"sizes\")\n nodes.append(\n helper.make_node(\n \"Resize\",\n inputs=input_names,\n outputs=[\"Y\"],\n mode=mode,\n coordinate_transformation_mode=coord_trans,\n )\n )\n\n if oshape == []:\n oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]\n\n graph = helper.make_graph(\n nodes,\n \"resize_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, ishape)],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, oshape)],\n )\n\n model = helper.make_model(graph, producer_name=\"resize_test\")\n\n verify_with_ort(model, [ishape], oshape, use_vm=False, opset=11)\n\n # upsampling\n verify([1, 16, 32, 32], [1, 16, 64, 64], [], \"nearest\", \"asymmetric\")\n verify([1, 16, 32, 32], [1, 16, 64, 64], [], \"linear\", \"align_corners\")\n verify([1, 16, 32, 32], 
[1, 16, 64, 64], [], \"linear\", \"half_pixel\")\n # downsampling\n verify([1, 16, 32, 32], [1, 16, 16, 16], [], \"nearest\", \"asymmetric\")\n verify([1, 16, 32, 32], [1, 16, 16, 16], [], \"linear\", \"align_corners\")\n verify([1, 16, 32, 32], [1, 16, 16, 16], [], \"linear\", \"half_pixel\")\n # scales are specified instead of sizes\n verify([1, 16, 32, 32], [], [1, 1, 2, 2], \"nearest\", \"asymmetric\")\n verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], \"linear\", \"half_pixel\")\n\n\[email protected]_gpu\ndef test_nonzero():\n def verify_nonzero(indata, outdata, dtype):\n node = helper.make_node(\n \"NonZero\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"nonzero_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.INT64, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.INT64, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"nonzero_test\")\n\n verify_with_ort_with_inputs(\n model, [indata], targets=[\"llvm\"], dtype=\"int64\", use_vm=True, opset=9\n )\n\n input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)\n result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 1], [0, 0, 1]]\n verify_nonzero(input_data, result, dtype=np.int64)\n\n input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)\n result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 2, 2], [0, 1, 0, 1]]\n verify_nonzero(input_data, result, dtype=np.int64)\n\n\[email protected]_gpu\ndef test_topk():\n def verify_topk(input_dims, K, axis=-1):\n output_dims = list(input_dims)\n output_dims[axis] = K\n\n node = helper.make_node(\n \"TopK\", inputs=[\"X\", \"K\"], outputs=[\"Values\", \"Indicies\"], axis=axis\n )\n\n graph = helper.make_graph(\n [node],\n \"topk_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(input_dims)),\n helper.make_tensor_value_info(\n \"K\",\n TensorProto.INT64,\n [\n 1,\n ],\n ),\n ],\n initializer=[helper.make_tensor(\"K\", TensorProto.INT64, [1], [K])],\n outputs=[\n helper.make_tensor_value_info(\"Values\", TensorProto.FLOAT, output_dims),\n helper.make_tensor_value_info(\"Indicies\", TensorProto.INT64, output_dims),\n ],\n )\n\n model = helper.make_model(graph, producer_name=\"topk_test\")\n\n indata = np.random.uniform(-10, 10, input_dims).astype(np.float32)\n onnx_out = get_onnxruntime_output(model, [indata, k])\n\n for target, ctx in [(\"llvm\", tvm.cpu())]:\n tvm_out = get_tvm_output(\n model,\n indata,\n target,\n ctx,\n [output_dims, output_dims],\n output_dtype=[\"float32\", \"int64\"],\n )\n tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-05, atol=1e-05)\n\n for n in [12, 32]:\n for shape in [[n], [n, n], [n, n, n]]:\n for k in [1, 5, 10]:\n verify_topk(shape, k)\n\n verify_topk([n, n, n], 5, 0)\n verify_topk([n, n, n], 5, 1)\n verify_topk([n, n, n], 5, 2)\n\n\[email protected]_gpu\ndef test_roi_align():\n def verify_roi_align(\n input_dims, num_roi, output_height, output_width, sampling_ratio=0, spatial_scale=1.0\n ):\n output_dims = [num_roi, input_dims[1], output_height, output_width]\n\n node = helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indicies\"],\n outputs=[\"Y\"],\n mode=\"avg\",\n output_height=output_height,\n output_width=output_width,\n sampling_ratio=sampling_ratio,\n spatial_scale=spatial_scale,\n )\n\n graph = helper.make_graph(\n [node],\n \"roialign_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(input_dims)),\n 
helper.make_tensor_value_info(\"rois\", TensorProto.FLOAT, [num_roi, 4]),\n helper.make_tensor_value_info(\n \"batch_indicies\",\n TensorProto.INT64,\n [\n num_roi,\n ],\n ),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, output_dims)],\n )\n\n model = helper.make_model(graph, producer_name=\"roialign_test\")\n\n np_data = np.random.uniform(size=input_dims).astype(\"float32\")\n np_rois = np.random.uniform(size=[num_roi, 4]).astype(\"float32\") * input_dims[2]\n np_batch_indicies = np.random.randint(low=0, high=input_dims[0], size=num_roi)\n\n verify_with_ort_with_inputs(model, [np_data, np_rois, np_batch_indicies], output_dims)\n\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((4, 4, 16, 32), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 8, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 8, 8), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 16, 5, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 12), 8, 7, 3, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=0.5)\n verify_roi_align((3, 4, 12, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.5)\n verify_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2, spatial_scale=1.0)\n\n\nif __name__ == \"__main__\":\n test_flatten()\n test_reshape()\n test_shape()\n test_expand()\n test_power()\n test_squeeze()\n test_unsqueeze()\n test_slice()\n test_floor()\n test_ceil()\n test_round()\n test_isinf()\n test_isnan()\n test_clip()\n test_clip_min_max_as_inputs()\n test_onehot()\n test_matmul()\n test_batch_matmul()\n test_gather()\n test_gatherelements()\n test_gather_nd()\n test_scatter()\n test_lrn()\n test_instance_norm()\n test_upsample()\n test_forward_min()\n test_forward_max()\n test_forward_mean()\n test_forward_hardsigmoid()\n test_forward_arg_min_max()\n test_softmax()\n test_constantofshape()\n test_all_reduce_funcs()\n test_pad()\n test_split()\n test_binary_ops()\n test_single_ops()\n test_leaky_relu()\n test_elu()\n test_selu()\n test_prelu()\n test_ThresholdedRelu()\n test_ScaledTanh()\n test_ParametricSoftplus()\n test_Scale()\n test_LogSoftmax()\n test_resnet()\n test_inception()\n test_densenet()\n test_sign()\n test_not()\n test_and()\n test_tile()\n test_erf()\n test_where()\n test_or()\n test_depth_to_space()\n test_space_to_depth()\n test_batch_norm()\n test_batch_norm_dynamic_subgraph()\n test_conv()\n test_convtranspose()\n test_unsqueeze_constant()\n test_pooling()\n test_lppool()\n test_lstm()\n test_gru()\n test_resize()\n test_nonzero()\n test_topk()\n test_mod()\n test_xor()\n test_max_roi_pool()\n test_roi_align()\n"
] |
[
[
"numpy.logical_xor",
"numpy.expand_dims",
"numpy.arctanh",
"numpy.take",
"numpy.sqrt",
"numpy.arctan",
"numpy.asarray",
"numpy.dtype",
"numpy.max",
"numpy.mean",
"numpy.argmin",
"numpy.random.randn",
"numpy.var",
"numpy.exp",
"numpy.where",
"numpy.random.randint",
"torch.onnx.export",
"numpy.pad",
"numpy.clip",
"numpy.arcsin",
"torch.randn",
"numpy.eye",
"numpy.matmul",
"numpy.sin",
"numpy.argmax",
"numpy.repeat",
"scipy.special.erf",
"numpy.zeros",
"numpy.logical_not",
"numpy.log",
"numpy.cosh",
"numpy.nonzero",
"numpy.min",
"numpy.power",
"numpy.random.choice",
"numpy.arccosh",
"numpy.arccos",
"numpy.logical_or",
"numpy.tan",
"torch.nn.Linear",
"numpy.ndim",
"numpy.random.rand",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.arcsinh",
"numpy.tanh",
"numpy.maximum",
"numpy.abs",
"numpy.random.random",
"numpy.tile",
"numpy.cos",
"numpy.ones",
"numpy.sinh",
"numpy.sign",
"numpy.random.uniform",
"numpy.ndindex",
"numpy.empty"
]
] |
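All of the ONNX frontend tests in the file above share one graph-construction pattern: build nodes with helper.make_node, wrap them with helper.make_graph and helper.make_model, then hand the model to a verify helper. A minimal standalone sketch of that pattern for TopK, assuming only the onnx package (no TVM or onnxruntime; the graph name "topk_sketch" and the shapes are illustrative):

import onnx
from onnx import helper, TensorProto

# TopK with K declared as a graph input that also carries an initializer default.
node = helper.make_node("TopK", inputs=["X", "K"], outputs=["Values", "Indices"], axis=-1)
graph = helper.make_graph(
    [node],
    "topk_sketch",
    inputs=[
        helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 8]),
        helper.make_tensor_value_info("K", TensorProto.INT64, [1]),
    ],
    initializer=[helper.make_tensor("K", TensorProto.INT64, [1], [3])],
    outputs=[
        helper.make_tensor_value_info("Values", TensorProto.FLOAT, [4, 3]),
        helper.make_tensor_value_info("Indices", TensorProto.INT64, [4, 3]),
    ],
)
model = helper.make_model(graph, producer_name="topk_sketch")
onnx.checker.check_model(model)  # structural validation only; no runtime needed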
iDurugkar/adversarial-intrinsic-motivation
|
[
"e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2"
] |
[
"grid_world_experiments/main.py"
] |
[
"\"\"\"\nGAIL file\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch import nn\n# from torch.nn import utils\nimport torch.nn.functional as f\nimport random\nfrom policy import MlpNetwork, SoftQLearning\nfrom grid_mdp import GridMDP, MazeWorld, WindyMazeWorld, ToroidWorld\nfrom rnd import RND\nimport matplotlib.pyplot as plt\nfrom buffers import ReplayBuffer\nimport argparse\nimport os\nfrom os import path\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--seed', help='random seed', type=int, default=1123)\nparser.add_argument('--rnd', help='random network distillation', type=bool, default=False)\nparser.add_argument('--reward', help=\"reward function to use ['gail', 'airl', 'fairl', 'aim', 'none']\", type=str,\n default='aim')\nparser.add_argument('--dir', help=\"directory to save results in\", type=str,\n default='aim_results')\nargs = parser.parse_args()\ntorch.set_default_dtype(torch.float32)\n# Set random seeds\nseed = 42 * args.seed\nprint(args.seed)\ntorch.manual_seed(seed)\nrandom.seed = seed\nnp.random.seed = seed\nreward_to_use = args.reward # use one of ['gail', 'airl', 'fairl', 'none']\nprint(reward_to_use)\n\n\ndef wasserstein_reward(d):\n \"\"\"\n return the wasserstein reward\n \"\"\"\n return d\n\n\ndef gail_reward(d):\n \"\"\"\n Take discriminaotr output and return the gail reward\n :param d:\n :return:\n \"\"\"\n d = torch.sigmoid(d)\n return d.log() # - (1 - d).log()\n\n\ndef airl_reward(d):\n \"\"\"\n Take discriminaotr output and return AIRL reward\n :param d:\n :return:\n \"\"\"\n s = torch.sigmoid(d)\n reward = s.log() - (1 - s).log()\n return reward\n\n\ndef fairl_reward(d):\n \"\"\"\n Take discriminator output and return FAIRL reward\n :param d:\n :return:\n \"\"\"\n d = torch.sigmoid(d)\n h = d.log() - (1 - d).log()\n return h.exp() * (-h)\n\n\nreward_dict = {'gail': gail_reward, 'airl': airl_reward, 'fairl': fairl_reward, 'aim': wasserstein_reward,\n 'none': None}\n\n\nclass Discriminator(nn.Module):\n \"\"\"\n The discriminator used to learn the potentials or the reward functions\n \"\"\"\n def __init__(self, x_dim=1, max_state=10., min_state=0):\n super(Discriminator, self).__init__()\n self.mean_state = torch.tensor((max_state - min_state) / 2 + min_state, dtype=torch.float32)\n self.diff_state = torch.tensor(max_state - min_state, dtype=torch.float32)\n self.input_dim = x_dim\n self.d = MlpNetwork(self.input_dim, n_units=64) # , activ=f.tanh)\n\n def normalize(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n normalize input\n :param x:\n :return:\n \"\"\"\n x = x.type(torch.float32)\n x = (x - self.mean_state) / self.diff_state\n return x\n\n def forward(self, x: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n return discriminator output\n :param x:\n :return:\n \"\"\"\n x = self.normalize(x)\n output = self.d(x)\n return output\n\n\ndef to_one_hot(x: torch.Tensor, num_vals) -> torch.Tensor:\n \"\"\"\n Convert tensor to one-hot encoding\n \"\"\"\n if type(x) is not torch.Tensor:\n x = torch.tensor(x)\n x = x.type(torch.long)\n x_one_hot = torch.zeros((x.shape[0], num_vals), dtype=torch.float32)\n x_one_hot = x_one_hot.scatter(1, x, 1.)\n return x_one_hot\n\n\nclass GAIL:\n \"\"\"\n Class to take the continuous MDP and use gail to match given target distribution\n \"\"\"\n\n def __init__(self):\n self.env = MazeWorld()\n # self.env = ToroidWorld()\n self.policy = SoftQLearning(x_dim=self.env.dims, out_dim=len(self.env.action_space),\n max_state=self.env.max_state, 
min_state=self.env.min_state,\n ent_coef=.3, target_update=3e-2)\n self.discriminator = Discriminator(x_dim=self.env.dims, max_state=self.env.max_state,\n min_state=self.env.min_state)\n self.discount = 0.99\n self.check_state = set()\n self.agent_buffer = ReplayBuffer(size=5000)\n self.policy_optimizer = torch.optim.Adam(self.policy.parameters()) # , lr=3e-4)\n self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters()) # , lr=1e-4)\n if args.rnd:\n self.rnd = RND(x_dim=self.env.dims)\n else:\n self.rnd = None\n self.max_r = 0.\n self.min_r = -1.\n\n def gather_data(self, num_trans=100) -> None:\n \"\"\"\n Gather data from current policy\n used to:\n * fit value function\n * update policy\n * plot histograms\n :param num_trans:\n :return:\n \"\"\"\n t = 0\n while t < num_trans:\n s = self.env.reset()\n s = torch.tensor(s).type(torch.float32).reshape([-1, self.env.dims])\n done = False\n while not done:\n # self.states.append(deepcopy(s))\n action = self.policy.sample_action(s)\n # self.actions.append(a)\n a = np.squeeze(action.data.detach().numpy())\n s_p, r, done, _ = self.env.step(a)\n s_p = torch.tensor(s_p).type(torch.float32).reshape([-1, self.env.dims])\n # d = self.discriminator(sp)\n # i_r = gail_reward(d)\n # self.next_states.append(deepcopy(s))\n # self.rewards.append(i_r) # deepcopy(r))\n # self.dones.append(deepcopy(done))\n self.agent_buffer.add(s.squeeze(), action.reshape([-1]).detach(), r, s_p.squeeze(), done)\n # if s_p not in self.check_state:\n # self.check_state.add(s_p)\n # self.target_buffer.add(s, a, r, s_p, done)\n s = s_p\n t += 1\n # self.states.append(s)\n\n def compute_td_targets(self, states, next_states, dones, rewards=None):\n \"\"\"\n Compute the value of the current states and\n the TD target based on one step reward\n and value of next states\n :return: value of current states v, TD target targets\n \"\"\"\n states = states.reshape([-1, self.env.dims])\n next_states = next_states.reshape([-1, self.env.dims])\n v = self.policy(states)[0]\n v_prime = self.policy(next_states)[-1]\n if rewards is not None:\n dones = rewards.type(torch.float32).reshape([-1, 1])\n else:\n dones = dones.type(torch.float32).reshape([-1, 1])\n reward_func = reward_dict[reward_to_use]\n if reward_func is not None:\n # d0 = self.discriminator(states)\n d1 = self.discriminator(next_states)\n # Compute rewards\n # r0 = reward_func(d0)\n r1 = reward_func(d1)\n rewards = rewards.type(torch.float32).reshape([-1, 1]) + ((r1 - self.max_r) / (self.max_r - self.min_r))\n targets = rewards.type(torch.float32).reshape([-1, 1])\n targets += (1. 
- dones) * self.discount * v_prime.reshape([-1, 1])\n return v, targets.detach()\n\n def fit_v_func(self):\n \"\"\"\n This function will train the value function using the collected data\n :return:\n \"\"\"\n self.policy_optimizer.zero_grad()\n s, a, r, s_p, dones = self.agent_buffer.sample(100)\n if args.rnd:\n spn = s_p.detach().numpy()\n self.rnd.update(spn)\n r += 0.3 * self.rnd.reward(spn)\n\n q, targets = self.compute_td_targets(s, s_p, dones, rewards=r)\n actions = torch.tensor(a, dtype=torch.long)\n v = q.gather(dim=-1, index=actions)\n loss = torch.mean(0.5 * (targets - v) ** 2)\n loss.backward()\n self.policy_optimizer.step()\n self.policy.update_target()\n return\n\n # def optimize_policy(self):\n # \"\"\"\n # This function will optimize the policy to maximize returns\n # Based on collected data\n # :return:\n # \"\"\"\n # self.policy_optimizer.zero_grad()\n # s, a, r, s_p, dones = self.agent_buffer.sample(100)\n # v, targets = self.compute_td_targets(s, s_p, dones, rewards=r)\n # advantages = (targets - v).detach()\n # a = a.reshape([-1, 1]).detach()\n # neg_log_pi = -1. * self.policy.pi_loss(s.reshape([-1, self.env.dims]), a)\n # entropy_kl = self.policy.entropy(s.reshape([-1, self.env.dims]))\n # loss = torch.mean(advantages * neg_log_pi) + 1e-1 * torch.mean(entropy_kl)\n # loss.backward()\n # self.policy_optimizer.step()\n # return\n\n def compute_aim_pen(self,\n target_state: torch.Tensor,\n prev_state: torch.Tensor,\n next_state_state: torch.Tensor, lambda_=10.):\n \"\"\"\n Computes values of the discriminator at different points\n and constraints the difference to be 0.1\n \"\"\"\n prev_out = self.discriminator(prev_state)\n next_out = self.discriminator(next_state_state)\n penalty = lambda_ * torch.max(torch.abs(next_out - prev_out) - 0.1, torch.tensor(0.)).pow(2).mean()\n return penalty\n\n def compute_grad_pen(self,\n target_state: torch.Tensor,\n policy_state: torch.Tensor,\n lambda_=10.):\n \"\"\"\n Computes the gradients by mixing the data randomly\n and creates a loss for the magnitude of the gradients.\n \"\"\"\n alpha = torch.rand(target_state.size(0), 1)\n # expert_data = torch.cat([expert_state, expert_action], dim=1)\n # policy_data = torch.cat([policy_state, policy_action], dim=1)\n\n alpha = alpha.expand_as(target_state).to(target_state.device)\n\n mixup_data = alpha * target_state + (1 - alpha) * policy_state\n mixup_data.requires_grad = True\n\n disc = self.discriminator(mixup_data)\n ones = torch.ones(disc.size()).to(disc.device)\n grad = torch.autograd.grad(\n outputs=disc,\n inputs=mixup_data,\n grad_outputs=ones,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n\n grad_pen = lambda_ * (torch.max(grad.norm(2, dim=1) - 0.01, torch.tensor(0.))).pow(2).mean()\n return grad_pen\n\n def optimize_discriminator(self):\n \"\"\"\n Optimize the discriminator based on the memory and\n target_distribution\n :return:\n \"\"\"\n num_samples = 100\n self.discriminator_optimizer.zero_grad()\n # _, _, _, target_distribution, _ = self.target_buffer.sample(100)\n target_dist = np.reshape(self.env.target_distribution(), (-1,))\n target_distribution = np.random.choice(target_dist.shape[0], num_samples, p=target_dist)\n states, _, _, next_states, _ = self.agent_buffer.sample(num_samples)\n # target_distribution = sample_target_distribution(mean=self.env.target_mean, std=self.env.target_std,\n # num=100)\n target_distribution = target_distribution.reshape([-1, 1])\n if self.env.dims > 1:\n target_distribution = np.concatenate([target_distribution, 
target_distribution], axis=-1)\n target_distribution[:, 0] = target_distribution[:, 0] // self.env.y_dim\n target_distribution[:, 1] = target_distribution[:, 1] % self.env.y_dim\n next_states = next_states.reshape([-1, self.env.dims])\n ones = torch.tensor(target_distribution).type(torch.float32).reshape([-1, self.env.dims])\n zeros = torch.tensor(next_states).type(torch.float32).reshape([-1, self.env.dims])\n zeros_prev = torch.tensor(states).type(torch.float32).reshape([-1, self.env.dims])\n\n # ########## GAIL loss\n if reward_to_use != 'aim':\n labels_ones = torch.ones((num_samples, 1)) * 0.9\n labels_zeros = torch.ones((num_samples, 1)) * 0.1\n data = torch.cat([ones, zeros])\n pred = self.discriminator(data)\n labels = torch.cat([labels_ones, labels_zeros])\n gail_loss = f.binary_cross_entropy_with_logits(pred, labels)\n grad_penalty = self.compute_grad_pen(ones, zeros)\n loss = gail_loss + grad_penalty\n else:\n # ####### WGAN loss\n pred_ones = self.discriminator(ones)\n pred_zeros = self.discriminator(zeros)\n preds = torch.cat([pred_zeros, pred_ones], dim=0)\n self.max_r = torch.max(preds).detach().cpu().numpy() + 0.1\n self.min_r = torch.min(preds).detach().cpu().numpy() - 0.1\n wgan_loss = torch.mean(pred_zeros) + torch.mean(pred_ones * (-1.))\n aim_penalty = self.compute_aim_pen(ones, zeros_prev, zeros)\n # grad_penalty = self.compute_grad_pen(ones, zeros)\n loss = wgan_loss + aim_penalty # + grad_penalty\n\n # loss = torch.mean(- labels * pred.log() - (1 - labels) * (1. - pred).log())\n loss.backward()\n # utils.clip_grad_norm_(self.discriminator.parameters(), max_norm=0.5)\n self.discriminator_optimizer.step()\n\n def plot_dist(self, num_samples=100, it=0, dname='aim'):\n \"\"\"\n plot the two distributions as histograms\n :return:\n \"\"\"\n # dname = 'r_neg'\n if not path.exists(dname):\n os.mkdir(dname)\n\n # _, _, _, target_distribution, _ = self.target_buffer.sample(num_samples)\n states, _, _, next_states, _ = self.agent_buffer.sample(num_samples)\n target_dist = np.reshape(self.env.target_distribution(), (-1,))\n target_distribution = np.random.choice(target_dist.shape[0], num_samples, p=target_dist)\n target_distribution = target_distribution.reshape([-1, 1]).astype(np.float32)\n if self.env.dims > 1:\n target_distribution = np.concatenate([target_distribution, target_distribution], axis=-1)\n target_distribution[:, 0] = target_distribution[:, 0] // self.env.y_dim\n target_distribution[:, 1] = target_distribution[:, 1] % self.env.y_dim\n # target_distribution += np.random.normal(loc=0, scale=0.5, size=target_distribution.shape)\n next_states = next_states.numpy().reshape([-1, self.env.dims]).astype(np.float32)\n # next_states += np.random.normal(loc=0., scale=0.01, size=next_states.shape)\n q, v, qt, vt = self.policy(states)\n print(f\"q: {np.mean(q.detach().numpy())}, v: {np.mean(v.detach().numpy())},\"\n f\" qt: {np.mean(qt.detach().numpy())}, vt: {np.mean(vt.detach().numpy())}\")\n if self.env.dims == 1:\n xloc = np.arange(0, self.env.num_states)\n target_distribution = to_one_hot(target_distribution, self.env.num_states).numpy()\n plt.bar(xloc, np.sum(target_distribution, axis=0), color='r', alpha=0.3, label='target')\n next_states = to_one_hot(next_states, self.env.num_states).numpy()\n plt.bar(xloc, np.sum(next_states, axis=0), color='b', alpha=0.3, label='agent')\n for t in self.env.target_state:\n plt.axvline(x=t, color='r', linestyle='dashed', linewidth=2)\n # sns.kdeplot(np.squeeze(target_distribution), shade=True, color='r', shade_lowest=False, 
alpha=0.3,\n # label='target')\n # sns.kdeplot(np.squeeze(next_states), shade=True, color='b', shade_lowest=False, alpha=0.3,\n # label='agent')\n else:\n from matplotlib.ticker import AutoMinorLocator\n target_vals, target_counts = np.unique(target_distribution, axis=0, return_counts=True)\n agent_vals, agent_counts = np.unique(next_states, axis=0, return_counts=True)\n target_counts = target_counts.astype(np.float) / np.max(target_counts)\n agent_counts = agent_counts.astype(np.float) / np.max(agent_counts)\n # for it in range(target_counts.shape[0]):\n # plt.plot(target_vals[it, 0] + 0.5, target_vals[it, 1] + 0.5, marker_size=40 * target_counts[it],\n # color='r', alpha=0.2)\n # for ia in range(agent_counts.shape[0]):\n # plt.plot(agent_vals[ia, 0] + 0.5, agent_vals[ia, 1] + 0.5, marker_size=40 * agent_counts[ia],\n # color='b', alpha=0.2)\n\n plt.xlim(left=0., right=self.env.x_dim)\n plt.ylim(bottom=0., top=self.env.y_dim)\n plt.scatter(target_vals[:, 0] + 0.5, target_vals[:, 1] + 0.5, 200 * target_counts,\n color='r', alpha=0.5, label='target')\n plt.scatter(agent_vals[:, 0] + 0.5, agent_vals[:, 1] + 0.5, 200 * agent_counts,\n color='b', alpha=0.5, label='agent')\n plt.xticks(np.arange(self.env.x_dim) + 0.5, np.arange(self.env.x_dim))\n plt.yticks(np.arange(self.env.y_dim) + 0.5, np.arange(self.env.y_dim))\n minor_locator = AutoMinorLocator(2)\n plt.gca().xaxis.set_minor_locator(minor_locator)\n plt.gca().yaxis.set_minor_locator(minor_locator)\n plt.gca().set_aspect('equal')\n plt.grid(which='minor')\n # sns.kdeplot(target_distribution[:, 0], target_distribution[:, 1],\n # shade=True, color='r', shade_lowest=False,\n # alpha=0.5, label='target')\n # sns.kdeplot(next_states[:, 0], next_states[:, 1], shade=True, color='b', shade_lowest=False, alpha=0.5,\n # label='agent')\n plt.legend()\n # plt.hist(target_distribution, bins=10, alpha=0.4, color='red')\n # plt.hist(next_states, bins=10, alpha=0.4, color='blue')\n # plt.axvline(x=self.env.target_state, color='r', linestyle='dashed', linewidth=2)\n # plt.legend(['target', 'agent'])\n plt.title(f'Density for agent and target distributions state Iteration {it}')\n # plt.show()\n plt.tight_layout()\n plt.savefig(f'{dname}/d_{it // 10}.png', dpi=300)\n # exit()\n plt.cla()\n plt.clf()\n\n reward_func = reward_dict[reward_to_use]\n if reward_func is not None:\n r_states = []\n for ia in range(self.env.x_dim):\n for ja in range(self.env.y_dim):\n r_states.append([ia, ja])\n r_states = np.asarray(r_states)\n r_states = torch.tensor(r_states)\n d = reward_func(self.discriminator(r_states)).detach().numpy()\n print(f'Max potential: {np.max(d)}, Min potential: {np.min(d)}')\n # Compute rewards\n rewards = (d - self.max_r) / (self.max_r - self.min_r)\n rewards = np.reshape(rewards, newshape=(self.env.x_dim, self.env.y_dim))\n # Flip matrix so the rewards image is plotted aligned with the distribution\n # grid above\n # rewards = np.flip(rewards, axis=0)\n rewards = np.transpose(rewards)\n plt.imshow(rewards, cmap='magma', origin='lower')\n plt.colorbar()\n plt.title(f'Rewards at Iteration {it}')\n plt.tight_layout()\n plt.savefig(f'{dname}/r_{it // 10}.png', dpi=300)\n plt.cla()\n plt.clf()\n entropy_kl = self.policy.entropy(states.reshape([-1, self.env.dims]))\n entropy_kl = np.mean(entropy_kl.detach().numpy())\n print(f\"Entropy KL at Iteration {it} is {entropy_kl}\")\n\n if self.env.dims == 1:\n states = np.arange(0, self.env.num_states)\n s = torch.tensor(states).type(torch.float32).reshape([-1, 1])\n # s = to_one_hot(s, self.env.num_states)\n d 
= self.discriminator(s)\n reward_func = reward_dict[reward_to_use]\n if reward_func is not None:\n rewards = reward_func(d).squeeze().detach().numpy()\n plt.cla()\n plt.bar(states, rewards, width=0.5)\n plt.xlabel('states')\n plt.ylabel('rewards')\n plt.title('Rewards for entering state')\n plt.show()\n logits = self.policy(s)[0]\n policy = torch.exp(logits).detach().numpy()\n plt.bar(states, policy[:, 0], width=0.5)\n plt.xlabel('states')\n plt.ylabel('P(left|state)')\n plt.title('Policy')\n plt.show()\n plt.cla()\n\n\nif __name__ == '__main__':\n gail = GAIL()\n gail.gather_data(num_trans=500)\n gail.plot_dist(num_samples=500, dname=args.dir)\n print('')\n for i in range(500):\n if reward_to_use != 'none':\n for _ in range(5):\n # gail.gather_data(num_trans=500)\n gail.optimize_discriminator()\n for _ in range(10):\n gail.gather_data(num_trans=500)\n gail.fit_v_func()\n\n # Useful only if using a separate policy\n # gail.gather_data(num_trans=500)\n # gail.optimize_policy()\n # if i % 10 == 0:\n # gail.plot_dist(num_samples=500, dname=args.dir)\n if (i + 1) % 10 == 0:\n # gather more data if you want to see exactly what the agent's policy is\n # gail.gather_data()\n print(i + 1)\n gail.plot_dist(num_samples=500, it=(i + 1), dname=args.dir)\n"
] |
[
[
"torch.mean",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"torch.abs",
"torch.max",
"torch.zeros",
"torch.set_default_dtype",
"torch.cat",
"matplotlib.ticker.AutoMinorLocator",
"numpy.asarray",
"numpy.concatenate",
"numpy.max",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"torch.ones",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"torch.tensor",
"torch.autograd.grad",
"torch.sigmoid",
"matplotlib.pyplot.title",
"numpy.random.choice",
"numpy.min",
"matplotlib.pyplot.ylim",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.min",
"matplotlib.pyplot.savefig",
"torch.exp",
"numpy.transpose",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.scatter",
"torch.manual_seed",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel"
]
] |
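The reward functions at the top of the file above are all elementwise transforms of the raw discriminator output d. A small self-contained sanity check of how they relate, assuming only torch (the sample values of d are illustrative):

import torch

d = torch.linspace(-3.0, 3.0, steps=7)  # raw (pre-sigmoid) discriminator outputs
s = torch.sigmoid(d)
gail = s.log()                    # GAIL: log D(x), always <= 0
airl = s.log() - (1 - s).log()    # AIRL: log D - log(1 - D) recovers the logit
fairl = airl.exp() * (-airl)      # FAIRL: -h * exp(h) with h the logit
assert torch.allclose(airl, d, atol=1e-5)  # logit(sigmoid(d)) == d

The AIM/Wasserstein reward is the identity on d, which is why optimize_discriminator pairs it with compute_aim_pen, a penalty bounding how fast the learned potential may change between consecutive states.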
gstonge/gcm
|
[
"c5d1f0860f7e921ed191a96f7859b232547092d2"
] |
[
"test/test_optimal_seeding.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTest the initializaiton, insertion, sampling, etc. methods for samplable set\n\nAuthor: Guillaume St-Onge <[email protected]>\n\"\"\"\n\nimport pytest\nimport numpy as np\nfrom gcm import *\nfrom scipy.special import loggamma\n\n\ndef constraint_prob(sm,fni,fni_tilde,state_meta):\n assert (fni <= 1).all() and (fni >= 0).all()\n\ndef constraint_norm(sm,fni,fni_tilde,state_meta):\n for n in range(2,len(fni)):\n assert np.isclose(np.sum(fni[n]),1)\n\ndef constraint_norm_tilde(sm,fni,fni_tilde,state_meta):\n for n in range(2,len(fni_tilde)):\n assert np.isclose(np.sum(fni_tilde[n]),1)\n\ndef constraint_stubs(sm,fni,fni_tilde,state_meta):\n nmax = state_meta[1]\n m = state_meta[2]\n gm = state_meta[3]\n pn = state_meta[4]\n imat = state_meta[5]\n nmat = state_meta[6]\n pnmat = state_meta[7]\n nmean = np.sum(pn*np.arange(nmax+1))\n mmean = np.sum(m*gm)\n assert np.isclose(np.sum(fni*(nmat-imat)*pnmat),\n np.sum(m*sm*gm)*nmean/mmean)\n\ndef constraint_eta(sm,fni,fni_tilde,state_meta):\n nmax = state_meta[1]\n m = state_meta[2]\n pn = state_meta[4]\n imat = state_meta[5]\n pnmat = state_meta[7]\n nmean = np.sum(pn*np.arange(nmax+1))\n\n eta = np.sum(imat*fni_tilde*pnmat)/nmean\n assert np.isclose((1.-eta)**m,sm).all()\n\nconstraint_list = [constraint_prob,constraint_norm,constraint_stubs,\n constraint_eta, constraint_norm_tilde]\n\nclass TestFniOptimization:\n def test_reg_net_1(self):\n #structure\n nmax = 5\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[5] = 1\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 3.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n\n\n def test_reg_net_2(self):\n #structure\n nmax = 5\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[5] = 1\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 3.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 10**(-1)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_reg_net_3(self):\n #structure\n nmax = 5\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[5] = 1\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 0.5\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 5*10**(-1)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_hom_net_1(self):\n #structure\n nmax = 10\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[2:nmax+1] = np.exp(-0.1*np.arange(2,nmax+1))\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 3.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = 
invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_hom_net_2(self):\n #structure\n nmax = 10\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[2:nmax+1] = np.exp(-0.5*np.arange(2,nmax+1))\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 3.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 5*10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_hom_net_3(self):\n #structure\n nmax = 20\n mmax = 20\n m = np.arange(mmax+1)\n n = np.arange(nmax+1)\n pn = np.zeros(nmax+1)\n param = 5\n gm = np.exp(m*np.log(param) - loggamma(m+1))\n gm[0:1] = 0\n gm /= np.sum(gm)\n pn = np.exp(n*np.log(param) - loggamma(n+1))\n pn[0:2] = 0\n pn /= np.sum(pn)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 0.5\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 10**(-3)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n def test_het_1(self):\n #structure\n nmax = 20\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[2:nmax+1] = (1.*np.arange(2,nmax+1))**(-3)\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 3.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 5*10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_het_2(self):\n #structure\n nmax = 20\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[2:nmax+1] = (1.*np.arange(2,nmax+1))**(-3)\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 2.\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 5*10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n\n def test_het_3(self):\n #structure\n nmax = 20\n mmax = 5\n pn = np.zeros(nmax+1)\n pn[2:nmax+1] = (1.*np.arange(2,nmax+1))**(-3)\n pn /= np.sum(pn)\n gm = np.zeros(mmax+1)\n gm[5] = 5\n gm /= np.sum(gm)\n state_meta = get_state_meta(mmax, nmax, gm, pn)\n\n #infection\n nu = 0.5\n beta = lambda n,i,trate,nu: trate*i**nu\n trate = invasion_threshold_safe(beta, gm, pn, fixed_args=(nu,))*1.1\n inf_mat = infection_matrix(beta,nmax,args=(trate,nu))\n initial_density = 5*10**(-2)\n\n sm,fni,fni_tilde = optimize_fni(initial_density,inf_mat,state_meta)\n\n for const in constraint_list:\n const(sm,fni,fni_tilde,state_meta)\n\n"
] |
[
[
"numpy.log",
"numpy.arange",
"scipy.special.loggamma",
"numpy.zeros",
"numpy.sum",
"numpy.isclose"
]
] |
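Every test above builds its distributions the same way: place mass on the chosen sizes, normalize with np.sum, then tabulate the nonlinear infection kernel beta(n, i) = trate * i**nu. A minimal numpy sketch of those two steps, making no assumption about the real get_state_meta/infection_matrix layout (the nmax, trate, nu values are illustrative):

import numpy as np

nmax, trate, nu = 5, 0.2, 3.0
# Regular structure: all groups have size 5.
pn = np.zeros(nmax + 1)
pn[5] = 1
pn /= np.sum(pn)
# Tabulate beta(n, i) = trate * i**nu for 0 <= i <= n, zero elsewhere.
n = np.arange(nmax + 1, dtype=float)[:, None]
i = np.arange(nmax + 1, dtype=float)[None, :]
inf_mat = np.where(i <= n, trate * i ** nu, 0.0)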
adrift00/cs231n-assignments-2019
|
[
"38ffc935fa1aa6e26ba5cc560269650db521d3c4"
] |
[
"assignments/2019/assignment1/cs231n/gradient_check.py"
] |
[
"from __future__ import print_function\n\nimport numpy as np\nfrom random import randrange\n\ndef eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n \"\"\"\n a naive implementation of numerical gradient of f at x\n - f should be a function that takes a single argument\n - x is the point (numpy array) to evaluate the gradient at\n \"\"\"\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad\n\n\ndef eval_numerical_gradient_array(f, x, df, h=1e-5):\n \"\"\"\n Evaluate a numeric gradient for a function that accepts a numpy\n array and returns a numpy array.\n \"\"\"\n grad = np.zeros_like(x)\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n\n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n\n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad\n\n\ndef eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):\n \"\"\"\n Compute numeric gradients for a function that operates on input\n and output blobs.\n\n We assume that f accepts several input blobs as arguments, followed by a\n blob where outputs will be written. For example, f might be called like:\n\n f(x, w, out)\n\n where x and w are input Blobs, and the result of f will be written to out.\n\n Inputs:\n - f: function\n - inputs: tuple of input blobs\n - output: output blob\n - h: step size\n \"\"\"\n numeric_diffs = []\n for input_blob in inputs:\n diff = np.zeros_like(input_blob.diffs)\n it = np.nditer(input_blob.vals, flags=['multi_index'],\n op_flags=['readwrite'])\n while not it.finished:\n idx = it.multi_index\n orig = input_blob.vals[idx]\n\n input_blob.vals[idx] = orig + h\n f(*(inputs + (output,)))\n pos = np.copy(output.vals)\n input_blob.vals[idx] = orig - h\n f(*(inputs + (output,)))\n neg = np.copy(output.vals)\n input_blob.vals[idx] = orig\n\n diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)\n\n it.iternext()\n numeric_diffs.append(diff)\n return numeric_diffs\n\n\ndef eval_numerical_gradient_net(net, inputs, output, h=1e-5):\n return eval_numerical_gradient_blobs(lambda *args: net.forward(),\n inputs, output, h=h)\n\n\ndef grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):\n \"\"\"\n sample a few random elements and only return numerical\n in this dimensions.\n \"\"\"\n\n for i in range(num_checks):\n ix = tuple([randrange(m) for m in x.shape])\n\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x + h)\n x[ix] = oldval - h # increment by h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # reset\n\n grad_numerical = (fxph - fxmh) / (2 * h)\n grad_analytic = analytic_grad[ix]\n rel_error = (abs(grad_numerical - grad_analytic) /\n (abs(grad_numerical) + abs(grad_analytic)))\n print('numerical: %f analytic: %f, relative error: %e'\n %(grad_numerical, grad_analytic, rel_error))\n"
] |
[
[
"numpy.sum",
"numpy.copy",
"numpy.zeros_like",
"numpy.nditer"
]
] |
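Typical use of the centered-difference checker above is to compare it against a known analytic gradient. A short sketch, assuming gradient_check.py is importable from the working directory (for f(x) = sum(x**2) the gradient is 2x):

import numpy as np
from gradient_check import eval_numerical_gradient  # import path assumed

x = np.random.randn(3, 4)
num_grad = eval_numerical_gradient(lambda x: np.sum(x ** 2), x, verbose=False)
assert np.allclose(num_grad, 2 * x, atol=1e-4)  # centered differences: O(h**2) error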
kaywuensche/clustering_images
|
[
"3e09d89f1b41c20d905f2959743ec3e7fd2ebe7b"
] |
[
"src/main/utils.py"
] |
[
"import os\r\nimport io\r\nimport shutil\r\nimport math\r\nimport random\r\nimport numpy as np\r\nimport requests\r\nimport urllib.request\r\nfrom PIL import Image, ImageDraw\r\nfrom bs4 import BeautifulSoup\r\nimport imghdr\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.decomposition import PCA\r\nfrom keras.models import Model\r\nfrom keras.preprocessing.image import load_img\r\nfrom keras.applications.vgg16 import preprocess_input\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom yellowbrick.cluster import KElbowVisualizer\r\nimport matplotlib.pyplot as plt\r\n\r\nALLOWED_EXTENSIONS = set(['.png', '.jpg', '.jpeg'])\r\n\r\ndef initiat_exchange(folder):\r\n if not os.path.isdir(folder):\r\n os.mkdir(folder)\r\n\r\ndef del_prev_session(folder):\r\n if os.path.isdir(folder):\r\n shutil.rmtree(folder)\r\n os.mkdir(folder)\r\n\r\ndef get_image_from_url(url_img, search_dir):\r\n tempfile = os.path.join(search_dir, \"temp.jpeg\")\r\n urllib.request.urlretrieve(url_img, tempfile)\r\n extension = imghdr.what(tempfile)\r\n os.remove(tempfile)\r\n urllib.request.urlretrieve(url_img, os.path.join(search_dir, str(len(os.listdir(search_dir)) + 1) + \".\" + extension))\r\n\r\ndef get_images_from_web(searchTerm, search_dir):\r\n url_list = \"https://images.search.yahoo.com/search/images;?p={}\", \"https://www.google.com/search?q={}&site=webhp&tbm=isch\", \"https://www.bing.com/images/search?q={}&scope=images\"\r\n done = ()\r\n for url in url_list:\r\n searchUrl = url.format(searchTerm)\r\n d = requests.get(searchUrl).text\r\n soup = BeautifulSoup(d, 'html.parser')\r\n img_tags = soup.find_all('img')\r\n for img in img_tags:\r\n try:\r\n key_list = img.attrs.keys()\r\n url_img = 'na'\r\n if 'src' in key_list and img['src'].startswith(\"http\"):\r\n url_img = img['src']\r\n elif 'src2' in key_list and img['src2'].startswith(\"http\"):\r\n url_img = img['src2']\r\n get_image_from_url(url_img, search_dir)\r\n done.append(url_img)\r\n except:\r\n pass\r\n linkTags = soup.findAll('a')\r\n for linkTag in linkTags:\r\n try:\r\n linkUrl = linkTag['href']\r\n if linkUrl.startswith(\"http\"):\r\n if linkUrl.endswith(\".jpg\") or linkUrl.endswith(\".jpeg\") or linkUrl.endswith(\".png\") and linkUrl not in done:\r\n get_image_from_url(linkUrl, search_dir)\r\n except:\r\n pass\r\n\r\ndef create_image_from_input(directory):\r\n if os.path.exists(os.path.join(directory, \"temp.jpeg\")):\r\n os.remove(os.path.join(directory, \"temp.jpeg\"))\r\n files = [filename.path for filename in os.scandir(directory) if os.path.splitext(filename.name)[1] in ALLOWED_EXTENSIONS]\r\n grid_size = math.ceil(math.sqrt(len(files))) * 100\r\n with Image.new('RGBA', (grid_size, grid_size), color=\"white\") as new_im:\r\n k = 0\r\n for i in range(0, grid_size, 100):\r\n for j in range(0, grid_size, 100):\r\n with Image.open(files[k]) as im:\r\n im.thumbnail((100, 100))\r\n new_im.paste(im, (i, j))\r\n k += 1\r\n if k >= len(files):\r\n break\r\n if k >= len(files):\r\n break\r\n buf = io.BytesIO()\r\n new_im.save(buf, format='PNG')\r\n return buf\r\n\r\ndef resize_img_static(image, size):\r\n pil_image = Image.open(image)\r\n width, height = pil_image.size\r\n pil_image = pil_image.resize((size, int(height * (size / width))), Image.ANTIALIAS)\r\n return pil_image\r\n\r\ndef remove_corrupt_images(input, output):\r\n for filename in os.scandir(input):\r\n extension = os.path.splitext(filename.name)[1]\r\n if extension in ALLOWED_EXTENSIONS:\r\n try:\r\n with Image.open(filename.path) as img:\r\n img.save(os.path.join(output, 
filename.name.replace(extension, '.png')), 'PNG')\r\n except:\r\n print('file ' + filename.name + ' skipped')\r\n\r\ndef remove_duplicate_images(img_dir):\r\n no_duplicates = {}\r\n for image in os.scandir(img_dir):\r\n pil_image = resize_img_static(image.path, 500)\r\n bytes_pil_image = pil_image.tobytes()\r\n hashed_value = hash(bytes_pil_image)\r\n no_duplicates[hashed_value] = image.path\r\n pil_image.close()\r\n for image in os.scandir(img_dir):\r\n if image.path not in list(no_duplicates.values()):\r\n os.remove(image.path)\r\n\r\ndef extract_features(file, model):\r\n img = load_img(file, target_size=(224,224))\r\n img = np.array(img)\r\n reshaped_img = img.reshape(1,224,224,3)\r\n imgx = preprocess_input(reshaped_img)\r\n features = model.predict(imgx, use_multiprocessing=True)\r\n return features\r\n\r\ndef create_image_from_clusters(directory):\r\n dirs = [os.path.join(directory, d) for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]\r\n cols = len(dirs)\r\n cols = cols * 100\r\n rows = 11 * 100\r\n with Image.new('RGBA', (cols, rows), color=\"white\") as new_im:\r\n for i in range(0, cols, 100):\r\n cluster_path = dirs[int(i/100)]\r\n files = os.listdir(cluster_path)\r\n ImageDraw.Draw(new_im).text((i, 0), os.path.basename(cluster_path), (0, 0, 0))\r\n l = 0\r\n for j in range(100, rows, 100):\r\n if l < len(files):\r\n with Image.open(os.path.join(cluster_path, files[l])) as im:\r\n im.thumbnail((100, 100))\r\n new_im.paste(im, (i, j))\r\n l += 1\r\n buf = io.BytesIO()\r\n new_im.save(buf, format='PNG')\r\n return buf\r\n\r\ndef clustering(path, amount_of_clusters, meth):\r\n model = VGG16()\r\n model = Model(inputs=model.inputs, outputs=model.layers[-2].output)\r\n os.chdir(path)\r\n with os.scandir(path) as files:\r\n images = [file.name for file in files if file.name.endswith('.png')]\r\n data = {}\r\n for image in images:\r\n feat = extract_features(image, model)\r\n data[image] = feat\r\n filenames = np.array(list(data.keys()))\r\n file_count = len(filenames)\r\n feat = np.array(list(data.values()))\r\n feat = feat.reshape(-1,4096)\r\n if len(feat) > 100 and file_count > 100:\r\n components = 100\r\n else:\r\n components = min(len(feat), file_count)\r\n pca = PCA(n_components=components, random_state=22)\r\n pca.fit(feat)\r\n x = pca.transform(feat)\r\n if amount_of_clusters is None or meth == 'elbow':\r\n if file_count > 50:\r\n rounds = 50\r\n else:\r\n rounds = file_count\r\n model = KMeans()\r\n visualizer = KElbowVisualizer(model, k=(2, rounds), timings=False)\r\n visualizer.fit(x)\r\n if (meth == 'elbow'):\r\n buf = io.BytesIO()\r\n visualizer.show(outpath=buf, format='PNG')\r\n plt.gcf().clear()\r\n return buf\r\n else:\r\n amount_of_clusters = visualizer.elbow_value_\r\n plt.gcf().clear()\r\n kmeans = KMeans(n_clusters=amount_of_clusters, random_state=22)\r\n kmeans.fit(x)\r\n groups = {}\r\n for file, cluster in zip(filenames, kmeans.labels_):\r\n if cluster not in groups.keys():\r\n groups[cluster] = []\r\n groups[cluster].append(file)\r\n os.makedirs(os.path.join(path, 'cluster_' + str(cluster)))\r\n else:\r\n groups[cluster].append(file)\r\n shutil.move(os.path.join(path, file), os.path.join(path, 'cluster_' + str(cluster), file))\r\n return create_image_from_clusters(path)\r\n\r\ndef get_sample_of_cluster(output, samplesize):\r\n list_clusters = [f.path for f in os.scandir(os.path.join(output, \"\")) if f.is_dir()]\r\n sample = os.path.join(output, \"sample\")\r\n os.makedirs(sample)\r\n for cluster in list_clusters:\r\n list_images = 
[f.path for f in os.scandir(os.path.join(cluster, \"\")) if f.is_file()]\r\n images_of_cluster = math.floor(len(list_images) * samplesize)\r\n if(images_of_cluster <= 1):\r\n images_of_cluster = 1\r\n selected_sample = random.sample(list_images, images_of_cluster)\r\n for file in selected_sample:\r\n shutil.copyfile(file, os.path.join(sample, os.path.basename(file)))\r\n"
] |
[
[
"numpy.array",
"sklearn.decomposition.PCA",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.gcf"
]
] |
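The heart of the clustering() function above is features -> PCA -> KMeans. The reduction and clustering steps in isolation, on random 4096-d vectors standing in for VGG16 fc2 features (the sizes and cluster count are illustrative):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

feat = np.random.rand(50, 4096)  # stand-in for 50 VGG16 feature vectors
x = PCA(n_components=20, random_state=22).fit_transform(feat)
labels = KMeans(n_clusters=5, random_state=22, n_init=10).fit_predict(x)
print(dict(zip(*np.unique(labels, return_counts=True))))  # images per cluster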
dt99jay/League-Tables
|
[
"92c029957734841ded0e5ed9f783fe3ea57f07f3"
] |
[
"Complete University Guide/Subjects/cug_subjects.py"
] |
[
"import urllib\nimport requests\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\n\ndef get_cols(table):\n header = table.find_all(\"th\")\n cols = []\n for column in header:\n try:\n col = column.find(\"a\").get_text()\n except AttributeError:\n col = column.get_text()\n cols.append(col.strip())\n if column.get(\"colspan\"):\n for c in range(1, int(column.get(\"colspan\"))):\n cols.append(\"{} {}\".format(col.strip(), c))\n return cols\n\ndef get_data(years):\n url = \"https://www.thecompleteuniversityguide.co.uk/league-tables/rankings?v=wide&y=\"\n data = []\n for year in years:\n for index, subject in pd.read_csv(\"lookup.csv\")[\"Subject\"].iteritems():\n r = requests.get(url + str(year) + \"&s=\" + urllib.parse.quote(subject))\n soup = BeautifulSoup(r.text, \"lxml\")\n table = soup.find(\"table\", {\"class\": \"league-table-table\"})\n table_cols = get_cols(table)\n table_data = []\n for row in table.find_all(\"tr\"):\n table_row = []\n for cell in row.find_all(\"td\"):\n table_row.append(cell.get_text().strip())\n table_data.append(table_row)\n table_data = [l for l in table_data if len(l) > 2]\n table_data = pd.DataFrame(data=table_data, columns=table_cols)\n table_data = pd.melt(table_data, id_vars=[\"University Name\"], var_name=\"Metric\", value_name=\"Value\")\n table_data[\"Year\"] = year\n table_data[\"Subject\"] = subject\n data.append(table_data)\n return pd.concat(data, axis=0)\n\ndef clean_data(data):\n \"\"\"\n Remove unnecessary metrics, add numerical values\n \"\"\"\n data.rename(columns={\"University Name\": \"Institution\"}, inplace=True)\n data = data.copy().loc[~data[\"Metric\"].isin([\"Rank\", \"Rank 1\", \"Next Steps\", \"Green Score\"])]\n data[\"Numeric Value\"] = data[\"Value\"].astype(str)\n data[\"Numeric Value\"] = data[\"Numeric Value\"].replace(\"n/a\", \"\", regex=False)\n data[\"Numeric Value\"] = data[\"Numeric Value\"].str.replace(\",\", \"\", regex=False)\n data[\"Numeric Value\"] = data[\"Numeric Value\"].str.replace(r\"[a-z]\", \"\", regex=True)\n data[\"Numeric Value\"] = pd.to_numeric(data[\"Numeric Value\"], errors=\"coerce\") # Coerce will turn blanks to NaNs\n return data\n\ndef rank_metrics(data):\n \"\"\"\n Calculate rank and decile for each metric by year\n \"\"\"\n data[\"Rank\"] = data.groupby([\"Year\", \"Subject\", \"Metric\"])[\"Numeric Value\"].rank(ascending=False, method=\"min\")\n data_nan = data.copy().loc[data[\"Numeric Value\"].isnull()] # Exclude NaNs for now\n data_nan[\"Decile\"] = pd.np.nan\n data = data.copy().loc[data[\"Numeric Value\"].notnull()]\n data[\"Decile\"] = data.groupby([\"Year\", \"Subject\", \"Metric\"])[\"Numeric Value\"].transform(\n lambda x: pd.qcut(x.rank(method=\"first\"), 10, labels=range(1,11)) # Calculate deciles on ranked data to avoid duplicate bin edges as https://stackoverflow.com/a/40548606/2950747\n )\n return data.append(data_nan)\n\nyears = [2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020]\ndata = get_data(years)\ndata = clean_data(data)\ndata = rank_metrics(data)\ndata.to_csv(\"Complete University Guide Subjects.csv\", index=False) # Save final CSV to disk\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame",
"pandas.to_numeric",
"pandas.melt"
]
] |
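The rank-then-qcut trick in rank_metrics above deserves a closer look: qcut on raw values fails with duplicate bin edges when many institutions tie, so deciles are cut on method="first" ranks instead. A toy illustration with a single metric group (the data values are made up):

import pandas as pd

df = pd.DataFrame({"Metric": ["A"] * 10,
                   "Value": [5.0, 3.0, 3.0, 8.0, 1.0, 9.0, 2.0, 7.0, 6.0, 4.0]})
df["Rank"] = df.groupby("Metric")["Value"].rank(ascending=False, method="min")
df["Decile"] = df.groupby("Metric")["Value"].transform(
    lambda x: pd.qcut(x.rank(method="first"), 10, labels=range(1, 11))
)
print(df.sort_values("Value"))  # the tied 3.0s share a Rank but get distinct deciles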
rmenegaux/fastDNA
|
[
"bc6469694daa69a18c458166ef4975b9fc1c308c"
] |
[
"python/fastDNA/evaluate.py"
] |
[
"from sklearn.metrics import accuracy_score\nimport numpy as np\nimport argparse\n\n\nparser = argparse.ArgumentParser(description=\n '''\n Compute average species-level recall and precision.\n ''')\n\nparser.add_argument(\"predictions\", nargs='+',\n help=\"predicted labels, text file with one label per line\",\n type=str)\n\nparser.add_argument(\"truth\", nargs='+',\n help=\"ground truth labels, text file with one label per line\",\n type=str)\n\n\ndef evaluate_predictions(pred_labels, true_labels, verbose=0):\n ''' \n Computes average species-level precision and recall\n '''\n\n y_true = np.genfromtxt(true_labels, dtype=str)\n\n with open(pred_labels) as f:\n y_pred = f.readlines()\n y_pred = np.array([x.strip() for x in y_pred])\n\n unique_true = np.unique(y_true)\n\n rec_per_species = np.array(\n [accuracy_score(y_pred[y_true==k], y_true[y_true==k])\n for k in unique_true]\n )\n recall = np.mean(rec_per_species)\n\n acc_per_species = np.array(\n [accuracy_score(y_pred[y_pred==k], y_true[y_pred==k])\n for k in unique_true if (y_pred==k).any()]\n )\n precision = np.mean(acc_per_species)\n\n if verbose > 0:\n print('''Recall: {:.2%} Precision: {:.2%}'''.format(recall, precision))\n print('''Number of predicted species {}'''.format(len(np.unique(y_pred))))\n\n return {'Recall': recall, 'Precision': precision}\n\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n evaluate_predictions(args.predictions[0], args.truth[0], verbose=1)"
] |
[
[
"sklearn.metrics.accuracy_score",
"numpy.mean",
"numpy.unique",
"numpy.genfromtxt"
]
] |
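A worked micro-example of the species-averaged (macro) recall computed above, where each species contributes equally regardless of how many reads it has:

import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array(["a", "a", "b", "b", "b"])
y_pred = np.array(["a", "b", "b", "b", "c"])
# Recall per species: a -> 1/2, b -> 2/3; macro average = (1/2 + 2/3) / 2 = 7/12
recall = np.mean([accuracy_score(y_true[y_true == k], y_pred[y_true == k])
                  for k in np.unique(y_true)])
assert np.isclose(recall, 7 / 12)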
xtwentian3/BCQ
|
[
"e114f8c474c57a36d9af78c42a06f612831afda2"
] |
[
"continuous_BCQ/main.py"
] |
[
"import argparse\nimport gym\nimport numpy as np\nimport os\nimport torch\n\nimport BCQ\nimport DDPG\nimport utils\n\n\n# Handles interactions with the environment, i.e. train behavioral or generate buffer\ndef interact_with_environment(env, state_dim, action_dim, max_action, device, args):\n\t# For saving files\n\tsetting = f\"{args.env}_{args.seed}\"\n\tbuffer_name = f\"{args.buffer_name}_{setting}\"\n\n\t# Initialize and load policy\n\tpolicy = DDPG.DDPG(state_dim, action_dim, max_action, device)#, args.discount, args.tau)\n\tif args.generate_buffer: policy.load(f\"./models/behavioral_{setting}\")\n\n\t# Initialize buffer\n\treplay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)\n\t\n\tevaluations = []\n\n\tstate, done = env.reset(), False\n\tepisode_reward = 0\n\tepisode_timesteps = 0\n\tepisode_num = 0\n\n\t# Interact with the environment for max_timesteps\n\tfor t in range(int(args.max_timesteps)):\n\n\t\tepisode_timesteps += 1\n\n\t\t# Select action with noise\n\t\tif (\n\t\t\t(args.generate_buffer and np.random.uniform(0, 1) < args.rand_action_p) or \n\t\t\t(args.train_behavioral and t < args.start_timesteps)\n\t\t):\n\t\t\taction = env.action_space.sample()\n\t\telse: \n\t\t\taction = (\n\t\t\t\tpolicy.select_action(np.array(state))\n\t\t\t\t+ np.random.normal(0, max_action * args.gaussian_std, size=action_dim)\n\t\t\t).clip(-max_action, max_action)\n\n\t\t# Perform action\n\t\tnext_state, reward, done, _ = env.step(action) \n\t\tdone_bool = float(done) if episode_timesteps < env._max_episode_steps else 0\n\n\t\t# Store data in replay buffer\n\t\treplay_buffer.add(state, action, next_state, reward, done_bool)\n\n\t\tstate = next_state\n\t\tepisode_reward += reward\n\n\t\t# Train agent after collecting sufficient data\n\t\tif args.train_behavioral and t >= args.start_timesteps:\n\t\t\tpolicy.train(replay_buffer, args.batch_size)\n\n\t\tif done: \n\t\t\t# +1 to account for 0 indexing. 
+0 on ep_timesteps since it will increment +1 even if done=True\n\t\t\tprint(f\"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}\")\n\t\t\t# Reset environment\n\t\t\tstate, done = env.reset(), False\n\t\t\tepisode_reward = 0\n\t\t\tepisode_timesteps = 0\n\t\t\tepisode_num += 1\n\n\t\t# Evaluate episode\n\t\tif args.train_behavioral and (t + 1) % args.eval_freq == 0:\n\t\t\tevaluations.append(eval_policy(policy, args.env, args.seed))\n\t\t\tnp.save(f\"./results/behavioral_{setting}\", evaluations)\n\t\t\tpolicy.save(f\"./models/behavioral_{setting}\")\n\n\t# Save final policy\n\tif args.train_behavioral:\n\t\tpolicy.save(f\"./models/behavioral_{setting}\")\n\n\t# Save final buffer and performance\n\telse:\n\t\tevaluations.append(eval_policy(policy, args.env, args.seed))\n\t\tnp.save(f\"./results/buffer_performance_{setting}\", evaluations)\n\t\treplay_buffer.save(f\"./buffers/{buffer_name}\")\n\n\n# Trains BCQ offline\ndef train_BCQ(state_dim, action_dim, max_action, device, args):\n\t# For saving files\n\tsetting = f\"{args.env}_{args.seed}\"\n\tbuffer_name = f\"{args.buffer_name}_{setting}\"\n\n\t# Initialize policy\n\tpolicy = BCQ.BCQ(state_dim, action_dim, max_action, device, args.discount, args.tau, args.lmbda, args.phi)\n\n\t# Load buffer\n\treplay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)\n\treplay_buffer.load(f\"./buffers/{buffer_name}\")\n\t\n\tevaluations = []\n\tepisode_num = 0\n\tdone = True \n\ttraining_iters = 0\n\t\n\twhile training_iters < args.max_timesteps: \n\t\tpol_vals = policy.train(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)\n\n\t\tevaluations.append(eval_policy(policy, args.env, args.seed))\n\t\tnp.save(f\"./results/BCQ_{setting}\", evaluations)\n\n\t\ttraining_iters += args.eval_freq\n\t\tprint(f\"Training iterations: {training_iters}\")\n\n\n# Runs policy for X episodes and returns average reward\n# A fixed seed is used for the eval environment\ndef eval_policy(policy, env_name, seed, eval_episodes=10):\n\teval_env = gym.make(env_name)\n\teval_env.seed(seed + 100)\n\n\tavg_reward = 0.\n\tfor _ in range(eval_episodes):\n\t\tstate, done = eval_env.reset(), False\n\t\twhile not done:\n\t\t\taction = policy.select_action(np.array(state))\n\t\t\tstate, reward, done, _ = eval_env.step(action)\n\t\t\tavg_reward += reward\n\n\tavg_reward /= eval_episodes\n\n\tprint(\"---------------------------------------\")\n\tprint(f\"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}\")\n\tprint(\"---------------------------------------\")\n\treturn avg_reward\n\n\nif __name__ == \"__main__\":\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--env\", default=\"Hopper-v1\") # OpenAI gym environment name\n\tparser.add_argument(\"--seed\", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds\n\tparser.add_argument(\"--buffer_name\", default=\"Robust\") # Prepends name to filename\n\tparser.add_argument(\"--eval_freq\", default=5e3, type=float) # How often (time steps) we evaluate\n\tparser.add_argument(\"--max_timesteps\", default=1e6, type=int) # Max time steps to run environment or train for (this defines buffer size)\n\tparser.add_argument(\"--start_timesteps\", default=25e3, type=int)# Time steps initial random policy is used before training behavioral\n\tparser.add_argument(\"--rand_action_p\", default=0.3, type=float) # Probability of selecting random action during batch generation\n\tparser.add_argument(\"--gaussian_std\", default=0.3, type=float) # 
Std of Gaussian exploration noise (Set to 0.1 if DDPG trains poorly)\n\tparser.add_argument(\"--batch_size\", default=100, type=int) # Mini batch size for networks\n\tparser.add_argument(\"--discount\", default=0.99) # Discount factor\n\tparser.add_argument(\"--tau\", default=0.005) # Target network update rate\n\tparser.add_argument(\"--lmbda\", default=0.75) # Weighting for clipped double Q-learning in BCQ\n\tparser.add_argument(\"--phi\", default=0.05) # Max perturbation hyper-parameter for BCQ\n\tparser.add_argument(\"--train_behavioral\", action=\"store_true\") # If true, train behavioral (DDPG)\n\tparser.add_argument(\"--generate_buffer\", action=\"store_true\") # If true, generate buffer\n\targs = parser.parse_args()\n\n\tprint(\"---------------------------------------\")\t\n\tif args.train_behavioral:\n\t\tprint(f\"Setting: Training behavioral, Env: {args.env}, Seed: {args.seed}\")\n\telif args.generate_buffer:\n\t\tprint(f\"Setting: Generating buffer, Env: {args.env}, Seed: {args.seed}\")\n\telse:\n\t\tprint(f\"Setting: Training BCQ, Env: {args.env}, Seed: {args.seed}\")\n\tprint(\"---------------------------------------\")\n\n\tif args.train_behavioral and args.generate_buffer:\n\t\tprint(\"Train_behavioral and generate_buffer cannot both be true.\")\n\t\texit()\n\n\tif not os.path.exists(\"./results\"):\n\t\tos.makedirs(\"./results\")\n\n\tif not os.path.exists(\"./models\"):\n\t\tos.makedirs(\"./models\")\n\n\tif not os.path.exists(\"./buffers\"):\n\t\tos.makedirs(\"./buffers\")\n\n\tenv = gym.make(args.env)\n\n\tenv.seed(args.seed)\n\t# env.action_space.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\tnp.random.seed(args.seed)\n\t\n\tstate_dim = env.observation_space.shape[0]\n\taction_dim = env.action_space.shape[0] \n\tmax_action = float(env.action_space.high[0])\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\tif args.train_behavioral or args.generate_buffer:\n\t\tinteract_with_environment(env, state_dim, action_dim, max_action, device, args)\n\telse:\n\t\ttrain_BCQ(state_dim, action_dim, max_action, device, args)\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"numpy.save",
"numpy.random.normal",
"torch.cuda.is_available",
"numpy.random.uniform",
"numpy.array"
]
] |
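A minimal sketch of the seeding pattern the BCQ script above relies on (the listed torch.manual_seed / numpy.random.seed calls plus Gym's env.seed); it assumes the classic pre-0.26 gym API used in the script, and the environment name here is only an illustration:

    import gym
    import numpy as np
    import torch

    def seed_everything(env, seed: int) -> None:
        # Gym, PyTorch and NumPy must all be seeded for a run to repeat.
        env.seed(seed)
        torch.manual_seed(seed)
        np.random.seed(seed)

    env = gym.make("Pendulum-v0")  # illustrative; the script defaults to Hopper-v1
    seed_everything(env, seed=0)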
ze-lin/AudioCaption
|
[
"0d383c56350b57b3867a91578a11b332b1a29789"
] |
[
"utils/bert/create_sent_embedding.py"
] |
[
"import pickle\nimport fire\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\nclass EmbeddingExtractor(object):\n\n def extract_sentbert(self, caption_file: str, output: str, dev: bool=True, zh: bool=False):\n from sentence_transformers import SentenceTransformer\n lang2model = {\n \"zh\": \"distiluse-base-multilingual-cased\",\n \"en\": \"bert-base-nli-mean-tokens\"\n }\n lang = \"zh\" if zh else \"en\"\n model = SentenceTransformer(lang2model[lang])\n\n self.extract(caption_file, model, output, dev)\n\n def extract_originbert(self, caption_file: str, output: str, dev: bool=True, ip=\"localhost\"):\n from bert_serving.client import BertClient\n caption_df = pd.read_json(caption_file, dtype={\"key\": str})\n client = BertClient(ip)\n \n self.extract(caption_file, client, output, dev)\n\n def extract(self, caption_file: str, model, output, dev: bool):\n caption_df = pd.read_json(caption_file, dtype={\"key\": str})\n embeddings = {}\n\n if dev:\n with tqdm(total=caption_df.shape[0], ascii=True) as pbar:\n for idx, row in caption_df.iterrows():\n caption = row[\"caption\"]\n key = row[\"key\"]\n caption_index = row[\"caption_index\"]\n embeddings[\"{}_{}\".format(key, caption_index)] = np.array(model.encode([caption])).reshape(-1)\n pbar.update()\n\n else:\n dump = {}\n\n with tqdm(total=caption_df.shape[0], ascii=True) as pbar:\n for idx, row in caption_df.iterrows():\n key = row[\"key\"]\n caption = row[\"caption\"]\n value = np.array(model.encode([caption])).reshape(-1)\n\n if key not in embeddings.keys():\n embeddings[key] = [value]\n else:\n embeddings[key].append(value)\n\n pbar.update()\n \n for key in embeddings:\n dump[key] = np.stack(embeddings[key])\n\n embeddings = dump\n\n with open(output, \"wb\") as f:\n pickle.dump(embeddings, f)\n \n def extract_sbert(self, \n input_json: str, \n output: str):\n from sentence_transformers import SentenceTransformer\n import torch\n from h5py import File\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = SentenceTransformer(\"bert-base-nli-mean-tokens\")\n model = model.to(device)\n model.eval()\n\n df = pd.read_json(input_json)\n\n with torch.no_grad(), tqdm(total=df.shape[0], ascii=True) as pbar, File(output, \"w\") as store:\n for idx, row in df.iterrows():\n caption = row[\"caption\"]\n store[row[\"caption_key\"]] = model.encode([caption]).squeeze(0)\n pbar.update()\n\n\nif __name__ == \"__main__\":\n fire.Fire(EmbeddingExtractor)\n"
] |
[
[
"torch.cuda.is_available",
"torch.no_grad",
"pandas.read_json",
"numpy.stack"
]
] |
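The embedding extractor above boils down to one pattern: encode a caption, flatten it, and pickle a key-to-vector dict. A minimal sketch under the same model name used by extract_sentbert (downloading the model needs network access; the output path and key are hypothetical):

    import pickle
    import numpy as np
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("bert-base-nli-mean-tokens")
    vec = np.array(model.encode(["a dog barks in the distance"])).reshape(-1)

    with open("sent_emb.pkl", "wb") as f:  # hypothetical output file
        pickle.dump({"clip0_0": vec}, f)   # "{key}_{caption_index}" keys, as above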
vasudevanv/OpenPNM
|
[
"51dbd5d9fb5210a67493ab839a91320d77ffd2d8"
] |
[
"tests/unit/algorithms/metrics/RelativePermabilityTest.py"
] |
[
"import openpnm as op\nimport numpy.testing as nt\nmgr = op.Workspace()\n\n\nclass RelativePermeabilityTest:\n\n def setup_class(self):\n self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.geo[\"pore.diameter\"] = 0.5\n self.geo[\"pore.area\"] = 0.5**2\n self.geo[\"pore.volume\"] = 0.5**3\n self.geo[\"throat.diameter\"] = 0.3\n self.geo[\"throat.area\"] = 0.3**2\n self.geo[\"throat.volume\"] = 0.3**3\n self.geo[\"throat.conduit_lengths.throat\"] = 1\n self.geo[\"throat.conduit_lengths.pore1\"] = 0.3\n self.geo[\"throat.conduit_lengths.pore2\"] = 0.9\n self.non_wet_phase = op.phases.Air(network=self.net)\n self.wet_phase = op.phases.Water(network=self.net)\n mod = op.models.physics.hydraulic_conductance.hagen_poiseuille\n self.non_wet_phase.add_model(propname='throat.hydraulic_conductance',\n model=mod)\n self.wet_phase.add_model(propname='throat.hydraulic_conductance',\n model=mod)\n mod = op.models.physics.capillary_pressure.washburn\n self.non_wet_phase.add_model(propname='throat.entry_pressure',\n model=mod)\n self.wet_phase.add_model(propname='throat.entry_pressure',\n model=mod)\n self.inlet_pores = self.net.pores('back')\n ip = op.algorithms.InvasionPercolation(network=self.net,\n phase=self.non_wet_phase)\n ip.set_inlets(pores=self.inlet_pores)\n ip.run()\n self.non_wet_phase.update(ip.results())\n\n def test_one_phase_definition(self):\n rp = op.algorithms.metrics.RelativePermeability(network=self.net)\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'invasion_sequence': 'invasion_sequence'})\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n assert results['kr_wp'] is None\n\n def test_overwriting_boundary_faces(self):\n inlets = {'x': 'back', 'y': 'back', 'z': 'back'}\n outlets = {'x': 'front', 'y': 'front', 'z': 'front'}\n rp = op.algorithms.metrics.RelativePermeability(network=self.net)\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'wp': self.wet_phase.name,\n 'invasion_sequence': 'invasion_sequence'\n })\n rp.settings['flow_inlets'].update(inlets)\n rp.settings['flow_outlets'].update(outlets)\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n kx = results['kr_wp']['x']\n ky = results['kr_wp']['y']\n kr = [7.003833e-01, 4.675499e-01, 4.675499e-01, 2.371033e-06,\n 1.216706e-06, 1.000000e-06, 1.000000e-06, 1.000000e-06,\n 1.000000e-06, 1.000000e-06]\n nt.assert_allclose(kx, ky, rtol=1e-6)\n nt.assert_allclose(kx, kr, rtol=1e-6)\n\n def test_lacking_boundary_faces(self):\n rp = op.algorithms.metrics.RelativePermeability(network=self.net)\n inlets = {'x': 'top'}\n outlets = {'x': 'bottom'}\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'wp': self.wet_phase.name,\n 'invasion_sequence': 'invasion_sequence',\n })\n rp.settings['flow_inlets'].update(inlets)\n rp.settings['flow_outlets'].update(outlets)\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n kx = results['kr_wp']['x']\n kz = results['kr_wp']['z']\n kr = [5.982845e-01, 4.060000e-01, 4.060000e-01, 2.046288e-01,\n 1.065283e-06, 1.000000e-06, 1.000000e-06, 1.000000e-06,\n 1.000000e-06, 1.000000e-06]\n nt.assert_allclose(kx, kz, rtol=1e-6)\n nt.assert_allclose(kx, kr, rtol=1e-6)\n\n def test_user_defined_boundary_face(self):\n pores_in = self.net.pores('top')\n pores_out = self.net.pores('bottom')\n self.net.set_label(pores=pores_in, label='pore_in')\n self.net.set_label(pores=pores_out, label='pore_out')\n inlets = {'x': 'pore_in'}\n outlets = {'x': 'pore_out'}\n rp = 
op.algorithms.metrics.RelativePermeability(network=self.net)\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'wp': self.wet_phase.name,\n 'invasion_sequence': 'invasion_sequence'\n })\n rp.settings['flow_inlets'].update(inlets)\n rp.settings['flow_outlets'].update(outlets)\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n kx = results['kr_wp']['x']\n kz = results['kr_wp']['z']\n kr = [5.982845e-01, 4.060000e-01, 4.060000e-01, 2.046288e-01,\n 1.065283e-06, 1.000000e-06, 1.000000e-06, 1.000000e-06,\n 1.000000e-06, 1.000000e-06]\n nt.assert_allclose(kx, kz, rtol=1e-6)\n nt.assert_allclose(kx, kr, rtol=1e-6)\n\n def setup_2D_model(self, shape):\n self.net = op.network.Cubic(shape=shape, spacing=0.0005)\n self.geo = op.geometry.StickAndBall(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.non_wet_phase = op.phases.Air(network=self.net)\n self.wet_phase = op.phases.Water(network=self.net)\n mod = op.models.physics.hydraulic_conductance.hagen_poiseuille\n self.non_wet_phase.add_model(propname='throat.hydraulic_conductance',\n model=mod)\n self.wet_phase.add_model(propname='throat.hydraulic_conductance',\n model=mod)\n mod = op.models.physics.capillary_pressure.washburn\n self.non_wet_phase.add_model(propname='throat.entry_pressure',\n model=mod)\n self.wet_phase.add_model(propname='throat.entry_pressure',\n model=mod)\n if shape[1] != 1:\n self.inlet_pores = self.net.pores('back')\n else:\n self.inlet_pores = self.net.pores('left')\n ip = op.algorithms.InvasionPercolation(network=self.net,\n phase=self.non_wet_phase)\n ip.set_inlets(pores=self.inlet_pores)\n ip.run()\n self.non_wet_phase.update(ip.results())\n\n def test_2D_model_one_phase_curve(self):\n for i in range(3):\n shape = [10, 10, 10]\n shape[i] = 1\n self.setup_2D_model(shape=shape)\n rp = op.algorithms.metrics.RelativePermeability(network=self.net)\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'invasion_sequence': 'invasion_sequence'})\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n assert results['kr_wp'] is None\n nt.assert_allclose(len(results['sat']), 2)\n\n def test_2D_model_two_phase_curve(self):\n for i in range(3):\n shape = [10, 10, 10]\n shape[i] = 1\n self.setup_2D_model(shape=shape)\n rp = op.algorithms.metrics.RelativePermeability(network=self.net)\n rp.settings.update({'nwp': self.non_wet_phase.name,\n 'wp': self.wet_phase.name,\n 'invasion_sequence': 'invasion_sequence'})\n rp.run(Snwp_num=10)\n results = rp.get_Kr_data()\n nt.assert_allclose(len(results['kr_wp']), 2)\n nt.assert_allclose(len(results['kr_nwp']), 2)\n nt.assert_allclose(len(results['sat']), 2)\n\n\nif __name__ == '__main__':\n\n t = RelativePermeabilityTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n"
] |
[
[
"numpy.testing.assert_allclose"
]
] |
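The relative-permeability tests above rest entirely on numpy.testing.assert_allclose; a toy sketch of the rtol=1e-6 tolerance semantics they use:

    import numpy.testing as nt

    computed = [7.003833e-01, 4.675499e-01]
    reference = [7.0038335e-01, 4.6754991e-01]
    # Passes: each pair agrees to within a relative tolerance of 1e-6.
    nt.assert_allclose(computed, reference, rtol=1e-6)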
massquantity/DNN-implementation
|
[
"bca336749a2076fa2873f57d9c2b6f98ebf18a0d"
] |
[
"data/cifar_data.py"
] |
[
"import os\nimport shutil\nimport gzip, tarfile\nimport pickle\nimport subprocess\nimport urllib.request\nimport numpy as np\n\npardir = os.path.dirname(os.path.abspath(__file__))\nif not os.path.exists(pardir + \"/cifar_raw_data\"):\n os.makedirs(pardir + \"/cifar_raw_data\")\ndataset_dir = pardir + \"/cifar_raw_data/\"\n\n\ndef _download():\n print(\"Downloading...\")\n if not os.path.exists(\"cifar-10-python.tar.gz\") and \\\n not os.path.exists(\"cifar_raw_data/cifar-10-python.tar.gz\"):\n subprocess.call(\n 'wget \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"',\n shell=True)\n\n #\tfile_path = dataset_dir + \"cifar10.tar.gz\"\n #\turllib.request.urlretrieve(\n #\t\t\"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\",\n #\t\tfile_path)\n print(\"Download Done ! \\n\")\n\n else:\n print(\"Dataset already downloaded.\")\n\n\ndef _extract_data():\n # extract data\n file_names = [\"cifar-10-batches-py/data_batch_%d\" % i for i in range(1, 6)] + \\\n [\"cifar-10-batches-py/test_batch\"]\n with tarfile.open(\"cifar-10-python.tar.gz\") as tar:\n for file in file_names:\n tar.extract(file, path=os.path.abspath('.'))\n os.rename(\"cifar-10-batches-py\", \"cifar10\")\n shutil.move(\"cifar-10-python.tar.gz\", \"cifar_raw_data/\")\n print(\"Directory renamed...\")\n\n\ndef init_cifar10():\n _download()\n _extract_data()\n print(\"Done!\")\n\n\nclass standardscale:\n def __init__(self):\n self.mean = None\n self.std = None\n\n def fit_transform(self, X):\n self.mean = np.mean(X, axis=0) # .astype(np.float32)\n self.std = np.std(X, axis=0) # .astype(np.float32)\n return (X - self.mean) / self.std\n\n def transform(self, X):\n return (X - self.mean) / self.std\n\n\ndef _one_hot(labels):\n return np.eye(10)[np.array(labels, dtype=np.int32)]\n\n\ndef load_data(normalize=False, standard=True, one_hot=True):\n if not os.path.exists(dataset_dir):\n init_cifar10()\n\n data, labels = [], []\n file_path = os.path.join(pardir, \"cifar10/\")\n for i in range(1, 6):\n with open(file_path + 'data_batch_%d' % i, 'rb') as f:\n whole = pickle.load(f, encoding='bytes')\n data.extend(whole[b'data'])\n labels.extend(whole[b'labels'])\n\n test_data, test_labels = [], []\n with open(file_path + 'test_batch', 'rb') as f:\n whole = pickle.load(f, encoding='bytes')\n test_data = whole[b'data']\n test_labels = np.array(whole[b'labels'])\n\n X_train = np.array(data).astype(np.float32)\n X_test = np.array(test_data).astype(np.float32)\n\n if normalize:\n X_train = X_train / 255.0\n X_test = X_test / 255.0\n\n if one_hot:\n y_train = _one_hot(labels)\n y_test = _one_hot(test_labels)\n\n if standard:\n ss = standardscale()\n X_train = ss.fit_transform(X_train)\n X_test = ss.transform(X_test)\n\n return (X_train, y_train), (X_test, y_test)\n\n\nif __name__ == \"__main__\":\n init_cifar10()\n load_data()"
] |
[
[
"numpy.array",
"numpy.std",
"numpy.eye",
"numpy.mean"
]
] |
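A usage sketch for load_data above, assuming the file is importable as cifar_data and the dataset has already been downloaded and extracted via init_cifar10(); the shapes follow the standard CIFAR-10 layout (50000 train / 10000 test rows of 3072 pixels):

    from cifar_data import load_data  # assumed import path

    (X_train, y_train), (X_test, y_test) = load_data(standard=True, one_hot=True)
    assert X_train.shape == (50000, 3072)
    assert y_train.shape == (50000, 10)  # one-hot labels via np.eye(10)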
2018sjain/language-by-letters
|
[
"bd952db6a26562bf575ad3e0ef48ee8c9d0b216a"
] |
[
"data_analysis.py"
] |
[
"import numpy as np\n\ndef num(letter):\n\tletter = letter.lower()\n\tif letter == ' ': return 0\n\treturn ord(letter) - 96\n\ndef percents(vals):\n\ttotal = sum(vals)\n\tif total == 0: total = 1 \n\tpercents = [(var/total) for var in vals]\n\treturn percents\n\nfiles = [['ENGLISH/english_clean.txt', 'english'], ['SPANISH/spanish_clean.txt', 'spanish'],\n\t\t ['FRENCH/french_clean.txt', 'french'], ['GERMAN/german_clean.txt', 'german']]\n\nfor file in files:\n\tone = [0 for x in range (27)]\n\ttwo = [[0 for x in range (27)] for x in range (27)]\n\tthree = [[[0 for x in range (27)] for x in range (27)] for x in range (27)]\n\tdata = open(file[0], 'r', encoding = 'utf8').read()\n\tfor letter in range(len(data)-2):\n\t\tcur = num(data[letter])\n\t\tnex = num(data[letter + 1])\n\t\tfur = num(data[letter + 2])\n\t\tone[cur] += 1\n\t\ttwo[cur][nex] += 1\n\t\tthree[cur][nex][fur] += 1\n\n\tone = percents(one)\n\tfor array in range(len(two)):\n\t\ttwo[array] = percents(two[array])\n\tfor value in range(len(three)):\n\t\tfor array in range(len(three[value])):\n\t\t\tthree[value][array] = percents(three[value][array])\n\n\tnp.save(file[1]+'_one', one)\n\tnp.save(file[1]+'_two', two)\n\tnp.save(file[1]+'_three', three)"
] |
[
[
"numpy.save"
]
] |
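Reading the tables back is symmetric to the np.save calls above; a sketch assuming the script has already produced the english_*.npy files in the working directory:

    import numpy as np

    one = np.load("english_one.npy")  # unigram frequencies, length 27
    two = np.load("english_two.npy")  # bigram table, 27 x 27, row-normalized
    # Index 0 is the space character and 1-26 map to 'a'-'z' (see num() above),
    # so two[1][2] approximates P(next letter 'b' | current letter 'a').
    print(two[1][2])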
maximilianKoeper/melp
|
[
"863d1c55a36adf29f3508e15ecd5ed0a77544f53"
] |
[
"melp/taft/utils/cosmic.py"
] |
[
"import warnings\n\nimport ROOT\nimport numpy as np\n\nfrom melp.libs.misc import index_finder\nfrom melp.taft.utils.mu3eDisplay_helper import Trajectory, generate_txt_event_file\n\n# ---------------------------------------------\n\"\"\"\ndef cosmic_correction_z(__detector__, **kwargs):\n print(\"analyzing cosmics file #1\")\n kwargs[\"station\"] = 1\n hist_z_1 = cosmic_linear_correction(kwargs[\"cosmic_file\"], __detector__, **kwargs)\n correction_1 = np.median(hist_z_1)\n print(\"analyzing cosmics file #2\")\n kwargs[\"station\"] = 2\n hist_z_2 = cosmic_linear_correction(kwargs[\"cosmic_file\"], __detector__, **kwargs)\n correction_2 = np.median(hist_z_2)\n print(\"done\")\n phi = __detector__.TileDetector.column_ids(0, 200000)\n\n for p in range(len(phi)):\n corr = []\n row = __detector__.TileDetector.row_ids(p, 200000)\n for i in range(len(row)):\n corr.append(correction_1 * i)\n for i in range(len(row)):\n __detector__.TileDetector.tile[row[i]].dt_cal -= corr[i]\n\n for p in range(len(phi)):\n corr = []\n row = __detector__.TileDetector.row_ids(p, 300000)\n for i in range(len(row)):\n corr.append(correction_2 * i)\n for i in range(len(row)):\n __detector__.TileDetector.tile[row[i]].dt_cal -= corr[i]\n\n\n# --------------------------------------------------\n#\ndef cosmic_linear_correction(filename: str, detector, **kwargs):\n root_file = ROOT.TFile.Open(filename, \"READ\")\n ttree_mu3e = root_file.Get(kwargs[\"ttree_loc\"])\n time_dist_z = []\n\n trajectories = []\n\n # it -> iterator (frame_id). -1 if EOF\n it = find_next_cosmic_event(ttree_mu3e, it=0, station=kwargs[\"station\"])\n while it != -1:\n\n if it % 100000 == 0:\n print(round(it / ttree_mu3e.GetEntries() * 100), \" % | Total Frames: \", ttree_mu3e.GetEntries(),\n end='\\r')\n\n if kwargs[\"mc_primary\"] is False:\n test_dict = check_cosmic_events(ttree_mu3e)\n else:\n test_dict = check_cosmic_events_mc(ttree_mu3e)\n\n for key in test_dict:\n tmp_ids = test_dict[key][0]\n\n if kwargs[\"station\"] == 1:\n if any(y >= 300000 for y in tmp_ids):\n continue\n elif kwargs[\"station\"] == 2:\n if any(y < 300000 for y in tmp_ids):\n continue\n\n tmp_time_1 = min(test_dict[key][1])\n tmp_tile_id_1 = test_dict[key][0][int(*index_finder(list(test_dict[key][1]), tmp_time_1))]\n tmp_time_1 += detector.TileDetector.tile[tmp_tile_id_1].get_offset()\n\n tmp_time_2 = max(test_dict[key][1])\n tmp_tile_id_2 = test_dict[key][0][int(*index_finder(list(test_dict[key][1]), tmp_time_2))]\n tmp_time_2 += detector.TileDetector.tile[tmp_tile_id_2].get_offset()\n # calculating tof\n pos1 = detector.TileDetector.tile[tmp_tile_id_1].pos\n pos2 = detector.TileDetector.tile[tmp_tile_id_2].pos\n dist = np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2 + (pos1[2] - pos2[2]) ** 2) # mm\n dist *= 0.001 # m\n tof = (dist / 299792458) * (10 ** 9) # ns\n\n z_dist = (detector.TileDetector.tile[tmp_tile_id_1].column() - detector.TileDetector.tile[\n tmp_tile_id_2].column())\n\n if abs(z_dist) > kwargs[\"cosmic_threshold\"]:\n time_dist_z.append((abs(tmp_time_1 - tmp_time_2) - tof) / z_dist)\n\n trajectories.append(Trajectory(tile1_pos=pos1, tile2_pos=pos2))\n\n it += 1\n it = find_next_cosmic_event(ttree_mu3e, it, 1)\n\n generate_txt_event_file(trajectories, 500)\n return time_dist_z\n\n\"\"\"\n\n\n# --------------------------------------\ndef station_station_timing(filename: str, detector, **kwargs):\n warnings.warn(\"INEFFICIENT\")\n # trajectories = []\n\n root_file = ROOT.TFile.Open(filename, \"READ\")\n ttree_mu3e = 
root_file.Get(kwargs[\"ttree_loc\"])\n\n time_dist_betw_stations = []\n\n it = find_next_cosmic_event(ttree_mu3e, it=0, station=1)\n while it != -1:\n\n if it % 100000 == 0:\n print(round(it / ttree_mu3e.GetEntries() * 100), \" % | Total Frames: \", ttree_mu3e.GetEntries(),\n end='\\r')\n\n if kwargs[\"mc_primary\"] is False:\n test_dict = check_cosmic_events(ttree_mu3e)\n else:\n test_dict = check_cosmic_events_mc(ttree_mu3e)\n\n for key in test_dict:\n tmp_ids = test_dict[key][0]\n if any(y >= 300000 for y in tmp_ids) and any(y < 300000 for y in tmp_ids):\n # print(test_dict[key][0])\n tmp_time_arr_2 = []\n tmp_id_arr_2 = []\n tmp_time_arr_1 = []\n tmp_id_arr_1 = []\n for hit_index in range(len(tmp_ids)):\n tile_id = tmp_ids[hit_index]\n if tile_id >= 300000:\n tmp_time_arr_2.append(test_dict[key][1][hit_index])\n tmp_id_arr_2.append(tile_id)\n else:\n tmp_time_arr_1.append(test_dict[key][1][hit_index])\n tmp_id_arr_1.append(tile_id)\n\n # tmp_time_1 = sum(tmp_time_arr_1) / len(tmp_time_arr_1)\n # tmp_time_2 = sum(tmp_time_arr_2) / len(tmp_time_arr_2)\n\n if (max(tmp_time_arr_2) - min(tmp_time_arr_2)) > 0.5 or (max(tmp_time_arr_1) - min(tmp_time_arr_1)) > 0.5:\n continue\n\n # first and last hit used for timing information\n tilehit_times_2, tilehit_ids_2 = (list(t) for t in zip(*sorted(zip(tmp_time_arr_2, tmp_id_arr_2))))\n tmp_time_2 = tilehit_times_2[0]\n if kwargs[\"offset_mode\"] == \"constant\":\n tmp_time_2 += kwargs[\"offset\"]/2\n else:\n tmp_time_2 += detector.TileDetector.tile[tilehit_ids_2[0]].get_offset()\n\n tilehit_times_1, tilehit_ids_1 = (list(t) for t in zip(*sorted(zip(tmp_time_arr_1, tmp_id_arr_1))))\n tmp_time_1 = tilehit_times_1[0]\n if kwargs[\"offset_mode\"] == \"constant\":\n tmp_time_1 -= kwargs[\"offset\"]/2\n else:\n tmp_time_1 += detector.TileDetector.tile[tilehit_ids_1[0]].get_offset()\n\n # tof = 0.\n pos1 = detector.TileDetector.tile[tilehit_ids_1[0]].pos\n pos2 = detector.TileDetector.tile[tilehit_ids_2[0]].pos\n\n dist = np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2 + (pos1[2] - pos2[2]) ** 2) # mm\n dist *= 0.001 # m\n tof = (dist / 299792458) * (10 ** 9)\n\n if kwargs[\"mode\"] == \"tof\":\n if detector.TileDetector.tile[tilehit_ids_1[0]].pos[1] < detector.TileDetector.tile[tilehit_ids_2[0]].pos[1]:\n time_dist_betw_stations.append(((tmp_time_1 - tmp_time_2)) - tof) # abs not needed ?\n else:\n time_dist_betw_stations.append(((tmp_time_2 - tmp_time_1)) - tof) # abs not needed ?\n elif kwargs[\"mode\"] == \"test\":\n time_dist_betw_stations.append(((tmp_time_1 - tmp_time_2))) # abs not needed ?\n elif kwargs[\"mode\"] == \"test tof\":\n if detector.TileDetector.tile[tilehit_ids_1[0]].pos[1] < detector.TileDetector.tile[tilehit_ids_2[0]].pos[1]:\n time_dist_betw_stations.append(((tmp_time_1 - tmp_time_2)) - tof) # abs not needed ?\n else:\n time_dist_betw_stations.append(((tmp_time_1 - tmp_time_2)) + tof) # abs not needed ?\n else:\n if detector.TileDetector.tile[tilehit_ids_1[0]].pos[1] < \\\n detector.TileDetector.tile[tilehit_ids_2[0]].pos[1]:\n time_dist_betw_stations.append(((tmp_time_1 - tmp_time_2))) # abs not needed ?\n else:\n time_dist_betw_stations.append(((tmp_time_2 - tmp_time_1))) # abs not needed ?\n # time_dist_betw_stations.append((tmp_time_1 - tmp_time_2))\n # trajectories.append(Trajectory(tile1_pos=pos1, tile2_pos=pos2))\n\n it += 1\n it = find_next_cosmic_event(ttree_mu3e, it, 1)\n\n # generate_txt_event_file(trajectories, 500)\n return time_dist_betw_stations\n\n\n# 
--------------------------------------------------\n#\ndef get_cosmic_data_from_file(filename: str, detector, cosmic_station: int, **kwargs):\n root_file = ROOT.TFile.Open(filename, \"READ\")\n ttree_mu3e = root_file.Get(kwargs[\"ttree_loc\"])\n time_offset_between_hits = []\n position_hits_hit1_column = []\n position_hits_hit1_row = []\n position_hits_hit2_column = []\n position_hits_hit2_row = []\n\n # it -> iterator (frame_id). -1 if EOF\n it = find_next_cosmic_event(ttree_mu3e, it=0, station=cosmic_station)\n while it != -1:\n # TODO: just for debugging\n if it >= 400000:\n it = 100000000000001\n if it % 100000 == 0:\n print(\" -> \", round(it / ttree_mu3e.GetEntries() * 100), \" % | Total Frames: \", ttree_mu3e.GetEntries(),\n end='\\r')\n\n if kwargs[\"cosmic_mc_primary\"] is False:\n test_dict = check_cosmic_events(ttree_mu3e)\n else:\n test_dict = check_cosmic_events_mc(ttree_mu3e)\n\n for key in test_dict:\n tmp_ids = test_dict[key][0]\n\n if cosmic_station == 1:\n if any(y >= 300000 for y in tmp_ids):\n continue\n elif cosmic_station == 2:\n if any(y < 300000 for y in tmp_ids):\n continue\n\n # TODO: min() max() is not perfect !!!\n tmp_time_1 = min(test_dict[key][1])\n tmp_tile_id_1 = test_dict[key][0][int(*index_finder(list(test_dict[key][1]), tmp_time_1))]\n tmp_time_1 += detector.TileDetector.tile[tmp_tile_id_1].get_offset()\n\n tmp_time_2 = max(test_dict[key][1])\n tmp_tile_id_2 = test_dict[key][0][int(*index_finder(list(test_dict[key][1]), tmp_time_2))]\n tmp_time_2 += detector.TileDetector.tile[tmp_tile_id_2].get_offset()\n\n # calculating tof\n pos1 = detector.TileDetector.tile[tmp_tile_id_1].pos\n pos2 = detector.TileDetector.tile[tmp_tile_id_2].pos\n dist = np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2 + (pos1[2] - pos2[2]) ** 2) # mm\n dist *= 0.001 # m\n tof = (dist / 299792458) * (10 ** 9) # ns\n\n if abs(dist) >= kwargs[\"cosmic_threshold\"]:\n time_offset_between_hits.append((abs(tmp_time_1 - tmp_time_2) - tof))\n position_hits_hit1_column.append(detector.TileDetector.tile[tmp_tile_id_1].column())\n position_hits_hit1_row.append(detector.TileDetector.tile[tmp_tile_id_1].row())\n position_hits_hit2_column.append(detector.TileDetector.tile[tmp_tile_id_2].column())\n position_hits_hit2_row.append(detector.TileDetector.tile[tmp_tile_id_2].row())\n\n it += 1\n it = find_next_cosmic_event(ttree_mu3e, it, 1)\n\n position_hits = (\n position_hits_hit1_column, position_hits_hit1_row, position_hits_hit2_column, position_hits_hit2_row)\n\n return time_offset_between_hits, position_hits\n\n\n# --------------------------------------\n# returns index of next frame with cosmic event\n# it: iterator -> frame_id\ndef find_next_cosmic_event(ttree_mu3e, it: int, station, threshold=1) -> int:\n # EOF\n if it >= ttree_mu3e.GetEntries():\n return -1\n\n station_offset = 200000\n if station == 2:\n station_offset += 100000\n\n # searches next frame with at least the number of frames defined in threshold\n # (default 1) in the station defined by station\n start = it\n for it in range(start, ttree_mu3e.GetEntries()):\n ttree_mu3e.GetEntry(it)\n if ttree_mu3e.Ntilehit >= threshold:\n if station == 3:\n return it\n for hit_index in range(len(ttree_mu3e.tilehit_tile)):\n tile_id = ttree_mu3e.tilehit_tile[hit_index]\n if station_offset <= tile_id < station_offset + 100000:\n return it\n\n return -1\n\n\n# --------------------------------------\n# checks event for useful data\ndef check_cosmic_events(ttree_mu3e):\n # sort hits in time\n tilehit_times, tilehit_ids = (list(t) for t 
in\n zip(*sorted(zip(list(ttree_mu3e.tilehit_time), list(ttree_mu3e.tilehit_tile)))))\n # print(tilehit_ids)\n # print(np.array(tilehit_times) - min(tilehit_times))\n\n sorted_tracks = get_single_tracks_time_cut(tilehit_ids, tilehit_times)\n\n return sorted_tracks\n\n\n# --------------------------------------\n# checks event for useful data\n# with primary id information\ndef check_cosmic_events_mc(ttree_mu3e):\n # sort hits for primary id\n indices = np.argsort(list(ttree_mu3e.tilehit_primary))\n tilehit_times = np.asarray(list(ttree_mu3e.tilehit_time))[indices]\n tilehit_ids = np.asarray(list(ttree_mu3e.tilehit_tile))[indices]\n tilehit_primaries = np.asarray(list(ttree_mu3e.tilehit_primary))[indices]\n\n # print(tilehit_ids)\n # print(np.array(tilehit_times) - min(tilehit_times))\n\n sorted_tracks = get_single_tracks_primary_mc(tilehit_ids, tilehit_times, tilehit_primaries)\n\n return sorted_tracks\n\n\n# --------------------------------------\n# split list into chunks for each event (time cut)\n# returns dictionary with data (key is arbitrary but unique)\ndef get_single_tracks_time_cut(tilehit_ids: list, tilehit_times: list, threshold: float = 8.) -> dict:\n single_events = {}\n\n tmp_time_reference = tilehit_times[0]\n index_start_track = 0\n for index in range(len(tilehit_times)):\n if abs(tmp_time_reference - tilehit_times[index]) > threshold:\n single_events[index] = [tilehit_ids[index_start_track:index], tilehit_times[index_start_track:index]]\n index_start_track = index\n tmp_time_reference = tilehit_times[index]\n\n # fill up remaining event\n if index_start_track != len(tilehit_times):\n single_events[len(tilehit_times)] = [tilehit_ids[index_start_track:],\n tilehit_times[index_start_track:]]\n\n # delete entries with only one hit\n keys_to_delete = []\n for key in single_events:\n if len(single_events[key][0]) == 1:\n keys_to_delete.append(key)\n\n for key in keys_to_delete:\n del single_events[key]\n\n return single_events\n\n\n# --------------------------------------\n# split list into chunks for each event (mc event)\n# returns dictionary with data (key is arbitrary but unique)\ndef get_single_tracks_primary_mc(tilehit_ids: list, tilehit_times: list, tilehit_primaries: list) -> dict:\n single_events = {}\n\n tmp_primary_reference = tilehit_primaries[0]\n index_start_track = 0\n for index in range(len(tilehit_times)):\n # do not use events with primary < 0!!!!\n if tilehit_primaries[index] < 0 and index < len(tilehit_times)-1:\n tmp_primary_reference = tilehit_primaries[index+1]\n index_start_track = index + 1\n elif tilehit_primaries[index] != tmp_primary_reference:\n single_events[index] = [tilehit_ids[index_start_track:index], tilehit_times[index_start_track:index]]\n index_start_track = index\n tmp_primary_reference = tilehit_primaries[index]\n\n # fill up remaining event\n if index_start_track != len(tilehit_times):\n single_events[len(tilehit_times)] = [tilehit_ids[index_start_track:],\n tilehit_times[index_start_track:]]\n\n # delete entries with only one hit\n keys_to_delete = []\n for key in single_events:\n if len(single_events[key][0]) == 1:\n keys_to_delete.append(key)\n\n for key in keys_to_delete:\n del single_events[key]\n\n return single_events\n"
] |
[
[
"numpy.sqrt"
]
] |
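The one numpy call the cosmic-ray module uses (numpy.sqrt) sits inside a time-of-flight correction that recurs throughout the file above; a self-contained restatement of that step, with tile positions in mm and the result in ns:

    import numpy as np

    def tof_ns(pos1, pos2):
        # Euclidean distance between two tiles (mm), converted to metres,
        # divided by the speed of light and scaled to nanoseconds.
        dist_mm = np.sqrt((pos1[0] - pos2[0]) ** 2
                          + (pos1[1] - pos2[1]) ** 2
                          + (pos1[2] - pos2[2]) ** 2)
        return (dist_mm * 0.001 / 299792458) * 1e9

    print(tof_ns((0.0, 0.0, 0.0), (0.0, 0.0, 300.0)))  # ~1.0 ns for 30 cm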
Happy-Virus-IkBeom/LTH_Tensorflow
|
[
"a032bd01c689823a208b8ca616d483187e1e471e"
] |
[
"foundations/model_base.py"
] |
[
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A base class for all models to be used in lottery ticket experiments.\n\nDefines a base class for a model that will be used for the lottery ticket\nhypothesis experiment.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass ModelBase(object):\n \"\"\"A base class for all models used in lottery ticket experiments.\"\"\"\n\n def __init__(self, presets=None, masks=None):\n \"\"\"Creates dictionaries for storing references to model parameters.\n\n Args:\n presets: A dictionary mapping strings to numpy arrays. Each key is the\n name of a weight tensor; each value is the corresponding preset initial\n weights to which that tensor should be initialized.\n masks: A dictionary mapping strings to numpy arrays. Each key is the\n name of a weight tensor; each value is the corresponding mask (0 or 1\n values in each entry) that determines which weights have been pruned.\n \"\"\"\n self._masks = masks if masks else {}\n self._presets = presets if presets else {}\n self._weights = {}\n\n self._train_summaries = None\n self._test_summaries = None\n self._validate_summaries = None\n\n @property\n def loss(self):\n return self._loss\n\n @property\n def train_summaries(self):\n return self._train_summaries\n\n @property\n def test_summaries(self):\n return self._test_summaries\n\n @property\n def validate_summaries(self):\n return self._validate_summaries\n\n @property\n def masks(self):\n return self._masks\n\n @property\n def presets(self):\n return self._presets\n\n @property\n def weights(self):\n return self._weights\n\n def get_current_weights(self, sess):\n output = {}\n for k, v in self.weights.items():\n output[k] = sess.run(v)\n return output\n\n def dense_layer(self,\n name,\n inputs,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer=None):\n \"\"\"Mimics tf.dense_layer but masks weights and uses presets as necessary.\"\"\"\n # If there is a preset for this layer, use it.\n if name in self._presets:\n kernel_initializer = tf.constant_initializer(self._presets[name])\n\n # Create the weights.\n weights = tf.get_variable(\n name=name + '_w',\n shape=[inputs.shape[1], units],\n initializer=kernel_initializer)\n\n # Mask the layer as necessary.\n if name in self._masks:\n mask_initializer = tf.constant_initializer(self._masks[name])\n mask = tf.get_variable(\n name=name + '_m',\n shape=[inputs.shape[1], units],\n initializer=mask_initializer,\n trainable=False)\n weights = tf.multiply(weights, mask)\n\n self._weights[name] = weights\n\n # Compute the output.\n output = tf.matmul(inputs, weights)\n\n # Add bias if applicable.\n if use_bias:\n bias = tf.get_variable(\n name=name + '_b', shape=[units], initializer=tf.zeros_initializer())\n output += bias\n\n # Activate.\n if activation:\n return activation(output)\n else:\n return output\n\n def create_loss_and_accuracy(self, label_placeholder, 
output_logits):\n \"\"\"Creates loss and accuracy once a child class has created the network.\"\"\"\n # Compute cross-entropy loss and accuracy.\n self._loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=label_placeholder, logits=output_logits))\n accuracy = tf.reduce_mean(\n tf.cast(\n tf.equal(\n tf.argmax(label_placeholder, 1),\n tf.argmax(tf.nn.softmax(output_logits), 1)), tf.float32))\n\n # Create summaries for loss and accuracy.\n self._train_summaries = [\n tf.summary.scalar('train_loss', self._loss),\n tf.summary.scalar('train_accuracy', accuracy)\n ]\n self._test_summaries = [\n tf.summary.scalar('test_loss', self._loss),\n tf.summary.scalar('test_accuracy', accuracy)\n ]\n self._validate_summaries = [\n tf.summary.scalar('validate_loss', self._loss),\n tf.summary.scalar('validate_accuracy', accuracy)\n ]\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.multiply",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.constant_initializer",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.argmax",
"tensorflow.summary.scalar"
]
] |
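The heart of dense_layer above is the lottery-ticket masking trick: a fixed, non-trainable 0/1 mask elementwise-multiplied into the kernel so pruned weights stay zero. A NumPy analogue of that one step (not the TF1 graph code itself):

    import numpy as np

    rng = np.random.default_rng(0)
    weights = rng.normal(size=(4, 3))                      # dense kernel
    mask = (rng.random(size=(4, 3)) > 0.5).astype(float)   # 0/1 pruning mask
    inputs = rng.normal(size=(2, 4))

    output = inputs @ (weights * mask)  # masked weights contribute nothing
    assert np.allclose(output, inputs @ np.where(mask == 1.0, weights, 0.0))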
jinjieyu/RCLSTM
|
[
"0f70617966cf4f21c4459cf321cea06db5b5c7c2"
] |
[
"RCLSTM/xml2csv.py"
] |
[
"# encoding: utf-8\n\n\"\"\"\n@author: huayuxiu\n\nTransform xml files to csv files.\nThe original traffic data are from GEANT, https://totem.info.ucl.ac.be/dataset.html\nPlease refer to data.pdf for the description of the traffic data\n\"\"\"\n\nimport xml.etree.cElementTree as et\nimport numpy as np\nimport os\n\n# path of xml files\nxml_path = './data/traffic-matrices/'\n\n# path of csv files\ncsv_path = './data/csv/'\n\nxml_count = 0\ncsv_count = 0\n\nfor filename in os.listdir(xml_path):\n xml_count += 1\n tree= et.parse(os.path.join(xml_path, filename))\n root = tree.getroot()\n IntraTM = root.getchildren()[1]\n srcs = IntraTM.getchildren()\n\n # get traffic matrix from xml file \n traffic_matrix = np.zeros([23, 23])\n for src in srcs:\n src_id = int(src.attrib['id'])\n for dst in src:\n if 'id' in dst.attrib:\n dst_id = int(dst.attrib['id'])\n bandwidth = float(dst.text)\n traffic_matrix[src_id-1, dst_id-1] = bandwidth\n\n # store traffic matrix to csv file\n portion = os.path.splitext(filename)\n if portion[1] == '.xml':\n newname = portion[0]+'.csv'\n np.savetxt(os.path.join(csv_path, newname), traffic_matrix, delimiter=',')\n csv_count += 1\n print(newname, 'has been saved')\n del(traffic_matrix)\n\nprint('the number of xml files', xml_count)\nprint('the number of csv files', csv_count)\n\nassert xml_count == csv_count\n"
] |
[
[
"numpy.zeros"
]
] |
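Round-tripping the output is a one-liner, shown here with a hypothetical file name standing in for the GEANT dump's naming:

    import numpy as np

    tm = np.loadtxt("./data/csv/example_traffic_matrix.csv", delimiter=",")
    assert tm.shape == (23, 23)  # one 23x23 GEANT traffic matrix per file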
topher-lo/datathon-starter-abandoned
|
[
"94d33378c165842fcc149fc5ff83ca1e2f8eddf5"
] |
[
"client/app.py"
] |
[
"\"\"\"Runs the streamlit app.\nCall this file in the terminal (from the `datathon-starter` dir)\nvia `streamlit run app.py`.\n\"\"\"\n\nimport pandas as pd\nimport streamlit as st\nimport missingno as msno\nimport time\n\nfrom pandas_profiling import ProfileReport\nfrom streamlit_pandas_profiling import st_profile_report\nfrom typing import List\nfrom typing import Mapping\n\nfrom prefect.client.client import Client\n\nfrom prefect.tasks.prefect.flow_run import StartFlowRun\nfrom prefect.engine.results.local_result import LocalResult\n\n\nR_DATASETS_URL = 'https://vincentarelbundock.github.io/Rdatasets'\nDATASET_URLS = {\n 'SmokeBan': f'{R_DATASETS_URL}/csv/AER/SmokeBan.csv',\n 'airquality': f'{R_DATASETS_URL}/csv/datasets/airquality.csv',\n 'TeachingRatings': f'{R_DATASETS_URL}/csv/AER/TeachingRatings.csv',\n} # A few example datasets to get you started\nDATASET_DOCS_URLS = {\n 'SmokeBan': f'{R_DATASETS_URL}/doc/AER/SmokeBan.html',\n 'airquality': f'{R_DATASETS_URL}/doc/datasets/airquality.html',\n 'TeachingRatings': f'{R_DATASETS_URL}/doc/AER/TeachingRatings.html',\n}\nDATASET_TITLES = {\n 'SmokeBan': 'Do Workplace Smoking Bans Reduce Smoking?',\n 'airquality': 'New York Air Quality Measurements',\n 'TeachingRatings': 'Impact of Beauty on Instructor\\'s Teaching Ratings',\n}\n\n\ndef create_prefect_flow_run(flow_name: str,\n project_name: str,\n task_refs: List,\n params: Mapping) -> str:\n \"\"\"Creates new prefect flow run for given flow id, parameters, task references\n and API server URL to send GraphQL requests to.\n Returns results value and state from a Prefect flow run.\n \"\"\"\n\n try:\n flow_run = StartFlowRun(flow_name=flow_name,\n project_name=project_name,\n parameters=params)\n flow_run_id = flow_run.run()\n client = Client()\n while True:\n time.sleep(10)\n flow_run_info = client.get_flow_run_info(flow_run_id)\n flow_state = flow_run_info.state\n task_runs_info = flow_run_info.task_runs\n if flow_state.is_finished():\n task_res_locs = {}\n for task_run in task_runs_info:\n # Return ref if ref string is a substring of any task slug\n ref = next((ref_str for ref_str in task_refs\n if ref_str in task_run.task_slug), None)\n if ref:\n task_id = task_run.id\n task_state = client.get_task_run_state(task_id)\n task_res_locs[ref] = task_state._result.location\n task_results = {}\n for ref, loc in task_res_locs.items():\n local_res = LocalResult()\n result = local_res.read(loc)\n task_results[ref] = result.value\n return task_results, flow_state, task_res_locs\n except ValueError as err:\n raise err\n\n\ndef sidebar():\n \"\"\"Write Streamlit commands here to display text and widgets in the sidebar.\n Replace the code within this function with your own\n interactive components and UI.\n \"\"\"\n\n st.sidebar.markdown(\n \"\"\"\n ## Datasets\n Three datasets from [R datasets]({}) are provided:\n - Do Workplace Smoking Bans Reduce Smoking\n - New York Air Quality Measurements\n - Impact of Beauty on Instructor's Teaching Ratings\n \"\"\".format(R_DATASETS_URL)\n )\n datasets = [None] + list(DATASET_URLS.keys())\n dataset_item = st.sidebar.selectbox('Which dataset are you interested in?',\n options=datasets)\n\n # Stop execution until a valid dataset is selected\n if not dataset_item:\n st.stop()\n url = DATASET_URLS[dataset_item]\n doc = DATASET_DOCS_URLS[dataset_item]\n # Read first row of csv file\n raw = pd.read_csv(url)\n data = raw.loc[:, ~raw.columns.str.contains('Unnamed')]\n columns = data.columns.tolist()\n\n st.sidebar.subheader('Model Specification')\n 
st.success(f'Successfully loaded dataset: {dataset_item}')\n st.info(f'URL found [here]({url}). Documentation found [here]({doc}).')\n\n cat_cols = st.sidebar.multiselect('Are there any categorical variables?',\n options=columns)\n transformed_cols = st.sidebar.multiselect('Select columns to transform',\n options=columns)\n transf = st.sidebar.selectbox('Log or arcsinh transform?',\n options=['log', 'arcsinh'])\n endog = st.sidebar.selectbox('Select an endogenous variable'\n ' (must be numeric)',\n options=[None] + columns)\n exog = [col for col in columns if col != endog]\n na_strategies = {\n 'Complete case': 'cc',\n 'Fill-in': 'fi',\n 'Fill-in with indicators': 'fii',\n 'Grand model': 'gm',\n 'MICE': 'mice',\n }\n na_strategy_name = st.sidebar.selectbox(\n 'How should missing values be dealt with?',\n options=[\n 'Complete case',\n 'Fill-in',\n 'Fill-in with indicators',\n 'Grand model',\n 'MICE'\n ])\n na_values_string = st.sidebar.text_input(\n 'Are there any text values that should be recognised as NA?'\n ' (separate values with a comma)',\n 'Missing, missing, not found'\n )\n na_values = [s.strip() for s in na_values_string.split(',')]\n na_strategy = na_strategies[na_strategy_name]\n return {'url': url,\n 'cat_cols': cat_cols,\n 'transformed_cols': transformed_cols,\n 'transf': transf,\n 'endog': endog,\n 'exog': exog,\n 'na_values': na_values,\n 'na_strategy': na_strategy,\n 'data': data,\n 'item': dataset_item}\n\n\ndef main():\n \"\"\"Write Streamlit commands here to display text and data in the app.\n Replace the code within this function with your own data workflow and UI.\n\n Streamlit API reference:\n https://docs.streamlit.io/en/stable/api.html\n \"\"\"\n\n # Configures the default settings\n st.set_page_config(page_title='datathon-starter',\n page_icon='🛠️',\n layout='wide')\n\n # Page title and header\n st.title('🛠️📊')\n st.title('Starter code for data applications')\n st.subheader('MIT License')\n st.markdown(\n \"\"\"\n ---\n 🙌 Build your own data app\n\n Modify pre-existing code and implement empty functions:\\n\n 1. Data tasks are found in `server/tasks.py`\n 2. Data workflows are found in `server/pipeline.py`\n 3. The Streamlit app's UI code is found in `app.py`\n ---\n 🚀 Try a quick example\n\n From the sidebar *(click on > if closed)*:\\n\n 1. Select a dataset\n 2. Select all categorical variables in the multiselect widget\n 3. Select an endogenous variable in the chosen dataset\n\n From the main UI below:\\n\n 4. 
Press the \"Run workflow\" button\n ---\n \"\"\"\n )\n\n # Example app\n params = sidebar() # Display sidebar in Streamlit app\n # Drop `data` and return its value\n data = params.pop('data')\n # Drop dataset `item` code and return its value\n item = params.pop('item')\n title = DATASET_TITLES[item]\n st.subheader(f'{title}')\n st.text('A random sample of 5 rows:')\n st.table(data.sample(5)) # Display random sample as a static table\n\n # Column container for buttons\n col1, col2, col3 = st.beta_columns(3)\n # Data profiling\n if col1.button('🔬 Data profiling report'):\n profile_report = ProfileReport(data, explorative=True)\n st_profile_report(profile_report)\n # Missing value analysis\n if col2.button('🔎 Missing value plots'):\n # Check if there are any missing values\n if pd.notna(data).all().all():\n st.warning('No missing values in dataset')\n else:\n fig1 = msno.matrix(data).get_figure()\n st.pyplot(fig1)\n fig2 = msno.heatmap(data).get_figure()\n st.pyplot(fig2)\n fig3 = msno.dendrogram(data).get_figure()\n st.pyplot(fig3)\n # Run data workflow\n if col3.button('✨ Run workflow!'):\n st.write('---')\n # Stop execution until a valid endogenous variable is selected\n if not(params.get('endog')):\n st.warning('Please select an endogenous variable')\n st.stop()\n flow_name = 'e2e_pipeline'\n project_name = 'datathon-starter'\n task_refs = ['wrangle_na']\n params = {'url': params.get('url'),\n 'sep': params.get('sep'),\n 'strategy': params.get('na_strategy')}\n results, state_msg = create_prefect_flow_run(flow_name,\n project_name,\n task_refs,\n params)\n # Check if all tasks were successfully executed\n if 'fail' in state_msg:\n # List of each state's (name, state message) in the workflow\n st.warning(state_msg)\n st.info('Please view the Flow logs on the Prefect Server\\'s'\n ' [UI](localhost:8080).')\n # If all tasks were successfully executed\n else:\n # Unpack results\n preprocessed_data, conf_int_chart = results\n # Success!\n st.balloons()\n st.success(state_msg)\n # Retrieve results from prefect flow run\n st.subheader('Pre-processed Data')\n st.dataframe(preprocessed_data)\n st.subheader('Regression Results')\n st.text('Dot and whisker plot of coefficients'\n ' and their confidence intervals:')\n # Plot regression coefficient's confidence intervals\n st.altair_chart(conf_int_chart, use_container_width=True)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.notna",
"pandas.read_csv"
]
] |
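One inconsistency worth noting in the app above: create_prefect_flow_run returns three values (task_results, flow_state, task_res_locs), while main() unpacks only two. A sketch of a call site matching the actual return signature, with illustrative parameters (this assumes it runs inside app.py where the function is defined):

    results, flow_state, result_locations = create_prefect_flow_run(
        flow_name="e2e_pipeline",          # hard-coded in main() above
        project_name="datathon-starter",
        task_refs=["wrangle_na"],
        params={"url": "https://example.com/data.csv", "strategy": "cc"},
    )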
VanGy-code/3D-House-Blender
|
[
"8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71"
] |
[
"Generator.py"
] |
[
"import bpy\nimport os\nimport json\nimport numpy as np\nfrom decimal import Decimal\nfrom mathutils import Vector, Matrix\n\n\ndef clean():\n # setting\n bpy.context.scene.use_nodes = True\n tree = bpy.context.scene.node_tree\n links = tree.links\n # Clear default nodes\n for n in tree.nodes:\n tree.nodes.remove(n)\n\n # Delete default cube\n # clean out the scene\n try:\n for o in bpy.context.scene.objects:\n o.select_set(True)\n bpy.ops.object.delete()\n for o in bpy.context.collection['Collection'].objects:\n o.select_set(True)\n bpy.ops.object.delete()\n except Exception:\n print(\" No Object exist !\")\n \n # bpy.context.collection['Collection'].name = 'env'\n for o in bpy.context.scene.objects:\n o.select_set(False)\n\n\ndef load_gltf_transform(path, scene, collection=None, m_json=None):\n path = path + '.gltf'\n print(path)\n if os.path.exists(path):\n mat = Matrix()\n mat.identity()\n\n # if scene['name'] != '44b51d4d-8740-42ad-b88c-93efe49f9d2b':\n # return\n\n print(f\"{scene['name']} loading !\")\n if m_json is not None:\n # column-major ??\n data = np.reshape(m_json, (4, 4)).transpose()\n mat = Matrix(data).to_4x4()\n\n t, q, s = mat.decompose()\n\n # blender_pos_x = x\n # blender_pos_y = -z\n # blender_pos_z = y\n\n # blender_rot_w = w``\n # blender_rot_x = x\n # blender_rot_y = z\n # blender_rot_z = y\n t[1], t[2] = -t[2], t[1]\n q[2], q[3] = q[3], q[2]\n s[1], s[2] = s[2], s[1]\n\n pos = t.to_tuple()\n rot = q\n scale = s.to_tuple()\n\n for o in bpy.context.scene.objects:\n o.select_set(False)\n\n\n bpy.ops.import_scene.gltf(filepath=path)\n obs = []\n for obj in bpy.context.scene.objects:\n if obj.name not in collection.objects and obj.type not in ['CAMERA', 'LAMP', 'SPOT', 'LIGHT']:\n obs.append(obj)\n\n\n if len(obs) > 1:\n ctx = {}\n # # one of the objects to join\n ctx[\"object\"] = ctx[\"active_object\"] = obs[0]\n ctx[\"selected_objects\"] = ctx[\"selected_editable_objects\"] = obs\n \n bpy.ops.object.join(ctx)\n \n\n model = bpy.data.objects[obs[0].name]\n model.name = scene['name']\n model.select_set(state=True)\n collection.objects.link(model)\n bpy.context.scene.collection.objects.unlink(model)\n\n model.location = pos\n model.rotation_quaternion = rot\n model.scale = scale\n\n\ndef add_model(model_file, model_id):\n scene = bpy.context.scene\n bpy.context.scene.cycles.samples = 20\n scene.render.resolution_x = 256\n scene.render.resolution_y = 256\n scene.render.resolution_percentage = 100\n\n house_config = {}\n\n with open(f\"{model_file}\\\\{model_id}\\\\house.json\", \"r\") as f:\n house_config = json.load(f)\n\n levels = house_config['levels']\n if model_id not in bpy.data.collections.keys():\n myCol = bpy.data.collections.new(model_id)\n bpy.context.scene.collection.children.link(myCol)\n else:\n myCol = bpy.data.collections[model_id]\n\n for level in levels:\n nodes = level['nodes']\n for node in nodes:\n if node['type'] == 'Room':\n filename = node['modelId'] + 'c'\n file = {\"name\": filename}\n load_gltf_transform(\n f\"{model_file}\\\\{model_id}\\\\{filename}\", scene=file, collection=myCol)\n filename = node['modelId'] + 'f'\n file = {\"name\": filename}\n load_gltf_transform(\n f\"{model_file}\\\\{model_id}\\\\{filename}\", scene=file, collection=myCol)\n filename = node['modelId'] + 'w'\n file = {\"name\": filename}\n load_gltf_transform(\n f\"{model_file}\\\\{model_id}\\\\{filename}\", scene=file, collection=myCol)\n elif node['type'] == 'Object':\n filename = node['modelId']\n file = {\"name\": filename}\n load_gltf_transform(\n 
f\"{model_file}\\\\{model_id}\\\\{filename}\", scene=file, m_json=node['transform'], collection=myCol)\n elif node['type'] == 'Ground':\n filename = node['modelId'] + 'f'\n file = {\"name\": filename}\n load_gltf_transform(\n f\"{model_file}\\\\{model_id}\\\\{filename}\", scene=file, collection=myCol)\n\n\n# clean()\nadd_model(\n model_file='C:\\\\Project\\\\3D-House-Blender\\\\scenes', model_id='0f2bcc07-85c2-41a1-8712-cee71117aff6')\n"
] |
[
[
"numpy.reshape"
]
] |
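load_gltf_transform above assumes house.json stores each 4x4 transform as a flat, column-major list of 16 floats, hence the reshape-then-transpose; a tiny check of that convention:

    import numpy as np

    # Identity rotation with translation (2, 3, 4), flattened column-major.
    m_json = [1, 0, 0, 0,
              0, 1, 0, 0,
              0, 0, 1, 0,
              2, 3, 4, 1]
    data = np.reshape(m_json, (4, 4)).transpose()
    assert list(data[:3, 3]) == [2, 3, 4]  # translation lands in the last column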
georgesbarron/qiskit-aqua
|
[
"b38ca9893c5f9509693a2de48680781bed300987"
] |
[
"qiskit/aqua/utils/qp_solver.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" qp solver \"\"\"\n\nfrom typing import Optional, Tuple\nimport logging\n\nimport numpy as np\nimport cvxpy\n\nlogger = logging.getLogger(__name__)\n\n\ndef optimize_svm(kernel_matrix: np.ndarray,\n y: np.ndarray,\n scaling: Optional[float] = None,\n max_iters: int = 500,\n show_progress: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Solving quadratic programming problem for SVM; thus, some constraints are fixed.\n\n Args:\n kernel_matrix: NxN array\n y: Nx1 array\n scaling: the scaling factor to renormalize the `y`, if it is None,\n use L2-norm of `y` for normalization\n max_iters: number of iterations for QP solver\n show_progress: showing the progress of QP solver\n\n Returns:\n np.ndarray: Sx1 array, where S is the number of supports\n np.ndarray: Sx1 array, where S is the number of supports\n np.ndarray: Sx1 array, where S is the number of supports\n \"\"\"\n # pylint: disable=invalid-name, unused-argument\n if y.ndim == 1:\n y = y[:, np.newaxis]\n H = np.outer(y, y) * kernel_matrix\n f = -np.ones(y.shape)\n if scaling is None:\n scaling = np.sum(np.sqrt(f * f))\n f /= scaling\n\n tolerance = 1e-2\n n = kernel_matrix.shape[1]\n\n P = np.array(H)\n q = np.array(f)\n G = -np.eye(n)\n h = np.zeros(n)\n A = y.reshape(y.T.shape)\n b = np.zeros((1, 1))\n x = cvxpy.Variable(n)\n prob = cvxpy.Problem(\n cvxpy.Minimize((1 / 2) * cvxpy.quad_form(x, P) + q.T@x),\n [G@x <= h,\n A@x == b])\n prob.solve(verbose=show_progress)\n result = np.asarray(x.value).reshape((n, 1))\n alpha = result * scaling\n avg_y = np.sum(y)\n avg_mat = (alpha * y).T.dot(kernel_matrix.dot(np.ones(y.shape)))\n b = (avg_y - avg_mat) / n\n\n support = alpha > tolerance\n logger.debug('Solving QP problem is completed.')\n return alpha.flatten(), b.flatten(), support.flatten()\n"
] |
[
[
"numpy.sqrt",
"numpy.asarray",
"numpy.eye",
"numpy.ones",
"numpy.outer",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
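A toy call of optimize_svm above, on two separable 1-D points with a linear kernel; this assumes the module is importable under the path shown and that cvxpy is installed:

    import numpy as np
    from qiskit.aqua.utils.qp_solver import optimize_svm  # the function above

    X = np.array([[-1.0], [1.0]])
    y = np.array([-1.0, 1.0])
    kernel_matrix = X @ X.T  # linear kernel
    alpha, b, support = optimize_svm(kernel_matrix, y)
    print(alpha, b, support)  # Lagrange multipliers, bias, support flags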
smit2k14/text-to-text-transfer-transformer
|
[
"2a3bed10cb36120a3899acd70ee987a4369701bb"
] |
[
"t5/models/mesh_transformer_main.py"
] |
[
"# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Main file for launching training/eval/predictions of mesh-transformer model.\"\"\"\n\nimport importlib\nimport os\nimport sys\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nfrom mesh_tensorflow.transformer import utils\nimport pkg_resources\nimport t5\nfrom t5.models import mtf_model\nimport tensorflow.compat.v1 as tf\n\nflags.DEFINE_string(\n \"tpu_job_name\", None,\n \"Name of TPU worker binary. Only necessary if job name is changed from \"\n \"default tpu_worker.\")\nflags.DEFINE_string(\n \"model_dir\", \"/tmp/transformer_standalone\", \"Estimator model_dir\")\n\n\nflags.DEFINE_string(\n \"tpu\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.\")\n\nflags.DEFINE_string(\n \"gcp_project\",\n None,\n \"Project name for the Cloud TPU-enabled project. If not specified, we \"\n \"will attempt to automatically detect the GCE project from metadata.\")\n\nflags.DEFINE_string(\n \"tpu_zone\", None,\n \"GCE zone where the Cloud TPU is located in. If not specified, we \"\n \"will attempt to automatically detect the GCE project from metadata.\")\n\nflags.DEFINE_multi_string(\n \"module_import\", None,\n \"Modules to import. Use this, for example, to add new `Task`s to the \"\n \"global `TaskRegistry`.\")\n\nflags.DEFINE_string(\n \"t5_tfds_data_dir\", None,\n \"If set, this directory will be used to store datasets prepared by \"\n \"TensorFlow Datasets that are not available in the public TFDS GCS bucket. \"\n \"Note that this flag overrides the `tfds_data_dir` attribute of all \"\n \"`Task`s.\")\n\nflags.DEFINE_list(\n \"additional_task_cache_dirs\", [],\n \"Directories to search for Tasks in addition to defaults.\")\n\nflags.DEFINE_boolean(\"use_model_api\", False,\n \"Use Model API instead of utils.run.\")\n\n# Note: All the args from here on are only used when use_model_api is set\nflags.DEFINE_enum(\"mode\", None,\n [\"train\", \"finetune\", \"eval\", \"predict\", \"export\"],\n \"Mode with which to run the model.\")\n\n# Train mode args\nflags.DEFINE_integer(\"train_steps\", 1000, \"Number of training iterations.\")\n\nflags.DEFINE_string(\"mixture_or_task\", \"wmt_t2t_ende_v003\",\n \"Name of Mixture or Task to use for training/evaluation.\")\nflags.DEFINE_string(\"pretrained_model_dir\", \"\",\n \"Pretrained model dir for finetuning a model.\")\n\n# Eval mode args\nflags.DEFINE_enum(\n \"checkpoint_mode\", \"latest\", [\"all\", \"latest\", \"specific\"],\n \"Checkpoint steps to use when running 'eval', 'predict', 'finetune', and \"\n \"'export' modes. Can specify a list of checkpoints or all or the latest \"\n \"checkpoint. 'finetune' and 'export' modes work with 'latest' or \"\n \"'specific' with a single checkpoint.\")\n\nflags.DEFINE_list(\n \"checkpoint_steps\", [],\n \"Checkpoint step numbers used for 'eval', 'predict', and 'finetune' modes. 
\"\n \"This argument is only used when which_checkpoint='specific'. \"\n \"For the 'finetune' mode, only a single checkpoint must be specified.\")\n\nflags.DEFINE_string(\"eval_summary_dir\", \"\", \"Path to save eval summaries\")\nflags.DEFINE_string(\"eval_split\", \"validation\",\n \"Dataset split to use for evaluation.\")\n\n# Predict mode args\nflags.DEFINE_string(\"input_file\", \"\", \"Path to input file for decoding.\")\nflags.DEFINE_string(\"output_file\", \"\", \"Path to output file to save decodes.\")\n\n# Export mode args\nflags.DEFINE_string(\n \"export_dir\", \"\",\n \"Directory to export SavedModels to. Will use `model_dir` if unspecified.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n if FLAGS.module_import:\n for module in FLAGS.module_import:\n importlib.import_module(module)\n\n if FLAGS.t5_tfds_data_dir:\n t5.data.set_tfds_data_dir_override(FLAGS.t5_tfds_data_dir)\n t5.data.add_global_cache_dirs(FLAGS.additional_task_cache_dirs)\n\n # Add search path for gin files stored in package.\n gin.add_config_file_search_path(\n pkg_resources.resource_filename(__name__, \"gin\"))\n try:\n tf.io.gfile.makedirs(FLAGS.model_dir)\n suffix = 0\n command_filename = os.path.join(FLAGS.model_dir, \"command\")\n while tf.io.gfile.exists(command_filename):\n suffix += 1\n command_filename = os.path.join(\n FLAGS.model_dir, \"command.{}\".format(suffix))\n with tf.io.gfile.GFile(command_filename, \"w\") as f:\n f.write(\" \".join(sys.argv))\n except tf.errors.PermissionDeniedError:\n logging.info(\n \"No write access to model directory. Skipping command logging.\")\n\n utils.parse_gin_defaults_and_flags()\n\n if FLAGS.use_model_api:\n model = mtf_model.MtfModel(\n tpu_job_name=FLAGS.tpu_job_name,\n tpu=FLAGS.tpu,\n gcp_project=FLAGS.gcp_project,\n tpu_zone=FLAGS.tpu_zone,\n model_dir=FLAGS.model_dir)\n\n if FLAGS.checkpoint_mode != \"specific\" and FLAGS.checkpoint_steps:\n raise ValueError(\"checkpoint_mode is set to %s and checkpoint_steps is \"\n \"also set. To use a particular checkpoint, please set \"\n \"checkpoint_mode to 'specific'. 
For other modes, please \"\n \"ensure that checkpoint_steps is not set.\"\n % FLAGS.checkpoint_mode)\n\n if FLAGS.checkpoint_mode == \"latest\":\n checkpoint_steps = -1\n elif FLAGS.checkpoint_mode == \"all\":\n checkpoint_steps = \"all\"\n else:\n checkpoint_steps = [int(c) for c in FLAGS.checkpoint_steps]\n\n if FLAGS.mode == \"train\":\n model.train(mixture_or_task_name=FLAGS.mixture_or_task,\n steps=FLAGS.train_steps)\n elif FLAGS.mode == \"eval\":\n model.eval(mixture_or_task_name=FLAGS.mixture_or_task,\n checkpoint_steps=checkpoint_steps,\n summary_dir=FLAGS.eval_summary_dir,\n split=FLAGS.eval_split)\n elif FLAGS.mode == \"finetune\":\n if not (FLAGS.checkpoint_mode == \"latest\" or\n (FLAGS.checkpoint_mode == \"specific\" and\n len(FLAGS.checkpoint_steps) == 1)):\n raise ValueError(\n \"Must specify a single checkpoint for finetuning a model.\")\n\n if isinstance(checkpoint_steps, list):\n checkpoint_steps = checkpoint_steps[0]\n\n model.finetune(\n mixture_or_task_name=FLAGS.mixture_or_task,\n steps=FLAGS.train_steps,\n pretrained_model_dir=FLAGS.pretrained_model_dir,\n checkpoint_steps=checkpoint_steps)\n elif FLAGS.mode == \"predict\":\n model.predict(\n checkpoint_steps=checkpoint_steps,\n input_file=FLAGS.input_file,\n output_file=FLAGS.output_file)\n elif FLAGS.mode == \"export\":\n if not (FLAGS.checkpoint_mode == \"latest\" or\n (FLAGS.checkpoint_mode == \"specific\" and\n len(FLAGS.checkpoint_steps) == 1)):\n raise ValueError(\n \"Must specify a single checkpoint for exporting a model.\")\n\n if isinstance(checkpoint_steps, list):\n checkpoint_steps = checkpoint_steps[0]\n\n model.export(\n export_dir=FLAGS.export_dir,\n checkpoint_step=checkpoint_steps)\n else:\n raise ValueError(\"--mode flag must be set when using Model API.\")\n else:\n if FLAGS.mode:\n raise ValueError(\"--mode flag should only be set when using Model API.\")\n if not FLAGS.tpu:\n with gin.unlock_config():\n gin.bind_parameter(\"utils.get_variable_dtype.slice_dtype\", \"float32\")\n gin.bind_parameter(\n \"utils.get_variable_dtype.activation_dtype\", \"float32\")\n utils.run(\n tpu_job_name=FLAGS.tpu_job_name,\n tpu=FLAGS.tpu,\n gcp_project=FLAGS.gcp_project,\n tpu_zone=FLAGS.tpu_zone,\n model_dir=FLAGS.model_dir)\n\n\ndef console_entry_point():\n tf.disable_v2_behavior()\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n\nif __name__ == \"__main__\":\n console_entry_point()\n"
] |
[
[
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.io.gfile.makedirs",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.io.gfile.GFile"
]
] |
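The mode dispatch in main() above first turns the checkpoint_mode / checkpoint_steps flag pair into a single value before calling the Model API. A minimal sketch of that resolution logic, isolated from the absl FLAGS machinery so it can be tested on its own; resolve_checkpoint_steps is an invented name, not part of the t5 package:

def resolve_checkpoint_steps(mode, steps=None):
    """Map a checkpoint mode to the value the Model API expects."""
    if mode != "specific" and steps:
        raise ValueError(
            "checkpoint_steps may only be set when checkpoint_mode='specific'")
    if mode == "latest":
        return -1            # sentinel: use the newest checkpoint
    if mode == "all":
        return "all"         # evaluate every checkpoint in model_dir
    return [int(s) for s in steps]

assert resolve_checkpoint_steps("latest") == -1
assert resolve_checkpoint_steps("specific", ["1000", "1100"]) == [1000, 1100]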
maxwelljohn/topic-sort
|
[
"212bbd8be6c4bcb32a414f236088eb523913b151"
] |
[
"topic_sort.py"
] |
[
"#!/usr/bin/env python -O\n\nimport argparse\nimport nltk\nimport numpy as np\nimport pytest\nimport re\n\nimport optimizers\nimport order_problem\n\nnltk.download('stopwords', quiet=True)\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\n\nPASSAGE_SEPARATOR = \"\\n\\n\"\nSTOPWORDS = set(nltk.corpus.stopwords.words('english'))\nSTOPWORDS.add('http')\nSTOPWORDS.add('https')\nWORD_RE = re.compile(r'^\\w+$')\nMAX_NGRAM_N = 3\n\n\nclass TopicSortProblem(order_problem.OrderingProblem):\n def __init__(self, passage_file):\n self.passages = passage_file.read().strip().split(PASSAGE_SEPARATOR)\n super().__init__(len(self.passages))\n self.additions_needed = len(self.passages) - 1\n wnl = nltk.WordNetLemmatizer()\n passage_ngrams = {}\n ngram_document_frequency = nltk.FreqDist()\n\n for passage in self.passages:\n lemmas = [wnl.lemmatize(t) for t in\n nltk.word_tokenize(passage.lower())]\n lemmas = [l for l in lemmas if l not in STOPWORDS and\n re.match(WORD_RE, l)]\n\n ngrams = []\n for n in range(1, MAX_NGRAM_N+1):\n ngrams.extend(nltk.ngrams(lemmas, n))\n passage_ngrams[passage] = nltk.FreqDist(ngrams)\n\n unique_ngrams = set(ngrams)\n for ngram in unique_ngrams:\n ngram_document_frequency[ngram] += 1\n\n for index1, passage1 in enumerate(self.passages):\n for offset, passage2 in enumerate(self.passages[index1+1:]):\n index2 = index1+1 + offset\n ngrams1 = passage_ngrams[passage1]\n ngrams2 = passage_ngrams[passage2]\n similarity_score = 0\n for g in ngrams1:\n if ngrams2[g] > 0:\n # TF-IDF weighting\n similarity_score += (1 + np.log(ngrams1[g])) * \\\n (1 + np.log(ngrams2[g])) * \\\n (np.log(len(self.passages) /\n ngram_document_frequency[g]))\n # \"Costs\" are negative; we want similar passages to be close.\n # Scaling by 1000 lets us use a matrix of ints.\n self.costs[index1, index2] = -1000 * similarity_score / \\\n min(len(passage1), len(passage2))\n\n\nclass TopicSortSolution(order_problem.OrderingSolution):\n def __init__(self, problem):\n super().__init__(problem)\n\n def ensure_completion(self):\n super().ensure_completion()\n assert np.sum(self.edges_added, axis=(0, 1)) == (self.dimension - 1)\n assert np.sum(self.node_degrees == 2, axis=0) == (self.dimension - 2)\n assert np.sum(self.node_degrees == 1, axis=0) == 2\n\n def add_edge(self, node_a, node_b):\n super().add_edge(node_a, node_b)\n\n if not self.feasible_edges.any():\n self.finish()\n else:\n self.ensure_validity()\n\n def __str__(self):\n components = self.components()\n assert len(components) == 1\n traversal_order = components[0]\n return PASSAGE_SEPARATOR.join(\n [self.problem.passages[i] for i in traversal_order]\n ) + '\\n'\n\n\nTopicSortProblem.solution_type = TopicSortSolution\n\n\[email protected]\ndef sample_problem():\n with open('sample_text.txt', 'r') as infile:\n result = TopicSortProblem(infile)\n return result\n\n\ndef test_greedy(sample_problem):\n '''\n Verify the greedy solver produces the correct result on the sample text.\n '''\n soln = optimizers.greedy(sample_problem)\n soln.ensure_completion()\n assert str(soln) == \"\"\"apples bananas\n\nbananas oranges\n\noranges pears plums\n\npears plums\n\"\"\"\n\n\ndef test_genetic(sample_problem):\n '''\n Verify the genetic solver produces the correct result on the sample text.\n '''\n soln = optimizers.genetic(sample_problem, 20, 20, 1000)\n soln.ensure_completion()\n assert str(soln) == \"\"\"apples bananas\n\nbananas oranges\n\noranges pears plums\n\npears plums\n\"\"\"\n\n\ndef main(passage_file, slow=False):\n problem = 
TopicSortProblem(passage_file)\n if slow:\n soln = optimizers.genetic(problem, 20, 20, 1000)\n else:\n soln = optimizers.greedy(problem)\n print(soln)\n\n\nif __name__ == '__main__':\n import sys\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"filepath\",\n help=\"path to the file of text passages to sort by topic; - for stdin\"\n )\n parser.add_argument(\n '-s', \"--slow\",\n action='store_true',\n help=\"sort the file slowly & carefully (quick & dirty is the default)\",\n )\n args = parser.parse_args()\n if args.filepath == '-':\n passage_file = sys.stdin\n main(passage_file, args.slow)\n else:\n passage_filepath = args.filepath\n with open(passage_filepath, 'r') as passage_file:\n main(passage_file, args.slow)\n"
] |
[
[
"numpy.log",
"numpy.sum"
]
] |
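TopicSortProblem above scores passage pairs with a sublinear TF-IDF weighting over shared n-grams. A self-contained sketch of just that scoring rule, with the NLTK tokenisation replaced by hand-built term-frequency dicts; all names and data below are illustrative:

import numpy as np

def tfidf_similarity(tf1, tf2, doc_freq, n_docs):
    # sum over shared terms: (1 + log tf1)(1 + log tf2) * log(N / df)
    score = 0.0
    for term, f1 in tf1.items():
        f2 = tf2.get(term, 0)
        if f2 > 0:
            score += (1 + np.log(f1)) * (1 + np.log(f2)) * np.log(n_docs / doc_freq[term])
    return score

tf_a = {("apple",): 2, ("pear",): 1}
tf_b = {("apple",): 1, ("plum",): 3}
df = {("apple",): 2, ("pear",): 1, ("plum",): 1}
print(tfidf_similarity(tf_a, tf_b, df, n_docs=4))  # only ("apple",) contributes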
hfboyce/MCL-DSCI-571-machine-learning
|
[
"25757369491ac547daa94ff1143ca7389d433a6e"
] |
[
"exercises/en/solution_06_17.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split, cross_validate\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer, make_column_transformer\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.neighbors import KNeighborsRegressor\n\n# Loading in the data\nbball_df = pd.read_csv('data/bball_imp.csv').dropna(subset=['salary'])\n\n# Split the dataset\ndf_train, df_test = train_test_split(bball_df, test_size=0.2, random_state=7)\n\nX_train = df_train[[\"weight\", \"height\", \"draft_year\", \"draft_round\",\n \"draft_peak\", \"team\", \"position\", \"country\"]]\nX_test = df_test[[\"weight\", \"height\", \"draft_year\", \"draft_round\",\n \"draft_peak\", \"team\", \"position\", \"country\"]]\ny_train = df_train['salary']\ny_test = df_test['salary']\n\n\n# Split the numeric and categorical features \nnumeric_features = [ \"weight\",\n \"height\",\n \"draft_year\",\n \"draft_round\",\n \"draft_peak\"]\n\ncategorical_features = [\"team\", \"position\", \"country\"]\n\n# Build a numeric pipeline\nnumeric_transformer = make_pipeline(\n SimpleImputer(strategy=\"median\"),\n StandardScaler())\n\n# Build a categorical pipeline\ncategorical_transformer = make_pipeline(\n SimpleImputer(strategy=\"most_frequent\"),\n OneHotEncoder(handle_unknown=\"ignore\"))\n\n# Build a column transformer\ncol_transformer = make_column_transformer(\n (numeric_transformer, numeric_features),\n (categorical_transformer, categorical_features))\n\n# Build a main pipeline\nmain_pipe = make_pipeline(\n col_transformer,\n KNeighborsRegressor())\n\n# Cross validate\nwith_categorical_scores = cross_validate(main_pipe, X_train, y_train, return_train_score=True)\npd.DataFrame(with_categorical_scores)"
] |
[
[
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.compose.make_column_transformer",
"sklearn.model_selection.cross_validate",
"sklearn.preprocessing.StandardScaler"
]
] |
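The same numeric/categorical split as the solution above, shrunk to a four-row synthetic frame so the pattern runs without data/bball_imp.csv; the column names and values here are made up:

import numpy as np
import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"height": [180.0, 175.0, np.nan, 190.0],
                  "team": ["a", "b", "a", np.nan]})
y = [1.0, 2.0, 1.5, 3.0]

preprocessor = make_column_transformer(
    (make_pipeline(SimpleImputer(strategy="median"), StandardScaler()), ["height"]),
    (make_pipeline(SimpleImputer(strategy="most_frequent"),
                   OneHotEncoder(handle_unknown="ignore")), ["team"]))
model = make_pipeline(preprocessor, KNeighborsRegressor(n_neighbors=2))
model.fit(X, y)
print(model.predict(X))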
Amartya32/Leap_and_Motion_Capture
|
[
"710aaf96068f50c84d6e9461460e2b304dbf5e24"
] |
[
"code/leap_data_extraction_code/leapDataExtraction.py"
] |
[
"\n################################################################################ \n# https://developer.leapmotion.com/sdk_agreement, or another agreement #\n# between Leap Motion and you, your company or other organization. #\n################################################################################\n\nimport os, sys, thread, time, inspect, socket, threading\nsys.path.insert(1, '..\\windows64')\n#sys.path.append('..\\leapWinPack')\nimport Leap\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom sortFingersByName import *\n\n\ndef sanitiseArray(string):\n cleanString = str()\n string = str(string)\n for e in string:\n if e != '(' and e != ')':\n cleanString += e\n return cleanString\n\n\nclass SampleListener(Leap.Listener):\n finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']\n\n def on_connect(self, controller):\n print (\"Connected\")\n\n\n def on_frame(self, controller):\n frame = controller.frame()\n\n print (\"status_qtm: %s, FramePerSec: %d, Frame id: %d, timestamp: %d, hands: %d, fingers: %d\" % (\n status_qtm, frame.current_frames_per_second, frame.id, frame.timestamp, len(frame.hands), len(frame.fingers)))\n\n for hand in frame.hands:\n if hand.is_left:\n print(\"left\")\n for finger in hand.fingers:\n fl.write(\"%s,%d,%d,\" % (status_qtm, frame.current_frames_per_second, frame.id))\n fl.write(\"%d,%d,%d,\" % (frame.timestamp, len(frame.hands), len(frame.fingers)))\n fl.write(\"%d,%f,\" % (finger.id, finger.length))\n fl.write(\"%f,%s,\" % (finger.width, sanitiseArray(hand.arm.direction)))\n fl.write(\"%s,%s,\" % (sanitiseArray(hand.arm.wrist_position), sanitiseArray(hand.arm.elbow_position)))\n fl.write(\"%s,\" % (self.finger_names[finger.type]))\n for b in range(0, 4):\n bone = finger.bone(b)\n fl.write(\"%s,%s,\" % (sanitiseArray(bone.prev_joint), sanitiseArray(bone.next_joint)))\n fl.write(\"%s,\" % (sanitiseArray(bone.direction)))\n fl.write(\"\\n\")\n else:\n print(\"right\")\n for finger in hand.fingers:\n fr.write(\"%s,%d,%d,\" % (status_qtm, frame.current_frames_per_second, frame.id))\n fr.write(\"%d,%d,%d,\" % (frame.timestamp, len(frame.hands), len(frame.fingers)))\n fr.write(\"%d,%f,\" % (finger.id, finger.length))\n fr.write(\"%f,%s,\" % (finger.width, sanitiseArray(hand.arm.direction)))\n fr.write(\"%s,%s,\" % (sanitiseArray(hand.arm.wrist_position), sanitiseArray(hand.arm.elbow_position)))\n fr.write(\"%s,\" % (self.finger_names[finger.type]))\n for b in range(0, 4):\n bone = finger.bone(b)\n fr.write(\"%s,%s,\" % (sanitiseArray(bone.prev_joint), sanitiseArray(bone.next_joint)))\n fr.write(\"%s,\" % (sanitiseArray(bone.direction)))\n fr.write(\"\\n\")\n\n print(\"\\n\")\n\n#background server listening to events from qtm\nclass Server(object):\n def __init__(self, interface, port):\n self.interface = interface\n self.port = port\n thread = threading.Thread(target=self.run, args=())\n thread.daemon = True\n thread.start()\n\n def run(self):\n global status_qtm\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((self.interface, self.port))\n\n while True:\n data, addr = sock.recvfrom(1024)\n print(data)\n status_qtm = data\n\ndef main():\n #Create a sample listener and controller\n listener = SampleListener()\n controller = Leap.Controller()\n\n #Have the sample listener receive events from the controller\n controller.add_listener(listener)\n\n #keep process running in the background\n if not controller.is_policy_set(Leap.Controller.POLICY_BACKGROUND_FRAMES):\n 
controller.set_policy(Leap.Controller.POLICY_BACKGROUND_FRAMES)\n\n #Keep this process running until Enter is pressed\n print (\"Press Enter to quit...\")\n try:\n sys.stdin.readline()\n except KeyboardInterrupt:\n pass\n finally:\n #Remove the sample listener when done\n controller.remove_listener(listener)\n\n\nif __name__ == \"__main__\":\n #globale variabel status_qtm: 0 = capture stopped 1 = capture started\n status_qtm = 0\n\n #get ip adress, default loopback interface\n parser = ArgumentParser(description=\"leap motion data reader depending on qtm capturing\")\n parser.add_argument(\"--host_qtm\", default=\"127.0.0.1\", help=\"IP address of the network interface of the machine which is running qtm\")\n args = parser.parse_args()\n #---\n\n #open file for left and right hand\n if not os.path.isdir(\"rawData\"):\n os.mkdir(\"rawData\")\n\n countLeft = 1\n while os.path.exists(\"rawData/lefthand%s.txt\" % countLeft):\n countLeft += 1\n\n fl = open(\"rawData/lefthand%s.txt\" % countLeft, \"w\")\n fl.write(\"begin/end,FramePerSec,Frameid,timestamp,hands,fingers,\")\n fl.write(\"ID,length,width,\")\n fl.write(\"ArmDirX,ArmDirY,ArmDirZ,\")\n fl.write(\"WX,WY,WZ,\")\n fl.write(\"ElbPosX,ElbPosY,ElbPosZ,\")\n fl.write(\"name,\")\n fl.write(\"Metacarpal_BBX,Metacarpal_BBY,Metacarpal_BBZ,\")\n fl.write(\"Metacarpal_BHX,Metacarpal_BHY,Metacarpal_BHZ,\")\n fl.write(\"Metacarpal_BDirX,Metacarpal_BDirY,Metacarpal_BDirZ,\")\n fl.write(\"Proximal_BBX,Proximal_BBY,Proximal_BBZ,\")\n fl.write(\"Proximal_BHX,Proximal_BHY,Proximal_BHZ,\")\n fl.write(\"Proximal_BDirX,Proximal_BDirY,Proximal_BDirZ,\")\n fl.write(\"Intermediate_BBX,Intermediate_BBY,Intermediate_BBZ,\")\n fl.write(\"Intermediate_BHX,Intermediate_BHY,Intermediate_BHZ,\")\n fl.write(\"Intermediate_BDirX,Intermediate_BDirY,Intermediate_BDirZ,\")\n fl.write(\"Distal_BBX,Distal_BBY,Distal_BBZ,\")\n fl.write(\"Distal_BHX,Distal_BHY,Distal_BHZ,\")\n fl.write(\"Distal_BDirX,Distal_BDirY,Distal_BDirZ\\n\")\n\n countRight = 1\n while os.path.exists(\"rawData/righthand%s.txt\" % countRight):\n countRight += 1\n\n fr = open(\"rawData/righthand%s.txt\" % countRight, \"w\")\n fr.write(\"begin/end,FramePerSec,Frameid,timestamp,hands,fingers,\")\n fr.write(\"ID,length,width,\")\n fr.write(\"ArmDirX,ArmDirY,ArmDirZ,\")\n fr.write(\"WX,WY,WZ,\")\n fr.write(\"ElbPosX,ElbPosY,ElbPosZ,\")\n fr.write(\"name,\")\n fr.write(\"Metacarpal_BBX,Metacarpal_BBY,Metacarpal_BBZ,\")\n fr.write(\"Metacarpal_BHX,Metacarpal_BHY,Metacarpal_BHZ,\")\n fr.write(\"Metacarpal_BDirX,Metacarpal_BDirY,Metacarpal_BDirZ,\")\n fr.write(\"Proximal_BBX,Proximal_BBY,Proximal_BBZ,\")\n fr.write(\"Proximal_BHX,Proximal_BHY,Proximal_BHZ,\")\n fr.write(\"Proximal_BDirX,Proximal_BDirY,Proximal_BDirZ,\")\n fr.write(\"Intermediate_BBX,Intermediate_BBY,Intermediate_BBZ,\")\n fr.write(\"Intermediate_BHX,Intermediate_BHY,Intermediate_BHZ,\")\n fr.write(\"Intermediate_BDirX,Intermediate_BDirY,Intermediate_BDirZ,\")\n fr.write(\"Distal_BBX,Distal_BBY,Distal_BBZ,\")\n fr.write(\"Distal_BHX,Distal_BHY,Distal_BHZ,\")\n fr.write(\"Distal_BDirX,Distal_BDirY,Distal_BDirZ\\n\")\n\n\n #get instance of background server\n server = Server(args.host_qtm, 8888)\n\n main()\n fl.close()\n fr.close()\n\n #get recorded intervall\n if not os.path.isdir(\"processed\"):\n os.mkdir(\"processed\")\n if not os.path.isdir(\"postProcessed\"):\n os.mkdir(\"postProcessed\")\n\n flp = open(\"processed/lefthandProcessed%s.txt\" % countLeft, \"w\")\n with open(\"rawData/lefthand%s.txt\" % countLeft, \"r\") as fl:\n try:\n 
flp.write(fl.readline())\n except StopIteration:\n print(\"No data for left hand!\")\n for line in fl:\n recording = abs(np.fromstring(line, dtype=int, sep=','))[0]\n if recording:\n flp.write(line)\n flp.close()\n\n frp = open(\"processed/righthandProcessed%s.txt\" % countRight, \"w\")\n with open(\"rawData/righthand%s.txt\" % countRight, \"r\") as fr:\n try:\n frp.write(fr.readline())\n except StopIteration:\n print(\"No data for right hand!\")\n for line in fr:\n recording = abs(np.fromstring(line, dtype=int, sep=','))[0]\n if recording:\n frp.write(line)\n frp.close()\n\n #sorting\n sortFingersByName(countLeft, countRight)\n"
] |
[
[
"numpy.fromstring"
]
] |
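The post-processing step above keeps only the CSV rows captured while qtm was recording, by reading the first field of each raw line. np.fromstring is deprecated for text input; a sketch of the same filter using a plain split (the function name and sample rows are illustrative):

def keep_recording_rows(lines):
    kept = []
    for line in lines:
        status = abs(int(float(line.split(",")[0])))  # begin/end flag from qtm
        if status:
            kept.append(line)
    return kept

rows = ["1,60,10", "0,60,11", "1,60,12"]
print(keep_recording_rows(rows))  # the status-0 row is dropped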
CrazyAlan/nextAI
|
[
"e871b4078e9d591121f9093f2ba022e1c9115f7b"
] |
[
"src/models/network.py"
] |
[
"\"\"\"Functions for building the face recognition network.\n\"\"\"\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\n\n\ndef conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):\n with tf.variable_scope(name):\n l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)\n kernel = tf.get_variable(\"weights\", [kH, kW, nIn, nOut],\n initializer=tf.truncated_normal_initializer(stddev=1e-1),\n regularizer=l2_regularizer, dtype=inpOp.dtype)\n cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)\n \n if use_batch_norm:\n conv_bn = batch_norm(cnv, phase_train)\n else:\n conv_bn = cnv\n biases = tf.get_variable(\"biases\", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)\n bias = tf.nn.bias_add(conv_bn, biases)\n conv1 = tf.nn.relu(bias)\n return conv1\n\ndef convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):\n with tf.variable_scope(name):\n l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)\n kernel = tf.get_variable(\"weights\", [kH, kW, nIn, nOut],\n initializer=tf.truncated_normal_initializer(stddev=1e-1),\n regularizer=l2_regularizer, dtype=inpOp.dtype)\n cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)\n \n if use_batch_norm:\n conv_bn = batch_norm(cnv, phase_train)\n else:\n conv_bn = cnv\n # biases = tf.get_variable(\"biases\", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)\n # bias = tf.nn.bias_add(conv_bn, biases)\n # conv1 = tf.nn.relu(bias)\n return conv_bn\n\ndef convMfm(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):\n net_1 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_1', phase_train, use_batch_norm, weight_decay)\n net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_2', phase_train, use_batch_norm, weight_decay)\n out = tf.maximum(net_1, net_2)\n\n return out\n\ndef affine(inpOp, nIn, nOut, name, weight_decay=0.0):\n with tf.variable_scope(name):\n l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)\n weights = tf.get_variable(\"weights\", 
[nIn, nOut],\n initializer=tf.truncated_normal_initializer(stddev=1e-1),\n regularizer=l2_regularizer, dtype=inpOp.dtype)\n biases = tf.get_variable(\"biases\", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)\n affine1 = tf.nn.relu_layer(inpOp, weights, biases)\n return affine1\n\ndef l2_loss(tensor, weight=1.0, scope=None):\n \"\"\"Define a L2Loss, useful for regularize, i.e. weight decay.\n Args:\n tensor: tensor to regularize.\n weight: an optional weight to modulate the loss.\n scope: Optional scope for op_scope.\n Returns:\n the L2 loss op.\n \"\"\"\n with tf.name_scope(scope):\n weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='loss_weight')\n loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')\n return loss\n\ndef lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):\n with tf.variable_scope(name):\n if pnorm == 2:\n pwr = tf.square(inpOp)\n else:\n pwr = tf.pow(inpOp, pnorm)\n \n subsamp = tf.nn.avg_pool(pwr,\n ksize=[1, kH, kW, 1],\n strides=[1, dH, dW, 1],\n padding=padding)\n subsamp_sum = tf.multiply(subsamp, kH*kW)\n \n if pnorm == 2:\n out = tf.sqrt(subsamp_sum)\n else:\n out = tf.pow(subsamp_sum, 1/pnorm)\n \n return out\n\ndef mpool(inpOp, kH, kW, dH, dW, padding, name):\n with tf.variable_scope(name):\n maxpool = tf.nn.max_pool(inpOp,\n ksize=[1, kH, kW, 1],\n strides=[1, dH, dW, 1],\n padding=padding) \n return maxpool\n\ndef apool(inpOp, kH, kW, dH, dW, padding, name):\n with tf.variable_scope(name):\n avgpool = tf.nn.avg_pool(inpOp,\n ksize=[1, kH, kW, 1],\n strides=[1, dH, dW, 1],\n padding=padding)\n return avgpool\n\n# def mfmpool(input1, input2, name):\n# with tf.variable_scope(name):\n# res = tf.maximum(input1, input2)\n# return res\n\ndef batch_norm(x, phase_train):\n \"\"\"\n Batch normalization on convolutional maps.\n Args:\n x: Tensor, 4D BHWD input maps\n n_out: integer, depth of input maps\n phase_train: boolean tf.Variable, true indicates training phase\n scope: string, variable scope\n affn: whether to affn-transform outputs\n Return:\n normed: batch-normalized maps\n Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177\n \"\"\"\n name = 'batch_norm'\n with tf.variable_scope(name):\n phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool)\n n_out = int(x.get_shape()[3])\n beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype),\n name=name+'/beta', trainable=True, dtype=x.dtype)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype),\n name=name+'/gamma', trainable=True, dtype=x.dtype)\n \n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.9)\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n mean, var = control_flow_ops.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed\n\ndef inception(inp, inSize, ks, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2, o4s3, poolType, name, \n phase_train=True, use_batch_norm=True, weight_decay=0.0):\n \n print('name = ', name)\n print('inputSize = ', inSize)\n print('kernelSize = {3,5}')\n print('kernelStride = {%d,%d}' % (ks,ks))\n print('outputSize = {%d,%d}' % (o2s2,o3s2))\n print('reduceSize = {%d,%d,%d,%d}' % (o2s1,o3s1,o4s2,o1s))\n print('pooling 
= {%s, %d, %d, %d, %d}' % (poolType, o4s1, o4s1, o4s3, o4s3))\n if (o4s2>0):\n o4 = o4s2\n else:\n o4 = inSize\n print('outputSize = ', o1s+o2s2+o3s2+o4)\n print()\n \n net = []\n \n with tf.variable_scope(name):\n with tf.variable_scope('branch1_1x1'):\n if o1s>0:\n conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n net.append(conv1)\n \n with tf.variable_scope('branch2_3x3'):\n if o2s1>0:\n conv3a = conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n conv3 = conv(conv3a, o2s1, o2s2, 3, 3, ks, ks, 'SAME', 'conv3x3', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n net.append(conv3)\n \n with tf.variable_scope('branch3_5x5'):\n if o3s1>0:\n conv5a = conv(inp, inSize, o3s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n conv5 = conv(conv5a, o3s1, o3s2, 5, 5, ks, ks, 'SAME', 'conv5x5', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n net.append(conv5)\n \n with tf.variable_scope('branch4_pool'):\n if poolType=='MAX':\n pool = mpool(inp, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool')\n elif poolType=='L2':\n pool = lppool(inp, 2, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool')\n else:\n raise ValueError('Invalid pooling type \"%s\"' % poolType)\n \n if o4s2>0:\n pool_conv = conv(pool, inSize, o4s2, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)\n else:\n pool_conv = pool\n net.append(pool_conv)\n \n incept = array_ops.concat(net, 3, name=name)\n return incept\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv2d",
"tensorflow.nn.moments",
"tensorflow.truncated_normal_initializer",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.nn.batch_normalization",
"tensorflow.pow",
"tensorflow.identity",
"tensorflow.nn.avg_pool",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.multiply",
"tensorflow.nn.relu_layer",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.constant",
"tensorflow.maximum",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.sqrt"
]
] |
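lppool above implements Lp pooling as average-pool, rescale by the window size, then the 1/p-th root, which is a p-norm over each window. A 1-D NumPy sketch of the same arithmetic; lp_pool_1d is an illustrative helper, not part of the file:

import numpy as np

def lp_pool_1d(x, p, k):
    # non-overlapping windows of size k; each output is the window's p-norm
    out = []
    for i in range(0, len(x) - k + 1, k):
        out.append(np.sum(np.abs(x[i:i + k]) ** p) ** (1.0 / p))
    return np.array(out)

print(lp_pool_1d(np.array([3.0, 4.0, 1.0, 2.0]), p=2, k=2))  # [5.0, 2.236...]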
marses/tiltx
|
[
"99e25750f3db1c2fb54463a90753e032c572a8b2"
] |
[
"tiltx/data_generator.py"
] |
[
"import numpy\n\n\nclass DataGenerator(object):\n \"\"\"Generate data to work with the package.\"\"\"\n\n def __init__(self,):\n pass\n\n @staticmethod\n def example(i):\n \"\"\"Recreate the values from Example i.\n The data is stored in data/data_i.txt for i in {1,...,6}.\n :returns: tuple(t, angle, angle)\n :rtypes: (array, array, array)\n \"\"\"\n with numpy.errstate(divide='ignore'):\n d = numpy.loadtxt(\"data/data_\"+str(i)+\".txt\")\n return d[:,0], d[:,1], d[:,2]\n \n "
] |
[
[
"numpy.errstate"
]
] |
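DataGenerator.example wraps numpy.loadtxt in numpy.errstate(divide='ignore'), which suppresses divide-by-zero warnings only inside the with-block. A runnable sketch of the same pattern with inline data instead of data/data_i.txt:

import io
import numpy as np

text = "0.0 1.0 2.0\n0.1 1.5 2.5\n"
with np.errstate(divide="ignore"):
    d = np.loadtxt(io.StringIO(text))
t, angle1, angle2 = d[:, 0], d[:, 1], d[:, 2]
print(t, angle1, angle2)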
brosaplanella/liionpack
|
[
"4c3f61b6f28e1419974c8572669d70fc173a6959"
] |
[
"liionpack/solver_utils.py"
] |
[
"#\n# Solver utilities\n#\n\nimport casadi\nimport pybamm\nimport numpy as np\nimport time as ticker\nimport liionpack as lp\nfrom tqdm import tqdm\n\n\ndef _mapped_step(model, solutions, inputs_dict, integrator, variables, t_eval):\n \"\"\"\n Internal function to process the model for one timestep in a mapped way.\n Mapped versions of the integrator and variables functions should already\n have been made.\n\n Parameters\n ----------\n model : :class:`pybamm.lithium_ion.BaseModel`\n The built battery model\n solutions : list of :class:`pybamm.Solution` objects for each battery\n Used to get the last state of the system and use as x0 and z0 for the\n casadi integrator\n inputs_dict : list of inputs_dict objects for each battery\n integrator : mapped casadi.integrator\n Produced by `_create_casadi_objects`\n variables : mapped variables evaluator\n Produced by `_create_casadi_objects`\n t_eval : float array of times to evaluate\n Produced by `_create_casadi_objects`\n\n Returns\n -------\n sol : list\n Solutions that have been stepped forward by one timestep\n var_eval : list\n Evaluated variables for final state of system\n\n \"\"\"\n len_rhs = model.concatenated_rhs.size\n N = len(solutions)\n if solutions[0] is None:\n # First pass\n x0 = casadi.horzcat(*[model.y0[:len_rhs] for i in range(N)])\n z0 = casadi.horzcat(*[model.y0[len_rhs:] for i in range(N)])\n else:\n x0 = casadi.horzcat(*[sol.y[:len_rhs, -1] for sol in solutions])\n z0 = casadi.horzcat(*[sol.y[len_rhs:, -1] for sol in solutions])\n # t_min = [0.0]*N\n t_min = 0.0\n inputs = []\n for temp in inputs_dict:\n inputs.append(casadi.vertcat(*[x for x in temp.values()] + [t_min]))\n ninputs = len(temp.values())\n inputs = casadi.horzcat(*inputs)\n # p = casadi.horzcat(*zip(inputs, external_variables, [t_min]*N))\n # inputs_with_tmin = casadi.vertcat(inputs, np.asarray(t_min))\n # Call the integrator once, with the grid\n timer = pybamm.Timer()\n tic = timer.time()\n casadi_sol = integrator(x0=x0, z0=z0, p=inputs)\n integration_time = timer.time()\n nt = len(t_eval)\n xf = casadi_sol[\"xf\"]\n # zf = casadi_sol[\"zf\"]\n sol = []\n xend = []\n for i in range(N):\n start = i * nt\n y_sol = xf[:, start:start + nt]\n xend.append(y_sol[:, -1])\n # Not sure how to index into zf - need an example\n sol.append(pybamm.Solution(t_eval, y_sol, model, inputs_dict[i]))\n sol[-1].integration_time = integration_time\n toc = timer.time()\n lp.logger.debug(f\"Mapped step completed in {toc - tic}\")\n xend = casadi.horzcat(*xend)\n var_eval = variables(0, xend, 0, inputs[0:ninputs, :])\n return sol, var_eval\n\n\ndef _create_casadi_objects(I_init, htc, sim, dt, Nspm, nproc, variable_names):\n \"\"\"\n Internal function to produce the casadi objects in their mapped form for\n parallel evaluation\n\n Parameters\n ----------\n I_init : float\n initial guess for current of a battery (not used for simulation).\n htc : float\n initial guess for htc of a battery (not used for simulation).\n sim : :class:`pybamm.Simulation`\n A PyBaMM simulation object that contains the model, parameter values,\n solver, solution etc.\n dt : float\n The time interval (in seconds) for a single timestep. Fixed throughout\n the simulation\n Nspm : int\n Number of individual batteries in the pack.\n nproc : int\n Number of parallel processes to map to.\n variable_names : list\n Variables to evaluate during solve. 
Must be a valid key in the\n model.variables\n\n Returns\n -------\n integrator : mapped casadi.integrator\n Solves an initial value problem (IVP) coupled to a terminal value\n problem with differential equation given as an implicit ODE coupled\n to an algebraic equation and a set of quadratures\n variables_fn : mapped variables evaluator\n evaluates the simulation and output variables. see casadi function\n t_eval : float array of times to evaluate\n times to evaluate in a single step, starting at zero for each step\n\n \"\"\"\n inputs = {\n \"Current function [A]\": I_init,\n \"Total heat transfer coefficient [W.m-2.K-1]\": htc,\n }\n solver = sim.solver\n\n # Initial solution - this builds the model behind the scenes\n # solve model for 1 second to initialise the circuit\n t_eval = np.linspace(0, 1, 2)\n sim.solve(t_eval, inputs=inputs)\n\n # Step model forward dt seconds\n t_eval = np.linspace(0, dt, 11)\n t_eval_ndim = t_eval / sim.model.timescale.evaluate()\n\n # No external variables - Temperature solved as lumped model in pybamm\n # External variables could (and should) be used if battery thermal problem\n # Includes conduction with any other circuits or neighboring batteries\n # inp_and_ext.update(external_variables)\n inp_and_ext = inputs\n\n # Code to create mapped integrator\n integrator = solver.create_integrator(\n sim.built_model, inputs=inp_and_ext, t_eval=t_eval_ndim\n )\n integrator = integrator.map(Nspm, \"thread\", nproc)\n\n # Variables function for parallel evaluation\n casadi_objs = sim.built_model.export_casadi_objects(variable_names=variable_names)\n variables = casadi_objs[\"variables\"]\n t, x, z, p = (\n casadi_objs[\"t\"],\n casadi_objs[\"x\"],\n casadi_objs[\"z\"],\n casadi_objs[\"inputs\"],\n )\n variables_stacked = casadi.vertcat(*variables.values())\n variables_fn = casadi.Function(\"variables\", [t, x, z, p], [variables_stacked])\n variables_fn = variables_fn.map(Nspm, \"thread\", nproc)\n return integrator, variables_fn, t_eval\n\n\ndef solve(\n netlist=None,\n parameter_values=None,\n experiment=None,\n I_init=1.0,\n htc=None,\n initial_soc=0.5,\n nproc=12,\n output_variables=None,\n):\n \"\"\"\n Solves a pack simulation\n\n Parameters\n ----------\n netlist : pandas.DataFrame\n A netlist of circuit elements with format. desc, node1, node2, value.\n Produced by liionpack.read_netlist or liionpack.setup_circuit\n parameter_values : pybamm.ParameterValues class\n A dictionary of all the model parameters\n experiment : pybamm.Experiment class\n The experiment to be simulated. experiment.period is used to\n determine the length of each timestep.\n I_init : float, optional\n Initial guess for single battery current [A]. The default is 1.0.\n htc : float array, optional\n Heat transfer coefficient array of length Nspm. The default is None.\n initial_soc : float\n The initial state of charge for every battery. The default is 0.5\n nproc : int, optional\n Number of processes to start in parallel for mapping. The default is 12.\n output_variables : list, optional\n Variables to evaluate during solve. 
Must be a valid key in the\n model.variables\n\n Raises\n ------\n Exception\n DESCRIPTION.\n\n Returns\n -------\n output : ndarray shape [# variable, # steps, # batteries]\n simulation output array\n\n \"\"\"\n\n if netlist is None or parameter_values is None or experiment is None:\n raise Exception(\"Please supply a netlist, paramater_values, and experiment\")\n\n # Get netlist indices for resistors, voltage sources, current sources\n Ri_map = netlist[\"desc\"].str.find(\"Ri\") > -1\n V_map = netlist[\"desc\"].str.find(\"V\") > -1\n I_map = netlist[\"desc\"].str.find(\"I\") > -1\n Terminal_Node = np.array(netlist[I_map].node1)\n Nspm = np.sum(V_map)\n\n # Generate the protocol from the supplied experiment\n protocol = lp.generate_protocol_from_experiment(experiment)\n dt = experiment.period\n Nsteps = len(protocol)\n\n # Solve the circuit to initialise the electrochemical models\n V_node, I_batt = lp.solve_circuit(netlist)\n\n # Create battery simulation and update initial state of charge\n sim = lp.create_simulation(parameter_values, make_inputs=True)\n lp.update_init_conc(sim, SoC=initial_soc)\n\n # The simulation output variables calculated at each step for each battery\n # Must be a 0D variable i.e. battery wide volume average - or X-averaged for 1D model\n variable_names = [\n \"Terminal voltage [V]\",\n \"Measured battery open circuit voltage [V]\",\n ]\n if output_variables is not None:\n for out in output_variables:\n if out not in variable_names:\n variable_names.append(out)\n # variable_names = variable_names + output_variables\n Nvar = len(variable_names)\n\n # Storage variables for simulation data\n shm_i_app = np.zeros([Nsteps, Nspm], dtype=float)\n shm_Ri = np.zeros([Nsteps, Nspm], dtype=float)\n output = np.zeros([Nvar, Nsteps, Nspm], dtype=float)\n\n # Initialize currents in battery models\n shm_i_app[0, :] = I_batt * -1\n\n # Set up integrator\n integrator, variables_fn, t_eval = _create_casadi_objects(\n I_init, htc[0], sim, dt, Nspm, nproc, variable_names\n )\n\n # Step forward in time\n time = 0\n end_time = dt * Nsteps\n step_solutions = [None] * Nspm\n V_terminal = []\n record_times = []\n\n v_cut_lower = parameter_values[\"Lower voltage cut-off [V]\"]\n v_cut_higher = parameter_values[\"Upper voltage cut-off [V]\"]\n\n sim_start_time = ticker.time()\n\n for step in tqdm(range(Nsteps), desc='Solving Pack'):\n # Step the individual battery models\n step_solutions, var_eval = _mapped_step(\n sim.built_model,\n step_solutions,\n lp.build_inputs_dict(shm_i_app[step, :], htc),\n integrator,\n variables_fn,\n t_eval,\n )\n output[:, step, :] = var_eval\n\n time += dt\n\n # Calculate internal resistance and update netlist\n temp_v = output[0, step, :]\n temp_ocv = output[1, step, :]\n # temp_Ri = output[2, step, :]\n # This could be used instead of Equivalent ECM resistance which has\n # been changing definition\n temp_Ri = (temp_ocv - temp_v) / shm_i_app[step, :]\n # Make Ri more stable\n current_cutoff = np.abs(shm_i_app[step, :]) < 1e-6\n temp_Ri[current_cutoff] = 1e-12\n # temp_Ri = 1e-12\n shm_Ri[step, :] = temp_Ri\n\n netlist.loc[V_map, (\"value\")] = temp_ocv\n netlist.loc[Ri_map, (\"value\")] = temp_Ri\n netlist.loc[I_map, (\"value\")] = protocol[step]\n\n # Stop if voltage limits are reached\n if np.any(temp_v < v_cut_lower):\n print(\"Low voltage limit reached\")\n break\n if np.any(temp_v > v_cut_higher):\n print(\"High voltage limit reached\")\n break\n\n if time <= end_time:\n record_times.append(time)\n V_node, I_batt = lp.solve_circuit(netlist)\n 
V_terminal.append(V_node[Terminal_Node][0])\n if time < end_time:\n shm_i_app[step + 1, :] = I_batt[:] * -1\n\n # Collect outputs\n all_output = {}\n all_output[\"Time [s]\"] = np.asarray(record_times)\n all_output[\"Pack current [A]\"] = np.asarray(protocol[: step + 1])\n all_output[\"Pack terminal voltage [V]\"] = np.asarray(V_terminal)\n all_output[\"Cell current [A]\"] = shm_i_app[: step + 1, :]\n for j in range(Nvar):\n all_output[variable_names[j]] = output[j, : step + 1, :]\n\n toc = ticker.time()\n lp.logger.notice(\n \"Solve circuit time \" + str(np.around(toc - sim_start_time, 3)) + \"s\"\n )\n return all_output\n"
] |
[
[
"numpy.abs",
"numpy.linspace",
"numpy.asarray",
"numpy.around",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
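Inside the solve loop above, each cell's internal resistance is recovered as (OCV - V) / I, with near-zero currents clamped to a tiny floor so the next circuit solve stays well-posed. A NumPy sketch with synthetic values; cell_resistance is an invented helper, not part of liionpack:

import numpy as np

def cell_resistance(ocv, v, i_app, i_cutoff=1e-6, r_floor=1e-12):
    small = np.abs(i_app) < i_cutoff
    safe_i = np.where(small, 1.0, i_app)      # dodge divide-by-zero warnings
    return np.where(small, r_floor, (ocv - v) / safe_i)

ocv = np.array([4.10, 4.10, 4.10])
v = np.array([4.00, 4.05, 4.10])
i_app = np.array([2.0, 1.0, 0.0])
print(cell_resistance(ocv, v, i_app))  # last cell clamped to 1e-12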
FireBERT-author/FireBERT
|
[
"c9e977e2863a51d179dc1072b468040e419c82ca"
] |
[
"firebert_fve.py"
] |
[
"# coding=utf-8\n# Copyright 2020 FireBERT authors. All rights reserved.\n#\n# Licensed under the MIT license\n# See https://github.com/FireBERT-author/FireBERT/blob/master/LICENSE for details\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n# FVE as a subclass\n# overrides extend_batch_examples_test() used in forward()\n#\n\nimport torch\nfrom torch import nn\nimport numpy as np\nimport argparse\n\nfrom switch import SWITCH\nfrom firebert_base import FireBERT_base\n\nclass FireBERT_FVE(FireBERT_base):\n\n def __init__(self, load_from=None, processor=None, hparams=None):\n super(FireBERT_FVE, self).__init__(load_from=load_from, processor=processor, hparams=hparams)\n\n # need SWITCH to tell us what the important words are\n self.switch = SWITCH(hparams=hparams, model=self, tokenizer=self.tokenizer, device=self.device)\n\n # merge passed hparams over default hparams\n hdict = self.get_default_hparams()\n hdict.update(hparams)\n self.hparams = argparse.Namespace(**hdict)\n self.actualize_hparams()\n return\n\n\n # methods to set hparams on the fly\n def update_hparams(self, new_hparams):\n hdict = vars(self.hparams)\n hdict.update(new_hparams)\n self.hparams = argparse.Namespace(**hdict)\n self.switch.update_hparams(new_hparams)\n super().update_hparams(new_hparams)\n self.actualize_hparams()\n\n def actualize_hparams(self):\n # remember some hparams a little better\n self.count = self.hparams.vector_count\n self.perturb_words = self.hparams.perturb_words\n self.std = self.hparams.std\n return\n\n #\n # here are some useful defaults\n #\n def get_default_hparams(self):\n d = FireBERT_base.get_default_hparams(self)\n d.update({\n # these are for SWITCH\n 'use_USE':False,\n 'stop_words':True,\n 'perturb_words':2,\n # this is for base\n 'verbose':False,\n 'vote_avg_logits':True,\n # this is for us\n 'std':0.05,\n 'vector_count':10\n })\n return d\n\n\n \n\n # this fills in the hook prepared in the base class\n def extend_batch_examples_eval(self, input_ids=None, attention_mask=None, token_type_ids=None, \n position_ids=None, head_mask=None, inputs_embeds=None, example_idx=None):\n group_ids = None\n group_sizes = []\n\n inputs_embeds_results = inputs_embeds\n \n # we do need the examples for SWITCH\n if example_idx is not None:\n # let's get the embeddings for the original samples\n inputs_embeds = self.bert.get_input_embeddings()(input_ids).detach()\n inputs_embeds_results = inputs_embeds\n\n # in groups, we keep track of samples tha belong together for voting\n group_ids = list(range(0,len(example_idx)))\n group_sizes = []\n current_group = 0\n \n # gotta go through one by one. 
Yes, batch logic is nicer, but SWICH is a one-by-one thing, anyway.\n for i, idx in enumerate(example_idx):\n example = self.processor.all_examples[idx] \n # call the perturbation method individually for each example \n perturbed_inputs_embeds, sample_attention_mask, sample_token_type_ids, _, _, _ = \\\n self.perturb_example(example,\n sample_index = i,\n input_ids=input_ids, \n attention_mask=attention_mask, \n token_type_ids=token_type_ids)\n\n # Put tensors together\n # print(\"perturbed examples: \", perturbed_inputs_embeds[0])\n if perturbed_inputs_embeds is None:\n # didn't get any important words to perturb, so no batch extensions\n group_sizes.append(1) # pus one for the original\n else:\n inputs_embeds_results = torch.cat((inputs_embeds_results, perturbed_inputs_embeds), dim=0)\n attention_mask = torch.cat((attention_mask, sample_attention_mask), dim=0)\n token_type_ids = torch.cat((token_type_ids, sample_token_type_ids), dim=0)\n group_ids += [current_group]*(self.count)\n group_sizes.append(self.count+1) # plus one for the original\n\n current_group += 1\n\n \n # need to erase the tokens (input_ids) so that BERT will use the embeddings\n input_ids = None\n # we probably received these as None, but if we don't set them to that, we might have to \n # adjust them for the new batch size and that would be tedious\n head_mask = None\n position_ids = None\n # won't need these anymore\n example_idx = None\n\n return input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds_results, example_idx, group_ids, group_sizes\n\n\n #\n # this perturbs an example and returns it in a batch with others\n #\n\n def perturb_example(self,example, count=None, std=None, sample_index=0, input_ids=None, attention_mask=None, token_type_ids=None):\n if count is None:\n count = self.count\n if std is None:\n std = self.std\n if input_ids is None:\n # convert example into features\n input_ids, attention_mask, token_type_ids, _ = self.processor.create_feature_tensors([example], device=self.device)\n sample_index = 0\n\n # use SWITCH to figure out word importance within the list\n word_indices, token_indices, word_list = \\\n self.switch.get_important_indices_from_example(example, \n input_ids[sample_index].unsqueeze(0), \n token_type_ids[sample_index].unsqueeze(0),\n attention_mask[sample_index].unsqueeze(0))\n\n # filter out useless stuff\n word_indices = list(filter(lambda x:x!=-1, word_indices))\n token_indices = list(filter(lambda x:x!=-1, token_indices))\n\n # identify most important words\n important_words = [word_list[i] for i in word_indices[:self.perturb_words]]\n token_indices = token_indices[:self.perturb_words]\n\n # get embeddings from BERT for the whole sample (set of words)\n embeddings = self.bert.get_input_embeddings()(input_ids[sample_index]).detach()\n #print(\"embeddings:\",embeddings.shape)\n #print(embeddings[0,:20])\n \n batch = None\n points = None\n\n for token_index in token_indices:\n # get the embedding vector for the most important word\n v = embeddings[token_index].clone().detach()\n #print(\"vector:\", v.shape)\n #print(v)\n\n # scale the single sample set of tokens/embeddings up to a whole batch\n batch = embeddings.repeat(count,1,1)\n # hopefully give some GPU memory back\n #del embeddings\n \n # scatter a region around this vector\n points = self.region_around(v, std=std, count=count)\n #print(\"region:\", points.shape)\n #print(points[0])\n\n #print(\"batch embeddings before clobber:\",batch.shape)\n #print(batch[0,:20])\n\n # clobber the tensor for 
the region of perturbed vectors in there\n batch[:,token_index,:] = points\n #print(\"batch embeddings after clobber:\",batch.shape)\n #print(batch[0,:20])\n\n attention_mask = attention_mask[sample_index].repeat(count, 1)\n token_type_ids = token_type_ids[sample_index].repeat(count, 1)\n return batch, attention_mask, token_type_ids, points, important_words, None\n\n #\n # helper methods\n #\n\n #\n # make a field of Gaussian-perturbed vectors around a given vector v\n #\n def region_around(self, vector, std, count, device=None):\n vectors = vector.repeat(count, 1)\n #print(\"vectors:\",vectors.shape)\n region = torch.normal(mean=vectors, std=std).cpu()\n #print(\"region:\",region.shape)\n return region\n\n def get_single_vector(self, word):\n #tbd\n return None\n\n def get_hparam(self, name):\n return self.hparams[name]\n\n\n\n\n\n\n#\n# Tests. \n#\n\ndef test_FireBERT_FVE(task, set, reps=1, sample=1, hparams_default={}, hparams_lists=None, lightning=''):\n\n # prepare hyperparameters\n hparams = hparams_default\n\n # load the right processor class\n if task == \"MNLI\":\n processor = MnliProcessor({'sample_percent':sample}) # negative number means abs number of samples, not percent\n elif task == \"IMDB\":\n processor = ImdbProcessor({'sample_percent':sample})\n\n # now instantiate the models\n model = FireBERT_FVE(load_from='resources/models/'+task+lightning+'/pytorch_model.bin', \n processor=processor, \n hparams=hparams_default)\n processor.set_tokenizer(model.tokenizer)\n\n dataset, examples = processor.load_and_cache_examples(\"data/\"+task, example_set=set)\n model.set_test_dataset(dataset, examples)\n\n #adv set\n # load the right processor class\n if task == \"MNLI\":\n adv_processor = MnliProcessor({'sample_percent':sample}) # negative number means abs number of samples, not percent\n elif task == \"IMDB\":\n adv_processor = ImdbProcessor({'sample_percent':sample})\n\n model_adv = FireBERT_FVE(load_from='resources/models/'+task+lightning+'/pytorch_model.bin', \n processor=processor, \n hparams=hparams_default)\n adv_processor.set_tokenizer(model.tokenizer)\n\n dataset_adv, examples_adv = adv_processor.load_and_cache_examples(\"data/\"+task, example_set=\"adv_\"+set)\n model_adv.set_test_dataset(dataset_adv, examples_adv)\n\n for i in range(reps):\n if hparams_lists is None:\n print(\"FireBERT_FVE specific test\", task, set)\n else:\n print(\"FireBERT_FVE hparam test\", task, set)\n print(\"{\")\n for item in hparams_lists.items():\n key = item[0]\n values = item[1]\n hparams[key] = random.choice(values)\n print(\" '\"+key+\"':\",str(hparams[key])+\",\")\n print(\"}\")\n\n # set the new hparams\n model.update_hparams(hparams)\n model_adv.update_hparams(hparams)\n\n trainer = pl.Trainer(gpus=(-1 if torch.cuda.is_available() else None))\n trainer.test(model)\n result1 = trainer.tqdm_metrics\n\n trainer = pl.Trainer(gpus=(-1 if torch.cuda.is_available() else None))\n trainer.test(model_adv)\n result2 = trainer.tqdm_metrics\n\n f = open(\"results/five/hparams-results.csv\", \"a+\")\n print(task, \",\", \"adv_\"+set, \",\", sample, ',\"',hparams,'\",',result1['avg_test_acc'],\",\",result2['avg_test_acc'], sep=\"\", file=f)\n f.close()\n\n print(\"iteration\",i,\"logged.\")\n elapsed_time()\n print()\n \n if hparams_lists is None:\n break\n\ndef elapsed_time():\n global t_start\n t_now = time.time()\n t = t_now-t_start\n print(\"elapsed time: \",round(t,2), \"s\")\n t_start = t_now\n return t\n\n \n\nif __name__ == \"__main__\":\n import random\n import time\n import pytorch_lightning 
as pl\n from processors import MnliProcessor, ImdbProcessor\n\n t_start = time.time()\n # prepare hyperparameters\n hparams_default = {\n 'batch_size':8,\n # these are for SWITCH\n 'use_USE':False,\n 'stop_words':True,\n 'perturb_words':2,\n # this is for base\n 'verbose':False,\n 'vote_avg_logits':True,\n # this is for us\n 'std':0.05,\n 'vector_count':10\n }\n\n hparams_lists = {\n # these are for SWITCH\n 'stop_words':[True, False],\n 'perturb_words':range(1,20),\n # this is for base\n 'vote_avg_logits':[True, False],\n # this is for us\n 'std':np.arange(0.1,10,0.01),\n 'vector_count':range(3,15)\n }\n\n\n # parameter search\n sample = 15\n #test_FireBERT_FVE(\"IMDB\", \"dev\", reps=500, sample=sample, hparams_default=hparams_default, hparams_lists=hparams_lists)\n #test_FireBERT_FVE(\"MNLI\", \"dev\", reps=500, sample=sample, hparams_default=hparams_default, hparams_lists=hparams_lists)\n\n # Monday, on lightning model\n best_IMDB_lightning = {'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1,\n 'verbose': False, 'vote_avg_logits': False, 'std': 8.4, 'vector_count': 11}\n best_mnli_lightning = {'batch_size': 8, 'use_USE': False, 'stop_words': False, 'perturb_words': 1, \n 'verbose': False, 'vote_avg_logits': False, 'std': 2.31, 'vector_count': 8}\n\n # Tuesday, on paper model\n best_mnli = {'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1, \n 'verbose': False, 'vote_avg_logits': True, 'std': 8.14, 'vector_count': 8}\n best_IMDB={'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1, \n 'verbose': False, 'vote_avg_logits': True, 'std': 2.29, 'vector_count': 10}\n\n # actual test runs\n #test_FireBERT_FVE(\"IMDB\", \"test\", reps=1, sample=100, hparams_default=best_IMDB)\n test_FireBERT_FVE(\"MNLI\", \"test\", reps=1, sample=100, hparams_default=best_mnli)\n"
] |
[
[
"torch.normal",
"numpy.arange",
"torch.cuda.is_available",
"torch.cat"
]
] |
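The core perturbation in FireBERT_FVE.region_around is: tile one embedding vector count times, then draw element-wise Gaussian noise around it. A stand-alone PyTorch sketch of just that step:

import torch

def region_around(vector, std, count):
    vectors = vector.repeat(count, 1)            # shape (count, dim)
    return torch.normal(mean=vectors, std=std)   # Gaussian cloud around vector

cloud = region_around(torch.zeros(4), std=0.05, count=10)
print(cloud.shape)  # torch.Size([10, 4])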
takashi-matsushita/lab
|
[
"894e5762f58046c68e665d7463db3d7359c15fda"
] |
[
"dnn/ddqn.py"
] |
[
"import random\n\nimport collections\n\nimport numpy as np\n\nimport keras\nfrom keras import layers\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import optimizers\n\nimport tensorflow as tf\n\n\n\"\"\" double-DQN \"\"\"\nclass DDQN:\n def huber_loss(y_true, y_pred, delta=1.0):\n error = y_true - y_pred\n\n condition = K.abs(error) < delta\n square = 0.5*K.square(error)\n linear = delta*(K.abs(error) - 0.5*delta)\n\n loss = tf.where(condition, linear, square)\n return K.mean(loss)\n\n def __init__(self, nstate, naction):\n self.nstate = nstate\n self.naction = naction\n self.lr = 1.e-3\n\n self.model = self.build_model()\n self.target = self.build_model() \n self.soft_update(1.) # copy model weights to target\n\n def build_model(self):\n states = layers.Input(shape=(self.nstate,))\n\n ## value network\n net = layers.Dense(16)(states)\n net = layers.Activation(\"relu\")(net)\n net = layers.Dense(16)(net)\n net = layers.Activation(\"relu\")(net)\n net = layers.Dense(self.naction)(net)\n actions = layers.Activation(\"linear\")(net)\n\n model = Model(inputs=states, outputs=actions)\n optimizer = optimizers.Adam(lr=self.lr)\n model.compile(optimizer=optimizer, loss=DDQN.huber_loss)\n\n return model\n\n def train(self, x, y, batch_size=32, verbose=0):\n self.model.fit(x, y, batch_size=batch_size, verbose=verbose)\n\n def predict(self, state, target=False):\n if len(state.shape) == 1:\n state = np.expand_dims(state, axis=0)\n\n rc = None\n if target:\n rc = self.target.predict(state)\n else:\n rc = self.model.predict(state)\n return rc\n\n def soft_update(self, tau=0.2):\n weights = np.array(self.model.get_weights())\n target_weights = np.array(self.target.get_weights())\n\n next_weights = tau * weights + (1. - tau) * target_weights\n self.target.set_weights(next_weights)\n\n\nclass ReplayBuffer:\n def __init__(self, buf_size):\n self.buf_size = buf_size\n self.buffer = collections.deque(maxlen=buf_size)\n self.experience = collections.namedtuple(\"Experience\",\n field_names=[\"s0\", \"a\", \"r\", \"s1\", \"d\"])\n\n def add(self, state, action, reward, next_state, d=None):\n e = self.experience(state, action, reward, next_state, d)\n self.buffer.append(e)\n\n def sample(self, batch_size=32):\n return random.sample(self.buffer, k=batch_size)\n\n def __len__(self):\n return len(self.buffer)\n\n\nclass Agent:\n steps = 0\n min_epsilon = 0.01\n scale = 1. 
- min_epsilon\n Lambda = 0.001\n update_target_frequency = 1000\n\n def __init__(self, nstate, naction, buf_size=10000):\n self.nstate = nstate\n self.naction = naction\n self.buf_size = buf_size\n self.epsilon = 1.0\n\n self.ddqn = DDQN(nstate, naction)\n self.memory = ReplayBuffer(buf_size)\n self.gamma = 0.99\n \n def act(self, state, training=True):\n rc = None\n if training and random.random() < self.epsilon:\n rc = random.randint(0, self.naction-1)\n else:\n rc = np.argmax(self.ddqn.predict(state))\n\n self.steps += 1\n self.epsilon = Agent.min_epsilon + Agent.scale * np.exp(-Agent.Lambda * self.steps)\n return rc\n\n def fit(self, batch_size=32):\n def to_list(array, key):\n return [getattr(x, key) for x in array]\n\n experiences = self.memory.sample(batch_size)\n states = np.array(to_list(experiences, 's0'))\n next_states = np.array(to_list(experiences, 's1'))\n actions = np.array(to_list(experiences, 'a'))\n rewards = np.array(to_list(experiences, 'r'))\n dones = np.array(to_list(experiences, 'd'))\n\n p0 = np.squeeze(self.ddqn.predict(states))\n p1 = np.squeeze(self.ddqn.predict(next_states, target=True))\n p2 = np.squeeze(self.ddqn.predict(next_states))\n\n y = np.array(p0)\n mask_y = keras.utils.to_categorical(actions, num_classes=self.naction).astype(np.bool)\n mask_p1 = keras.utils.to_categorical(np.argmax(p2, axis=1), num_classes=self.naction).astype(np.bool)\n y[mask_y.tolist()] = rewards + self.gamma * p1[mask_p1.tolist()] * (1 - dones)\n self.ddqn.train(states, y, batch_size=batch_size)\n self.ddqn.soft_update(0.1)\n\n\ndef run_episode(env, agent, render=False, training=True):\n done = False\n batch_size = 128\n total_reward = 0 \n\n s0 = env.reset()\n while not done: \n a = agent.act(s0, training)\n s1, r, done, info = env.step(a)\n if render: env.render()\n\n total_reward += r\n agent.memory.add(s0, a, r, s1, done)\n if len(agent.memory) > batch_size:\n agent.fit(batch_size)\n\n s0 = s1\n run_episode.counter += 1\n total_reward += r\n\n if done: break\n\n return total_reward\n\n\nif __name__ == '__main__':\n import gym\n\n model = 'CartPole-v0'\n maxlen = 100\n min_average = 195\n\n env = gym.make(model)\n nstate = env.env.observation_space.shape[0]\n naction = env.env.action_space.n\n\n agent = Agent(nstate, naction)\n\n run_episode.counter = 0\n rb = collections.deque(maxlen=maxlen)\n counter = 0\n while True:\n r = run_episode(env, agent, counter%maxlen==0)\n rb.append(r)\n counter += 1\n average = np.mean(rb)\n if counter % (maxlen/10) == 0:\n print('epoch = {:4d} running mean = {:6.2f}'.format(counter, average))\n\n if counter > maxlen-1 and average > min_average:\n print('done ', counter, average)\n break\n\n reward = run_episode(env, agent, render=True, training=False)\n print(reward)\n\n# eof\n"
] |
[
[
"numpy.expand_dims",
"numpy.argmax",
"numpy.mean",
"tensorflow.where",
"numpy.array",
"numpy.exp"
]
] |
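A NumPy reference for the Huber loss used by DDQN above. Huber is quadratic for |error| < delta and linear beyond it; since tf.where(cond, a, b) selects a where cond holds, the quadratic term belongs in the true branch (the definition above passes linear there, which swaps the two regimes):

import numpy as np

def huber(y_true, y_pred, delta=1.0):
    error = y_true - y_pred
    square = 0.5 * error ** 2
    linear = delta * (np.abs(error) - 0.5 * delta)
    return np.mean(np.where(np.abs(error) < delta, square, linear))

print(huber(np.array([0.0, 0.0]), np.array([0.5, 3.0])))  # (0.125 + 2.5) / 2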
KEVINYZY/plume
|
[
"dbd523861bfb9abad8a52b1de28de85c0f128807"
] |
[
"plume/svm.py"
] |
[
"\"\"\"\n利用数值优化算法实现的 SVM,在 toysvm.py 中有对应的\nSMO 算法实现。\n\"\"\"\n\nimport numpy as np\nimport math\nimport scipy.optimize as opt\nfrom functools import partial\nfrom numba import jit\n\n\nclass LinearSVC(object):\n def __init__(self, C: float = 1.0):\n \"\"\"\n :param C: float. Penalty parameters.\n \"\"\"\n self.C = C\n self.sign = np.vectorize(lambda x: 1 if x >= 0 else -1)\n\n def cal_weight(self) -> np.array:\n \"\"\"Get Weight\n :return: shape=[n_features]\n \"\"\"\n return self.alpha * self.y @ self.X\n\n def cal_bias(self) -> float:\n for i, alpha in enumerate(self.alpha):\n if 0 < alpha < self.C:\n return self.y[i] - self.y * self.alpha @ (self.X[i] @ self.X.T)\n\n def minfunc(self, alpha: np.array) -> float:\n X_ = (alpha * self.y * self.X.T).T\n return 0.5 * (np.sum(X_ @ X_.T) - np.sum(alpha))\n\n def optimize(self):\n \"\"\"Optimize by SciPy\n :return: Alphas. shape = [n_samples]\n \"\"\"\n bound = ((0, self.C),) * self.X.shape[0]\n con = ({'type': 'eq', 'fun': lambda alpha: np.sum(alpha * self.y)},)\n result = opt.minimize(self.minfunc, np.zeros(self.X.shape[0]), bounds=bound, constraints=con)\n return result['x']\n\n def fit(self, X, y):\n \"\"\"\n :param X_: shape = [n_samples, n_features] \n :param y: shape = [n_samples] \n :return: self\n \"\"\"\n self.X = X\n self.y = y\n self.alpha = self.optimize()\n self.weight = self.cal_weight()\n self.bias = self.cal_bias()\n\n def predict(self, X) -> np.array:\n \"\"\"\n :param X: Test vectors. shape=[n_samples, n_features] \n :return: shape=[n_samples] \n \"\"\"\n return self.sign(self.weight @ X.T + self.bias)\n\n\nclass SVC(object):\n def __init__(self, kernel='poly', smo=False, C=3.0, **kwargs):\n if kernel == 'poly':\n self.kernel = partial(SVC.polynomial, p=kwargs.get('p', 3))\n else:\n self.kernel = partial(SVC.gaussian, var=kwargs.get('var', 1))\n self.C = C\n\n @staticmethod\n def polynomial(X, x, p):\n \"\"\"多项式核函数\n :param x: 一个样本\n :param X: 样本集\n :param p: 指数\n :return: \n \"\"\"\n return (X @ x + 1) ** p\n\n @staticmethod\n def gaussian(X, x, var):\n \"\"\"高斯核函数\n :param x: 一个样本\n :param X: 样本集\n :param var: 方差\n :return: \n \"\"\"\n return np.exp(-np.linalg.norm(X - x, axis=1) ** 2 / (2 * var * var))\n\n def get_wx(self, x, alpha):\n return np.sum(alpha * self.y * self.kernel(x, self.X))\n\n @jit\n def minfunc(self, alpha: np.array) -> float:\n \"\"\"优化的目标函数\n :param alpha: 拉格朗日乘子\n :return: \n \"\"\"\n ans = 0.0\n for i in range(self.X.shape[0]):\n ans += alpha[i] * self.y[i] * self.get_wx(self.X[i], alpha)\n return 0.5 * ans - alpha.sum()\n\n def cal_bias(self) -> float:\n \"\"\"求偏置\n :return: bias\n \"\"\"\n for i, alpha in enumerate(self.alpha):\n if 0 < alpha < self.C:\n ans = self.y[i]\n ans -= self.get_wx(self.X[i], self.alpha)\n return ans\n\n def optimize(self):\n \"\"\"Optimize by SciPy\n :return: Alphas. shape = [n_samples]\n \"\"\"\n bound = ((0, self.C),) * self.X.shape[0]\n con = ({'type': 'eq', 'fun': lambda alpha: np.sum(alpha * self.y)},)\n result = opt.minimize(self.minfunc, np.zeros(self.X.shape[0]), bounds=bound, constraints=con)\n return result['x']\n\n def fit(self, X, y):\n \"\"\"\n :param X_: shape = [n_samples, n_features] \n :param y: shape = [n_samples] \n :return: self\n \"\"\"\n self.X = X\n self.y = y\n self.alpha = self.optimize()\n self.bias = self.cal_bias()\n\n def predict_one(self, x):\n return 1 if self.get_wx(x, self.alpha) + self.bias > 0 else -1\n\n def predict(self, X) -> np.array:\n return np.array([self.predict_one(i) for i in X])\n\n\n"
] |
[
[
"numpy.vectorize",
"numpy.zeros",
"numpy.sum",
"numpy.linalg.norm"
]
] |
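A minimal usage sketch for the LinearSVC above, on made-up linearly separable blobs (the data, seed, and C value here are purely illustrative):

    import numpy as np
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(2.0, 0.5, (20, 2)), rng.normal(-2.0, 0.5, (20, 2))])
    y = np.concatenate([np.ones(20), -np.ones(20)])   # labels must be +1 / -1
    clf = LinearSVC(C=1.0)
    clf.fit(X, y)
    print(clf.predict(X))   # should reproduce y on cleanly separable data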
tiesanguaixia/gconv-pytorch-deeplab-xception
|
[
"009fad814b13622fd2027330df2f00d9a31d0594"
] |
[
"modeling/gfunc/plot/plot_p4m.py"
] |
[
"import matplotlib.pyplot as plt\r\nfrom matplotlib.lines import Line2D\r\nfrom matplotlib.patches import FancyArrowPatch\r\n\r\nfrom gfunc.plot.plot_z2 import plot_z2\r\n\r\n\r\n# Miniature plot:\r\n# plot_p4m(imf.reshape(2, 4, 7, 7), rlabels='cayley2', fontsize=10,\r\n# labelpad_factor_1= .2, labelpad_factor_2=.8, labelpad_factor_3=0.5, labelpad_factor_4=1.2, figsize=(2.5, 2.5)\r\n\r\n\r\ndef plot_p4m(f, fignum=None, rlabels='cayley_mr', rcolor='red', mcolor='blue', rlinestyle='-', mlinestyle='-',\r\n fontsize=20, labelpad_factor_1=1.5, labelpad_factor_2=1.5, labelpad_factor_3=2.5, labelpad_factor_4=2.5,\r\n figsize=(3, 3)):\r\n \"\"\"\r\n Plot a function f : p4m -> R or f : p4m -> R^3.\r\n\r\n :param f: array of shape (2, 4, nx, ny) or (2, 4, nx, ny, 3) for a color plot.\r\n :param fignum: which figure the plot to.\r\n :param rlabels: the type of labels to use for the 8 patches.\r\n :param rcolor: the color of the rotation arrows.\r\n :param mcolor: the color of the mirror lines.\r\n :param rlinestyle: the linestyle of the rotation arrows.\r\n :param mlinestyle: the linestyle of the mirror lines.\r\n :param fontsize: size of the font used to label the 8 patches.\r\n :param labelpad_factor_1: tweak the position of the label.\r\n :param figsize: size of figure.\r\n \"\"\"\r\n\r\n assert f.shape[0] == 2\r\n assert f.shape[1] == 4\r\n assert f.ndim == 4 or f.ndim == 5\r\n ny, nx = f.shape[2:4]\r\n\r\n rlabel_names = {\r\n 'cayley_rm': ['$e$', '$r$', '$r^2$', '$r^3$', '$m$', '$r^3m$', '$r^2m$', '$rm$'],\r\n 'cayley_mr': ['$e$', '$r$', '$r^2$', '$r^3$', '$m$', '$mr$', '$mr^2$', '$mr^3$'],\r\n 'cayley2': ['$e$', '$r$', '$r^2$', '$r^3$', '$m$', '$mr$\\n$=$\\n$r^3m$', '$r^2m = mr^2$', '$mr^3$\\n$=$\\n$rm$'],\r\n 'none': ['', '', '', '', '', '', '', '']\r\n }\r\n\r\n fig = plt.figure(fignum, figsize=(2 * f.shape[1], 2 * f.shape[2]))\r\n fignum = fig.number\r\n main_ax = fig.gca()\r\n\r\n # Inner ring\r\n ax_e = fig.add_subplot(5, 5, 8)\r\n plot_z2(f[0, 0], fignum=fignum)\r\n ax_e.xaxis.set_label_position('bottom')\r\n ax_e.set_xlabel(\r\n rlabel_names[rlabels][0],\r\n fontsize=fontsize,\r\n labelpad=labelpad_factor_1 * fontsize)\r\n ax_e.set_xticks([])\r\n ax_e.set_yticks([])\r\n\r\n ax_r = fig.add_subplot(5, 5, 12)\r\n plot_z2(f[0, 1], fignum=fignum)\r\n ax_r.yaxis.set_label_position('right')\r\n ax_r.set_ylabel(\r\n rlabel_names[rlabels][1],\r\n fontsize=fontsize,\r\n rotation='horizontal',\r\n va='center',\r\n labelpad=labelpad_factor_2 * fontsize)\r\n ax_r.set_xticks([])\r\n ax_r.set_yticks([])\r\n\r\n ax_r2 = fig.add_subplot(5, 5, 18)\r\n plot_z2(f[0, 2], fignum=fignum)\r\n ax_r2.xaxis.set_label_position('top')\r\n ax_r2.set_xlabel(\r\n rlabel_names[rlabels][2],\r\n fontsize=fontsize,\r\n labelpad=labelpad_factor_1 * fontsize)\r\n ax_r2.set_xticks([])\r\n ax_r2.set_yticks([])\r\n\r\n ax_r3 = fig.add_subplot(5, 5, 14)\r\n plot_z2(f[0, 3], fignum=fignum)\r\n ax_r3.yaxis.set_label_position('left')\r\n ax_r3.set_ylabel(\r\n rlabel_names[rlabels][3],\r\n fontsize=fontsize,\r\n rotation=0,\r\n va='center',\r\n labelpad=labelpad_factor_2 * fontsize)\r\n ax_r3.set_xticks([])\r\n ax_r3.set_yticks([])\r\n\r\n # Outer ring\r\n ax_m = fig.add_subplot(5, 5, 3)\r\n plot_z2(f[1, 0], fignum=fignum)\r\n ax_m.xaxis.set_label_position('top')\r\n ax_m.set_xlabel(\r\n rlabel_names[rlabels][4],\r\n fontsize=fontsize,\r\n labelpad=labelpad_factor_3 * fontsize)\r\n ax_m.set_xticks([])\r\n ax_m.set_yticks([])\r\n\r\n ax_mr3 = fig.add_subplot(5, 5, 11)\r\n plot_z2(f[1, 1], fignum=fignum)\r\n 
ax_mr3.yaxis.set_label_position('left')\r\n ax_mr3.set_ylabel(\r\n rlabel_names[rlabels][5],\r\n fontsize=fontsize,\r\n rotation='horizontal',\r\n va='center',\r\n labelpad=labelpad_factor_4 * fontsize)\r\n ax_mr3.set_xticks([])\r\n ax_mr3.set_yticks([])\r\n\r\n ax_mr2 = fig.add_subplot(5, 5, 23)\r\n plot_z2(f[1, 2], fignum=fignum)\r\n ax_mr2.xaxis.set_label_position('bottom')\r\n ax_mr2.set_xlabel(\r\n rlabel_names[rlabels][6],\r\n fontsize=fontsize,\r\n labelpad=labelpad_factor_3 * fontsize)\r\n ax_mr2.set_xticks([])\r\n ax_mr2.set_yticks([])\r\n\r\n ax_mr = fig.add_subplot(5, 5, 15)\r\n plot_z2(f[1, 3], fignum=fignum)\r\n ax_mr.yaxis.set_label_position('right')\r\n ax_mr.set_ylabel(\r\n rlabel_names[rlabels][7],\r\n fontsize=fontsize,\r\n rotation=0,\r\n va='center',\r\n labelpad=labelpad_factor_4 * fontsize)\r\n ax_mr.set_xticks([])\r\n ax_mr.set_yticks([])\r\n\r\n # Create pixel coordinate in the subplot coordinate systems for each beginning and enpoint of the arrows\r\n pt_right = (nx - 0.25, ny // 2)\r\n pt_top = (nx // 2, -0.75)\r\n pt_bottom = (nx // 2, ny - 0.25)\r\n pt_left = (-0.75, ny // 2)\r\n pt_center = (nx // 2, ny // 2)\r\n\r\n figtr = fig.transFigure.inverted() # Display -> Figure\r\n\r\n # Transform to figure coordinates\r\n # Forward rotation arrows\r\n pt_e_r = figtr.transform(ax_e.transData.transform(pt_left))\r\n pt_r_e = figtr.transform(ax_r.transData.transform(pt_top))\r\n\r\n pt_r_r2 = figtr.transform(ax_r.transData.transform(pt_bottom))\r\n pt_r2_r = figtr.transform(ax_r2.transData.transform(pt_left))\r\n\r\n pt_r2_r3 = figtr.transform(ax_r2.transData.transform(pt_right))\r\n pt_r3_r2 = figtr.transform(ax_r3.transData.transform(pt_bottom))\r\n\r\n pt_r3_e = figtr.transform(ax_r3.transData.transform(pt_top))\r\n pt_e_r3 = figtr.transform(ax_e.transData.transform(pt_right))\r\n\r\n # Mirrored rotation arrows\r\n pt_m_mr = figtr.transform(ax_m.transData.transform(pt_right))\r\n pt_mr_m = figtr.transform(ax_mr.transData.transform(pt_top))\r\n\r\n pt_mr_mr2 = figtr.transform(ax_mr.transData.transform(pt_bottom))\r\n pt_mr2_mr = figtr.transform(ax_mr2.transData.transform(pt_right))\r\n\r\n pt_mr2_mr3 = figtr.transform(ax_mr2.transData.transform(pt_left))\r\n pt_mr3_mr2 = figtr.transform(ax_mr3.transData.transform(pt_bottom))\r\n\r\n pt_mr3_m = figtr.transform(ax_mr3.transData.transform(pt_top))\r\n pt_m_mr3 = figtr.transform(ax_m.transData.transform(pt_left))\r\n\r\n # Mirroring lines\r\n pt_e_m = figtr.transform(ax_e.transData.transform(pt_center))\r\n pt_m_e = figtr.transform(ax_m.transData.transform(pt_center))\r\n\r\n pt_r_mr3 = figtr.transform(ax_r.transData.transform(pt_center))\r\n pt_mr3_r = figtr.transform(ax_mr3.transData.transform(pt_center))\r\n\r\n pt_r2_mr2 = figtr.transform(ax_r2.transData.transform(pt_center))\r\n pt_mr2_r2 = figtr.transform(ax_mr2.transData.transform(pt_center))\r\n\r\n pt_r3_mr = figtr.transform(ax_r3.transData.transform(pt_center))\r\n pt_mr_r3 = figtr.transform(ax_mr.transData.transform(pt_center))\r\n\r\n # Draw rotation arrows\r\n arrow = FancyArrowPatch(\r\n pt_e_r,\r\n pt_r_e,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=10, angleB=-100',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_r_r2,\r\n pt_r2_r,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=100, angleB=170',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n 
color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_r2_r3,\r\n pt_r3_r2,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=190, angleB=260',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_r3_e,\r\n pt_e_r3,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=280, angleB=-10',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_m_mr,\r\n pt_mr_m,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=170, angleB=280',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_mr_mr2,\r\n pt_mr2_mr,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=260, angleB=10',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_mr2_mr3,\r\n pt_mr3_mr2,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=-10, angleB=100',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n arrow = FancyArrowPatch(\r\n pt_mr3_m,\r\n pt_m_mr3,\r\n transform=fig.transFigure,\r\n connectionstyle='angle3, angleA=260, angleB=10',\r\n arrowstyle='->,head_length=3.5,head_width=2.5',\r\n lw='2.0',\r\n color=rcolor,\r\n linestyle=rlinestyle\r\n )\r\n fig.patches.append(arrow)\r\n\r\n # Draw mirror lines\r\n main_ax.add_line(Line2D((pt_e_m[0], pt_m_e[0]), (pt_e_m[1], pt_m_e[1]), zorder=0, linewidth=4, color=mcolor, transform=fig.transFigure, linestyle=mlinestyle))\r\n main_ax.add_line(Line2D((pt_r_mr3[0], pt_mr3_r[0]), (pt_r_mr3[1], pt_mr3_r[1]), zorder=0, linewidth=4, color=mcolor, transform=fig.transFigure, linestyle=mlinestyle))\r\n main_ax.add_line(Line2D((pt_r2_mr2[0], pt_mr2_r2[0]), (pt_r2_mr2[1], pt_mr2_r2[1]), zorder=0, linewidth=4, color=mcolor, transform=fig.transFigure, linestyle=mlinestyle))\r\n main_ax.add_line(Line2D((pt_r3_mr[0], pt_mr_r3[0]), (pt_r3_mr[1], pt_mr_r3[1]), zorder=0, linewidth=4, color=mcolor, transform=fig.transFigure, linestyle=mlinestyle))\r\n\r\n main_ax.axis('off')\r\n\r\n fig.set_size_inches(figsize, forward=True)\r\n"
] |
[
[
"matplotlib.lines.Line2D",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.figure"
]
] |
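A small driver for plot_p4m above; the random array is only a stand-in for a real function on the p4m group (axes: mirror flag, rotation, then the spatial grid):

    import numpy as np
    import matplotlib.pyplot as plt
    f = np.random.rand(2, 4, 7, 7)   # (mirror, rotation, ny, nx) -- illustrative sizes
    plot_p4m(f, rlabels='cayley_mr', fontsize=10, figsize=(3, 3))
    plt.show()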
ApeMocker/CSM-for-fetal-HC-measurement
|
[
"a354d2ebd46eee6a3df11bf3fd413a340432c5b4"
] |
[
"code/ellip_fit.py"
] |
[
"\"\"\"\r\nThis script is used for ellipse fitting from edge images.\r\nRequirement: postprocess.py has been executed so edge images exist.\r\n\"\"\"\r\nimport pandas as pd\r\nimport os\r\nimport cv2\r\nfrom modules import ellip_fit\r\nimport numpy as np\r\n\r\n# Postprocess results folder\r\nedge_folder = '../results/predictions_edge/'\r\n\r\n# To save ellipse parameters\r\nresults = []\r\nname = ['filename', 'center_x(pixel)', 'center_y(pixel)', 'semi_axes_a(pixel)',\r\n 'semi_axes_b(pixel)', 'HC(pixel)', 'angle(rad)']\r\n\r\n# Filename of ellipse parameters file\r\nsave_ellip_para_file = '../results/ellip_params.csv'\r\n\r\n# upsample factor\r\nu = 16\r\n\r\n# Ellipse fitting to obtain parameters\r\ndirs = os.listdir(edge_folder)\r\nfor i in range(len(dirs)):\r\n print('Ellip fitting: Image = %d / %d' % (i + 1, len(dirs)))\r\n img_name = dirs[i]\r\n img_path = edge_folder + img_name\r\n\r\n edge_img = cv2.imread(img_path, 0)\r\n xc, yc, theta, a, b = ellip_fit(edge_img)\r\n\r\n # Restore to original size with the factor 'u'\r\n xc = (xc + 0.5) * u - 0.5\r\n yc = (yc + 0.5) * u - 0.5\r\n a = a * u\r\n b = b * u\r\n hc = 2 * np.pi * b + 4 * (a - b) # HC\r\n\r\n results.append([img_name, xc, yc, a, b, hc, theta])\r\n\r\n# Save\r\npredict_results = pd.DataFrame(columns=name, data=results)\r\npredict_results.to_csv(save_ellip_para_file, index=False)\r\n\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
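The HC line above uses the simple ellipse-perimeter approximation 2*pi*b + 4*(a - b). For context, Ramanujan's approximation can serve as a cross-check; the sketch below (with made-up semi-axes) shows that the simple formula drifts high as eccentricity grows:

    import numpy as np
    a, b = 100.0, 60.0                       # hypothetical semi-axes in pixels
    simple = 2 * np.pi * b + 4 * (a - b)     # formula used in ellip_fit.py (~537.0)
    ramanujan = np.pi * (3 * (a + b) - np.sqrt((3 * a + b) * (a + 3 * b)))   # ~510.5
    print(simple, ramanujan)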
nqbinh17/memo_track
|
[
"4f6a749a1cca4368ef41b4cf70be6acdaa267b47"
] |
[
"fairseq/custom_transformer/fnet.py"
] |
[
"import torch.nn as nn\nimport torch\n\nclass FNet(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n x = torch.fft.fft(torch.fft.fft(x, dim=-1), dim=-2).real\n return x"
] |
[
[
"torch.fft.fft"
]
] |
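The FNet block above is the parameter-free token mixing from the FNet paper: the real part of a 2-D DFT taken over the hidden and sequence dimensions. A quick shape and dtype check, with invented sizes:

    import torch
    net = FNet()
    x = torch.randn(2, 16, 32)    # (batch, seq_len, hidden) -- illustrative
    y = net(x)
    assert y.shape == x.shape     # mixing preserves the tensor shape
    assert y.dtype == x.dtype     # .real maps complex64 back to float32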
tattaka/ukiyoe
|
[
"4a1024ddf30737e68923dcdd6a50580d912e076d"
] |
[
"src/cls_models/commons.py"
] |
[
"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass Swish(nn.Module):\n def forward(self, x):\n return x * torch.sigmoid(x)\n \nclass MishFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return x * torch.tanh(F.softplus(x)) # x * tanh(ln(1 + exp(x)))\n\n @staticmethod\n def backward(ctx, grad_output):\n x = ctx.saved_variables[0]\n sigmoid = torch.sigmoid(x)\n tanh_sp = torch.tanh(F.softplus(x)) \n return grad_output * (tanh_sp + x * sigmoid * (1 - tanh_sp * tanh_sp))\n\nclass Mish(nn.Module):\n def forward(self, x):\n return MishFunction.apply(x)\n \nclass Flatten(nn.Module):\n \"\"\"\n Simple class for flattening layer.\n \"\"\"\n def forward(self, x):\n return x.view(x.size()[0], -1)\n \nclass Conv2dReLU(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, padding=0,\n stride=1, use_batchnorm=True, **batchnorm_params):\n\n super().__init__()\n\n layers = [\n nn.Conv2d(in_channels, out_channels, kernel_size,\n stride=stride, padding=padding, bias=not (use_batchnorm)),\n nn.ReLU(inplace=True),\n ]\n\n if use_batchnorm:\n layers.insert(1, nn.BatchNorm2d(out_channels, **batchnorm_params))\n\n self.block = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.block(x)\n \nsigmoid = lambda x: 1 / (1 + torch.exp(-x))\n\nclass Normalize(nn.Module):\n r\"\"\"Performs :math:`L_p` normalization of inputs over specified dimension.\n Does:\n .. math::\n v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}\n for each subtensor v over dimension dim of input. Each subtensor is\n flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix\n norm.\n With default arguments normalizes over the second dimension with Euclidean\n norm.\n Args:\n p (float): the exponent value in the norm formulation. Default: 2\n dim (int): the dimension to reduce. Default: 1\n \"\"\"\n def __init__(self, p=2, dim=1):\n super(Normalize, self).__init__()\n self.p = p\n self.dim = dim\n\n def forward(self, x):\n return F.normalize(x, self.p, self.dim, eps=1e-8)\n\n\nclass PyramidPooling(nn.Module):\n \"\"\"\n Reference:\n Zhao, Hengshuang, et al. 
*\"Pyramid scene parsing network.\"*\n \"\"\"\n def __init__(self, in_channels, norm_layer, up_kwargs):\n super(PyramidPooling, self).__init__()\n self.pool1 = AdaptiveAvgPool2d(1)\n self.pool2 = AdaptiveAvgPool2d(2)\n self.pool3 = AdaptiveAvgPool2d(3)\n self.pool4 = AdaptiveAvgPool2d(6)\n\n out_channels = int(in_channels/4)\n self.conv1 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),\n norm_layer(out_channels),\n ReLU(True))\n self.conv2 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),\n norm_layer(out_channels),\n ReLU(True))\n self.conv3 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),\n norm_layer(out_channels),\n ReLU(True))\n self.conv4 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),\n norm_layer(out_channels),\n ReLU(True))\n # bilinear upsample options\n self._up_kwargs = up_kwargs\n\n def forward(self, x):\n _, _, h, w = x.size()\n feat1 = F.upsample(self.conv1(self.pool1(x)), (h, w), **self._up_kwargs)\n feat2 = F.upsample(self.conv2(self.pool2(x)), (h, w), **self._up_kwargs)\n feat3 = F.upsample(self.conv3(self.pool3(x)), (h, w), **self._up_kwargs)\n feat4 = F.upsample(self.conv4(self.pool4(x)), (h, w), **self._up_kwargs)\n return torch.cat((x, feat1, feat2, feat3, feat4), 1)\n\n\nclass SeparableConv2d(nn.Module):\n def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False, BatchNorm=nn.BatchNorm2d):\n super(SeparableConv2d, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)\n self.bn = BatchNorm(inplanes)\n self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn(x)\n x = self.pointwise(x)\n return x\n\n\nclass JPU(nn.Module):\n def __init__(self, in_channels, width=512):\n super(JPU, self).__init__()\n self.conv5 = nn.Sequential(\n nn.Conv2d(in_channels[0], width, 3, padding=1, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n self.conv4 = nn.Sequential(\n nn.Conv2d(in_channels[1], width, 3, padding=1, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n self.conv3 = nn.Sequential(\n nn.Conv2d(in_channels[2], width, 3, padding=1, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n\n self.dilation1 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=1, dilation=1, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n self.dilation2 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=2, dilation=2, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n self.dilation3 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=4, dilation=4, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n self.dilation4 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=8, dilation=8, bias=False),\n nn.BatchNorm2d(width),\n nn.ReLU(inplace=True))\n\n def forward(self, *inputs):\n feats = [self.conv5(inputs[0]), self.conv4(inputs[1]), self.conv3(inputs[2])]\n _, _, h, w = feats[-1].size()\n feats[-2] = F.interpolate(feats[-2], size=(h, w), mode='bilinear', align_corners=True)\n feats[-3] = F.interpolate(feats[-3], size=(h, w), mode='bilinear', align_corners=True)\n feat = torch.cat(feats, dim=1)\n feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1)\n return feat\n# return inputs[0], inputs[1], inputs[2], feat\n\nclass Mean(nn.Module):\n def 
__init__(self, dim, keep_dim=False):\n super(Mean, self).__init__()\n self.dim = dim\n self.keep_dim = keep_dim\n\n def forward(self, input):\n return input.mean(self.dim, self.keep_dim)\n \nclass _ASPPModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, dilation):\n super(_ASPPModule, self).__init__()\n planes = int(planes)\n self.atrous_conv = nn.Conv2d(inplanes, int(planes), kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)\n self.bn = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nclass ASPP(nn.Module):\n def __init__(self, inplanes=512, mid_c=256, dilations=[1, 6, 12, 18]):\n super(ASPP, self).__init__()\n self.aspp1 = _ASPPModule(inplanes, mid_c, 1, padding=0, dilation=dilations[0])\n self.aspp2 = _ASPPModule(inplanes, mid_c, 3, padding=dilations[1], dilation=dilations[1])\n self.aspp3 = _ASPPModule(inplanes, mid_c, 3, padding=dilations[2], dilation=dilations[2])\n self.aspp4 = _ASPPModule(inplanes, mid_c, 3, padding=dilations[3], dilation=dilations[3])\n mid_c = int(mid_c)\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(inplanes, mid_c, 1, stride=1, bias=False),\n nn.BatchNorm2d(mid_c),\n nn.ReLU())\n self._init_weight()\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n x5 = self.global_avg_pool(x)\n x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n return x\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \nclass AdaptiveConcatPool2d(nn.Module):\n def __init__(self, sz=None):\n super().__init__()\n sz = sz or (1,1)\n self.ap = nn.AdaptiveAvgPool2d(sz)\n self.mp = nn.AdaptiveMaxPool2d(sz)\n def forward(self, x):\n return torch.cat([self.mp(x), self.ap(x)], 1)"
] |
[
[
"torch.nn.functional.normalize",
"torch.nn.Sequential",
"torch.sigmoid",
"torch.nn.AdaptiveMaxPool2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.exp",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.softplus",
"torch.nn.init.kaiming_normal_"
]
] |
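The hand-written MishFunction backward above encodes d/dx[x * tanh(softplus(x))] = tanh(sp) + x * sigmoid(x) * (1 - tanh(sp)^2). One way to validate it (an illustrative check, not part of the module) is to compare against autograd on the closed-form expression:

    import torch
    import torch.nn.functional as F
    x = torch.randn(16, dtype=torch.float64, requires_grad=True)
    Mish()(x).sum().backward()
    ref = x.detach().clone().requires_grad_(True)
    (ref * torch.tanh(F.softplus(ref))).sum().backward()
    assert torch.allclose(x.grad, ref.grad)   # hand-written grad matches autograd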
ahmedhamdy90/Deep-Learning-Neural-Network
|
[
"4a015b385958c313310aa9e30f6482431b1b845b"
] |
[
"Tensorflow Hello World/tensorflow_example.py"
] |
[
"import tensorflow as tf\nhello = tf.constant('Hello, TensorFlow!')\nsession = tf.Session()\nprint(session.run(hello))\nsession.close()"
] |
[
[
"tensorflow.constant",
"tensorflow.Session"
]
] |
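The snippet above uses the TensorFlow 1.x Session API. Under TensorFlow 2.x, where eager execution is the default, the equivalent program drops the session entirely:

    import tensorflow as tf
    hello = tf.constant('Hello, TensorFlow!')
    print(hello.numpy())   # eager execution: no Session required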
X-kimna/Music2Dance
|
[
"f09932aeb7e5944902c82790dffca904d6e9dfab"
] |
[
"PoseNet/converter/tfjs2python.py"
] |
[
"import json\nimport struct\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nimport os\nimport yaml\nimport sys\n\nf = open(\"config.yaml\", \"r+\")\ncfg = yaml.load(f)\ncheckpoints = cfg['checkpoints']\nimageSize = cfg['imageSize']\nchk = cfg['chk']\noutputStride = cfg['outputStride']\nchkpoint = checkpoints[chk]\n\nif chkpoint == 'mobilenet_v1_050':\n mobileNetArchitectures = cfg['mobileNet50Architecture']\nelif chkpoint == 'mobilenet_v1_075':\n mobileNetArchitectures = cfg['mobileNet75Architecture']\nelse:\n mobileNetArchitectures = cfg['mobileNet100Architecture']\n\nwidth = imageSize\nheight = imageSize\n\ndef toOutputStridedLayers(convolutionDefinition, outputStride):\n currentStride = 1\n rate = 1\n blockId = 0\n buff = []\n for _a in convolutionDefinition:\n convType = _a[0]\n stride = _a[1]\n \n if (currentStride == outputStride):\n layerStride = 1\n layerRate = rate\n rate *= stride\n else:\n layerStride = stride\n layerRate = 1\n currentStride *= stride\n \n buff.append({\n 'blockId': blockId,\n 'convType': convType,\n 'stride': layerStride,\n 'rate': layerRate,\n 'outputStride': currentStride\n })\n blockId += 1\n\n return buff\n\nlayers = toOutputStridedLayers(mobileNetArchitectures, outputStride)\n\nf = open(os.path.join('./waits/', chkpoint, \"manifest.json\"))\nvariables = json.load(f)\nf.close()\n\n# with tf.variable_scope(None, 'MobilenetV1'):\nfor x in variables:\n filename = variables[x][\"filename\"]\n byte = open( os.path.join('./waits/', chkpoint, filename),'rb').read()\n fmt = str (int (len(byte) / struct.calcsize('f'))) + 'f'\n d = struct.unpack(fmt, byte) \n # d = np.array(d,dtype=np.float32)\n d = tf.cast(d, tf.float32)\n d = tf.reshape(d,variables[x][\"shape\"])\n variables[x][\"x\"] = tf.Variable(d,name=x)\n\ndef read_imgfile(path, width, height):\n img = cv2.imread(path)\n img = cv2.resize(img, (width,height))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(float)\n img = img * (2.0 / 255.0) - 1.0\n return img\n\ndef convToOutput(mobileNetOutput, outputLayerName):\n w = tf.nn.conv2d(mobileNetOutput,weights(outputLayerName),[1,1,1,1],padding='SAME')\n w = tf.nn.bias_add(w,biases(outputLayerName), name=outputLayerName)\n return w\n\ndef conv(inputs, stride, blockId):\n return tf.nn.relu6(\n tf.nn.conv2d(inputs,weights(\"Conv2d_\" + str(blockId)), stride, padding='SAME') \n + biases(\"Conv2d_\" + str(blockId)))\n\ndef weights(layerName):\n return variables[\"MobilenetV1/\" + layerName + \"/weights\"]['x']\n\ndef biases(layerName):\n return variables[\"MobilenetV1/\" + layerName + \"/biases\"]['x']\n\ndef depthwiseWeights(layerName):\n return variables[\"MobilenetV1/\" + layerName + \"/depthwise_weights\"]['x']\n\ndef separableConv(inputs, stride, blockID, dilations):\n if (dilations == None):\n dilations = [1,1]\n \n dwLayer = \"Conv2d_\" + str(blockID) + \"_depthwise\"\n pwLayer = \"Conv2d_\" + str(blockID) + \"_pointwise\"\n \n w = tf.nn.depthwise_conv2d(inputs,depthwiseWeights(dwLayer),stride, 'SAME',rate=dilations, data_format='NHWC')\n w = tf.nn.bias_add(w,biases(dwLayer))\n w = tf.nn.relu6(w)\n\n w = tf.nn.conv2d(w,weights(pwLayer), [1,1,1,1], padding='SAME')\n w = tf.nn.bias_add(w,biases(pwLayer))\n w = tf.nn.relu6(w)\n\n return w\n\n\nimage = tf.placeholder(tf.float32, shape=[1, None, None, 3],name='image')\n\nx = image\nrate = [1,1]\nbuff = []\n# conv_res = {}\nwith tf.variable_scope(None, 'MobilenetV1'):\n \n for m in layers:\n strinde = [1,m['stride'],m['stride'],1]\n rate = [m['rate'],m['rate']]\n if (m['convType'] == 
\"conv2d\"):\n x = conv(x,strinde,m['blockId'])\n buff.append(x)\n elif (m['convType'] == \"separableConv\"):\n x = separableConv(x,strinde,m['blockId'],rate)\n buff.append(x)\n\nheatmaps = convToOutput(x, 'heatmap_2')\noffsets = convToOutput(x, 'offset_2')\ndisplacementFwd = convToOutput(x, 'displacement_fwd_2')\ndisplacementBwd = convToOutput(x, 'displacement_bwd_2')\nheatmaps = tf.sigmoid(heatmaps,'heatmap')\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n sess.run(init)\n saver = tf.train.Saver()\n\n ans = sess.run([heatmaps,offsets,displacementFwd,displacementBwd], feed_dict={\n image: [np.ndarray(shape=(width, height, 3),dtype=np.float32)]\n }\n )\n\n save_dir = './checkpoints'\n save_path = os.path.join(save_dir, 'model.ckpt')\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n save_path = saver.save(sess, save_path)\n\n tf.train.write_graph(sess.graph,\"./models/\",\"model.pbtxt\")\n\n # Result\n input_image = read_imgfile(\"./images/tennis_in_crowd.jpg\",width,height)\n input_image = np.array(input_image,dtype=np.float32)\n input_image = input_image.reshape(1,width,height,3)\n mobileNetOutput = sess.run(x, feed_dict={ image: input_image } )\n\n heatmaps_result,offsets_result,displacementFwd_result,displacementBwd_result = sess.run(\n [heatmaps,offsets,displacementFwd,displacementBwd], feed_dict={ image: input_image } )\n\n #print(input_image)\n print(input_image.shape)\n print(np.mean(input_image))\n\n count = 0\n for b in buff:\n conv_result = sess.run(b, feed_dict={ image: input_image } )\n print(\"========\")\n print(count)\n print(conv_result[0:1, 0:1, :])\n print(conv_result.shape)\n print(np.mean(conv_result))\n count += 1\n\n\n print(\"========\")\n print(\"mobileNetOutput\")\n print(mobileNetOutput[0:1, 0:1, :])\n print(mobileNetOutput.shape)\n print(np.mean(mobileNetOutput))\n \n heatmaps_result = heatmaps_result[0]\n\n print(\"========\")\n print(\"heatmaps\")\n print(heatmaps_result[0:1, 0:1, :])\n print(heatmaps_result.shape)\n print(np.mean(heatmaps_result))"
] |
[
[
"tensorflow.nn.relu6",
"tensorflow.Variable",
"tensorflow.cast",
"tensorflow.sigmoid",
"tensorflow.placeholder",
"tensorflow.reshape",
"numpy.ndarray",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"numpy.mean",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array",
"tensorflow.train.write_graph"
]
] |
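The core bookkeeping in the converter above is toOutputStridedLayers, which caps the network's cumulative stride at outputStride and converts any further striding into dilation rates (the atrous-convolution trick). A small trace with a made-up four-layer definition, assuming the function above is importable:

    demo = toOutputStridedLayers(
        [['conv2d', 2], ['separableConv', 2], ['separableConv', 2], ['separableConv', 2]],
        outputStride=4)
    for m in demo:
        print(m['blockId'], m['convType'], m['stride'], m['rate'], m['outputStride'])
    # 0 conv2d 2 1 2
    # 1 separableConv 2 1 4
    # 2 separableConv 1 1 4   <- stride frozen once outputStride is reached
    # 3 separableConv 1 2 4   <- subsequent strides reappear as dilation rate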
andrewwarrington/cost-optimal-particle-filter
|
[
"a6acb60ca90c7f7b984182891d39adeb7e05724f"
] |
[
"inhomogenousPaths/boSolverInhomogenous.py"
] |
[
"# MIT License\n#\n# Copyright (c) 2018, Andrew Warrington.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nboSolver.py\nAW\n\nTL;DR -\n\"\"\"\n\n# Import stock modules.\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport GPy\nimport GPyOpt\nfrom functools import partial\n\n# Import custom modules.\nimport inhomogenousPaths.generateRandomCourse as grc\nimport inhomogenousPaths.evaluateScheduleAndCourse as esac\n\nsamplesToMake = 1\n\ncourse = grc.generate_course(2)\nplt.figure()\nplt.scatter(course['x'][:, 0], course['x'][:, 1])\nplt.axis('equal')\nplt.pause(0.1)\n\n# Set up gpyopt stuff.\ndomain = [{'name': 'samples', 'type': 'continuous', 'domain': (1, grc.t_max), 'dimensionality': samplesToMake}]\n\n\ndef f(_s):\n\treturn esac.evaluate(_s, course, _return_just_value=True)\n\n\nmyBopt = GPyOpt.methods.BayesianOptimization(f=f, domain=domain, maximize=True)\nmyBopt.run_optimization(max_iter=100)\nmyBopt.plot_acquisition()\n\nY_s = np.squeeze(myBopt.Y)\nm = np.argmax(Y_s)\nY_max = Y_s[m]\nX_max = myBopt.X[m, :]\n\n\n\n\np = 0\n\n"
] |
[
[
"matplotlib.pyplot.scatter",
"numpy.squeeze",
"numpy.argmax",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
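Note that GPyOpt's BayesianOptimization also records the incumbent itself via the x_opt / fx_opt attributes, which sidesteps the manual argmax over myBopt.Y; with maximize=True, some GPyOpt versions store internally negated objective values in Y, so the sign convention of the manual scan above is worth double-checking against these:

    print(myBopt.x_opt, myBopt.fx_opt)   # GPyOpt's own best-point bookkeeping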
Kilichbek/artemis-m2-transformer
|
[
"99f7e797965710bf2565283d6b5028a6fe32664c"
] |
[
"data/field.py"
] |
[
"# coding: utf8\nimport base64\nimport csv\nimport os\nimport pickle\nimport shutil\nimport sys\nimport warnings\nfrom collections import Counter, OrderedDict\nfrom itertools import chain\n\nimport numpy as np\nimport six\nimport torch\nfrom torch.utils.data.dataloader import default_collate\nfrom tqdm import tqdm\n\nfrom .dataset import Dataset\nfrom .utils import get_tokenizer\nfrom .vocab import Vocab\n\ncsv.field_size_limit(sys.maxsize)\n\nclass RawField(object):\n \"\"\" Defines a general datatype.\n\n Every dataset consists of one or more types of data. For instance,\n a machine translation dataset contains paired examples of text, while\n an image captioning dataset contains images and texts.\n Each of these types of data is represented by a RawField object.\n An RawField object does not assume any property of the data type and\n it holds parameters relating to how a datatype should be processed.\n\n Attributes:\n preprocessing: The Pipeline that will be applied to examples\n using this field before creating an example.\n Default: None.\n postprocessing: A Pipeline that will be applied to a list of examples\n using this field before assigning to a batch.\n Function signature: (batch(list)) -> object\n Default: None.\n \"\"\"\n\n def __init__(self, preprocessing=None, postprocessing=None):\n self.preprocessing = preprocessing\n self.postprocessing = postprocessing\n\n def preprocess(self, x):\n \"\"\" Preprocess an example if the `preprocessing` Pipeline is provided. \"\"\"\n if self.preprocessing is not None:\n return self.preprocessing(x)\n else:\n return x\n\n def process(self, batch, *args, **kwargs):\n \"\"\" Process a list of examples to create a batch.\n\n Postprocess the batch with user-provided Pipeline.\n\n Args:\n batch (list(object)): A list of object from a batch of examples.\n Returns:\n object: Processed object given the input and custom\n postprocessing Pipeline.\n \"\"\"\n if self.postprocessing is not None:\n batch = self.postprocessing(batch)\n return default_collate(batch)\n\n\nclass Merge(RawField):\n def __init__(self, *fields):\n super(Merge, self).__init__()\n self.fields = fields\n\n def preprocess(self, x):\n return tuple(f.preprocess(x) for f in self.fields)\n\n def process(self, batch, *args, **kwargs):\n if len(self.fields) == 1:\n batch = [batch, ]\n else:\n batch = list(zip(*batch))\n\n out = list(f.process(b, *args, **kwargs) for f, b in zip(self.fields, batch))\n return out\n\nclass TextField(RawField):\n vocab_cls = Vocab\n # Dictionary mapping PyTorch tensor dtypes to the appropriate Python\n # numeric type.\n dtypes = {\n torch.float32: float,\n torch.float: float,\n torch.float64: float,\n torch.double: float,\n torch.float16: float,\n torch.half: float,\n\n torch.uint8: int,\n torch.int8: int,\n torch.int16: int,\n torch.short: int,\n torch.int32: int,\n torch.int: int,\n torch.int64: int,\n torch.long: int,\n }\n punctuations = [\"''\", \"'\", \"``\", \"`\", \"-LRB-\", \"-RRB-\", \"-LCB-\", \"-RCB-\", \\\n \".\", \"?\", \"!\", \",\", \":\", \"-\", \"--\", \"...\", \";\"]\n\n def __init__(self, use_vocab=True, init_token=None, eos_token=None, fix_length=None, dtype=torch.long,\n preprocessing=None, postprocessing=None, lower=False, tokenize=(lambda s: s.split()),\n remove_punctuation=False, include_lengths=False, batch_first=True, pad_token=\"<pad>\",\n unk_token=\"<unk>\", pad_first=False, truncate_first=False, vectors=None, nopoints=True):\n self.use_vocab = use_vocab\n self.init_token = init_token\n self.eos_token = eos_token\n self.fix_length 
= fix_length\n self.dtype = dtype\n self.lower = lower\n self.tokenize = get_tokenizer(tokenize)\n self.remove_punctuation = remove_punctuation\n self.include_lengths = include_lengths\n self.batch_first = batch_first\n self.pad_token = pad_token\n self.unk_token = unk_token\n self.pad_first = pad_first\n self.truncate_first = truncate_first\n self.vocab = None\n self.vectors = vectors\n if nopoints:\n self.punctuations.append(\"..\")\n\n super(TextField, self).__init__(preprocessing, postprocessing)\n\n def preprocess(self, x):\n if six.PY2 and isinstance(x, six.string_types) and not isinstance(x, six.text_type):\n x = six.text_type(x, encoding='utf-8')\n if self.lower:\n x = six.text_type.lower(x)\n x = self.tokenize(x.rstrip('\\n'))\n if self.remove_punctuation:\n x = [w for w in x if w not in self.punctuations]\n if self.preprocessing is not None:\n return self.preprocessing(x)\n else:\n return x\n\n def process(self, batch, device=None):\n padded = self.pad(batch)\n tensor = self.numericalize(padded, device=device)\n return tensor\n\n def build_vocab(self, *args, **kwargs):\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self]\n else:\n sources.append(arg)\n\n for data in sources:\n for x in data:\n x = self.preprocess(x)\n try:\n counter.update(x)\n except TypeError:\n counter.update(chain.from_iterable(x))\n\n specials = list(OrderedDict.fromkeys([\n tok for tok in [self.unk_token, self.pad_token, self.init_token,\n self.eos_token]\n if tok is not None]))\n self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)\n\n def pad(self, minibatch):\n \"\"\"Pad a batch of examples using this field.\n Pads to self.fix_length if provided, otherwise pads to the length of\n the longest example in the batch. Prepends self.init_token and appends\n self.eos_token if those attributes are not None. 
Returns a tuple of the\n padded list and a list containing lengths of each example if\n `self.include_lengths` is `True`, else just\n returns the padded list.\n \"\"\"\n minibatch = list(minibatch)\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n if self.pad_first:\n padded.append(\n [self.pad_token] * max(0, max_len - len(x)) +\n ([] if self.init_token is None else [self.init_token]) +\n list(x[-max_len:] if self.truncate_first else x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]))\n else:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[-max_len:] if self.truncate_first else x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return padded, lengths\n return padded\n\n def numericalize(self, arr, device=None):\n \"\"\"Turn a batch of examples that use this field into a list of Variables.\n If the field has include_lengths=True, a tensor of lengths will be\n included in the return value.\n Arguments:\n arr (List[List[str]], or tuple of (List[List[str]], List[int])):\n List of tokenized and padded examples, or tuple of List of\n tokenized and padded examples and List of lengths of each\n example if self.include_lengths is True.\n device (str or torch.device): A string or instance of `torch.device`\n specifying which device the Variables are going to be created on.\n If left as default, the tensors will be created on cpu. Default: None.\n \"\"\"\n if self.include_lengths and not isinstance(arr, tuple):\n raise ValueError(\"Field has include_lengths set to True, but \"\n \"input data is not a tuple of \"\n \"(data batch, batch lengths).\")\n if isinstance(arr, tuple):\n arr, lengths = arr\n lengths = torch.tensor(lengths, dtype=self.dtype, device=device)\n\n if self.use_vocab:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, self.vocab)\n\n var = torch.tensor(arr, dtype=self.dtype, device=device)\n else:\n if self.vectors:\n arr = [[self.vectors[x] for x in ex] for ex in arr]\n if self.dtype not in self.dtypes:\n raise ValueError(\n \"Specified Field dtype {} can not be used with \"\n \"use_vocab=False because we do not know how to numericalize it. 
\"\n \"Please raise an issue at \"\n \"https://github.com/pytorch/text/issues\".format(self.dtype))\n numericalization_func = self.dtypes[self.dtype]\n # It doesn't make sense to explictly coerce to a numeric type if\n # the data is sequential, since it's unclear how to coerce padding tokens\n # to a numeric type.\n arr = [numericalization_func(x) if isinstance(x, six.string_types)\n else x for x in arr]\n\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, None)\n\n var = torch.cat([torch.cat([a.unsqueeze(0) for a in ar]).unsqueeze(0) for ar in arr])\n\n # var = torch.tensor(arr, dtype=self.dtype, device=device)\n if not self.batch_first:\n var.t_()\n var = var.contiguous()\n\n if self.include_lengths:\n return var, lengths\n return var\n\n def decode(self, word_idxs, join_words=True):\n if isinstance(word_idxs, list) and len(word_idxs) == 0:\n return self.decode([word_idxs, ], join_words)[0]\n if isinstance(word_idxs, list) and isinstance(word_idxs[0], int):\n return self.decode([word_idxs, ], join_words)[0]\n elif isinstance(word_idxs, np.ndarray) and word_idxs.ndim == 1:\n return self.decode(word_idxs.reshape((1, -1)), join_words)[0]\n elif isinstance(word_idxs, torch.Tensor) and word_idxs.ndimension() == 1:\n return self.decode(word_idxs.unsqueeze(0), join_words)[0]\n\n captions = []\n for wis in word_idxs:\n caption = []\n for wi in wis:\n word = self.vocab.itos[int(wi)]\n if word == self.eos_token:\n break\n caption.append(word)\n if join_words:\n caption = ' '.join(caption)\n captions.append(caption)\n return captions\n\nclass ArtEmisDetectionsField(RawField):\n def __init__(self, preprocessing=None, postprocessing=None, detections_path=None, max_detections=100):\n self.max_detections = max_detections\n \n self.detections_path = detections_path\n self.FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']\n self.features = dict()\n\n # load the list of ('genre/img_names', id) and create dictionary\n with open(os.path.join(detections_path,'wikiart_split.pkl'),'rb') as file:\n self.paints_ids_dict = dict(pickle.load(file))\n \n with open(os.path.join(detections_path,'tmp.csv'), \"r+\") as tsv_in_file:\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames = self.FIELDNAMES)\n for item in reader:\n item['image_id'] = int(item['image_id'])\n item['image_h'] = int(item['image_h'])\n item['image_w'] = int(item['image_w'])\n item['num_boxes'] = int(item['num_boxes'])\n for field in ['boxes', 'features']:\n data = item[field]\n # buf = base64.decodestring(data)\n buf = base64.b64decode(data[1:])\n temp = np.frombuffer(buf, dtype=np.float32)\n item[field] = temp.reshape((item['num_boxes'],-1))\n self.features[item['image_id']] = item['features']\n \n self.not_found = set()\n super(ArtEmisDetectionsField, self).__init__(preprocessing, postprocessing)\n\n def preprocess(self, x):\n\n id = self.paints_ids_dict[x]\n \n try:\n precomp_data = self.features[id]\n except KeyError:\n self.not_found.add(id)\n warnings.warn('Could not find detections for %s (Total Missing: %d)' % (x, len(self.not_found)))\n precomp_data = np.random.rand(10, 2048)\n \n delta = self.max_detections - precomp_data.shape[0]\n if delta > 0:\n precomp_data = np.concatenate([precomp_data, np.zeros((delta, precomp_data.shape[1]))], axis=0)\n elif delta < 0:\n precomp_data = precomp_data[:self.max_detections]\n\n return precomp_data.astype(np.float32)\n\nclass EmotionField(RawField):\n def __init__(self, preprocessing=None, postprocessing=None, emotions=None):\n \n 
self.emotion_mapping = { key: value for value, key in enumerate(emotions)}\n super(EmotionField, self).__init__(preprocessing, postprocessing)\n\n def preprocess(self, x):\n return self.emotion_mapping[x]\n"
] |
[
[
"torch.tensor",
"numpy.frombuffer",
"numpy.random.rand",
"numpy.zeros",
"torch.utils.data.dataloader.default_collate"
]
] |
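A minimal sketch of the TextField pipeline above (preprocess, pad, numericalize), assuming the package's Vocab behaves like torchtext's; the sentences are invented:

    field = TextField(init_token='<bos>', eos_token='<eos>', lower=True)
    field.build_vocab(['a cat sat on the mat', 'a dog ran'])
    batch = [field.preprocess(s) for s in ['a cat sat', 'a dog ran far']]
    tensor = field.process(batch)   # pads to the longest example, then maps tokens to ids
    print(tensor.shape)             # (2, longest + 2) with batch_first=True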
onkar-sima-ai/tvm
|
[
"2d321202fb2683edc5b18179ac564b5218e2fcbf"
] |
[
"python/tvm/runtime/vm.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin\n\"\"\"\nThe Relay Virtual Machine runtime.\n\nImplements a Python interface to executing the compiled VM object.\n\"\"\"\nimport numpy as np\n\nimport tvm\nfrom tvm.runtime import Module\nfrom tvm._ffi.runtime_ctypes import TVMByteArray\nfrom tvm._ffi import base as _base\nfrom .object import Object\nfrom . import _ffi_api, container\nfrom ..rpc.base import RPC_SESS_MASK\n\n\ndef _convert(arg, cargs):\n if isinstance(arg, Object):\n cargs.append(arg)\n elif isinstance(arg, np.ndarray):\n nd_arr = tvm.nd.array(arg, device=tvm.cpu(0))\n cargs.append(nd_arr)\n elif isinstance(arg, tvm.runtime.NDArray):\n cargs.append(arg)\n elif isinstance(arg, (tuple, list)):\n field_args = []\n for field in arg:\n _convert(field, field_args)\n cargs.append(container.tuple_object(field_args))\n elif isinstance(arg, (_base.numeric_types, bool)):\n dtype = \"int32\" if isinstance(arg, (_base.integer_types, bool)) else \"float32\"\n value = tvm.nd.array(np.array(arg, dtype=dtype), device=tvm.cpu(0))\n cargs.append(value)\n elif isinstance(arg, str):\n cargs.append(arg)\n else:\n raise TypeError(\"Unsupported type: %s\" % (type(arg)))\n\n\ndef convert(args):\n cargs = []\n for arg in args:\n _convert(arg, cargs)\n\n return cargs\n\n\nclass Executable(object):\n \"\"\"Relay VM executable\"\"\"\n\n def __init__(self, mod):\n self.mod = mod\n self._function_params = {}\n self._save = self.mod[\"save\"]\n self._get_lib = self.mod[\"get_lib\"]\n self._get_bytecode = self.mod[\"get_bytecode\"]\n self._get_constants = self.mod[\"get_constants\"]\n self._get_virtual_devices = self.mod[\"get_virtual_devices\"]\n self._get_primitives = self.mod[\"get_primitives\"]\n self._get_stats = self.mod[\"get_stats\"]\n self._get_function_arity = self.mod[\"get_function_arity\"]\n self._get_function_param_name = self.mod[\"get_function_param_name\"]\n\n def save(self):\n \"\"\"Save the Relay VM Executable.\n\n Returns\n -------\n code : bytearray\n The binary blob representing a serialized Relay VM executable. It\n can then be saved to disk and later deserialized into a new\n Executable.\n\n lib : :py:class:`~tvm.runtime.Module`\n The runtime module that contains the generated code. It is\n basically a library that is composed of hardware dependent code.\n\n Notes\n -----\n The returned code is organized with the following sections in order.\n - Global section. This section contains the globals used by the\n virtual machine.\n\n - Constant section. This section is used to store the constant pool of\n a virtual machine.\n\n - Primitive name section. 
This section is introduced to accommodate\n          the list of primitive operator names that will be invoked by the\n          virtual machine.\n\n        - Code section. The VM functions, including bytecode, are sitting in\n          this section.\n\n        Examples\n        --------\n\n        .. code-block:: python\n\n            import numpy as np\n            import tvm\n            from tvm import te\n            from tvm import relay\n            # define a simple network.\n            x = relay.var('x', shape=(10, 10))\n            f = relay.Function([x], x + x)\n            mod = tvm.IRModule({\"main\": f})\n            # create a Relay VM.\n            dev = tvm.cpu()\n            target = \"llvm\"\n            executable = relay.vm.compile(mod, target)\n            code, lib = executable.save()\n            # save and load the code and lib file.\n            tmp = tvm.contrib.utils.tempdir()\n            path_lib = tmp.relpath(\"lib.so\")\n            lib.export_library(path_lib)\n            with open(tmp.relpath(\"code.ro\"), \"wb\") as fo:\n                fo.write(code)\n            loaded_lib = tvm.runtime.load_module(path_lib)\n            loaded_code = bytearray(open(tmp.relpath(\"code.ro\"), \"rb\").read())\n            # deserialize.\n            des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)\n            # execute the deserialized executable.\n            x_data = np.random.rand(10, 10).astype('float32')\n            des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev)\n            res = des_vm.run(x_data)\n            print(res.numpy())\n        \"\"\"\n        return self._save(), self._get_lib()\n\n    @staticmethod\n    def load_exec(bytecode, lib):\n        \"\"\"Construct an executable from saved artifacts.\n\n        Parameters\n        ----------\n        bytecode : bytearray\n            The binary blob representing the Relay VM bytecode.\n\n        lib : :py:class:`~tvm.runtime.Module`\n            The runtime module that contains the generated code.\n\n        Returns\n        -------\n        exec: Executable\n            An executable constructed using the provided artifacts.\n        \"\"\"\n        if isinstance(bytecode, (bytes, str)):\n            bytecode = bytearray(bytecode)\n        elif not isinstance(bytecode, (bytearray, TVMByteArray)):\n            # Report the offending type; the original formatted `code` here,\n            # a name that is unbound on this branch.\n            raise TypeError(\n                \"bytecode is expected to be the type of bytearray \"\n                + \"or TVMByteArray, but received {}\".format(type(bytecode))\n            )\n\n        if lib is not None and not isinstance(lib, tvm.runtime.Module):\n            raise TypeError(\n                \"lib is expected to be the type of tvm.runtime.Module\"\n                + \", but received {}\".format(type(lib))\n            )\n\n        return Executable(_ffi_api.Load_Executable(bytecode, lib))\n\n    @property\n    def lib(self):\n        \"\"\"Get the library that contains hardware dependent code.\n\n        Returns\n        -------\n        ret : :py:class:`~tvm.runtime.Module`\n            The runtime module that contains hardware dependent code.\n        \"\"\"\n        return self._get_lib()\n\n    @property\n    def stats(self):\n        \"\"\"Get the statistics of the Relay VM executable.\n\n        Returns\n        -------\n        ret : String\n            The statistic information of the VM executable.\n        \"\"\"\n        return self._get_stats()\n\n    @property\n    def primitive_ops(self):\n        \"\"\"Get the name of the primitive ops contained in the executable.\n\n        Returns\n        -------\n        ret : List[String]\n            The list of primitive ops.\n        \"\"\"\n        ret = []\n        num_primitives = _ffi_api.GetNumOfPrimitives(self.module)\n        for i in range(num_primitives):\n            ret.append(_ffi_api.GetPrimitiveFields(self.module, i))\n        return ret\n\n    @property\n    def bytecode(self):\n        \"\"\"Get the bytecode of the Relay VM executable.\n\n        Returns\n        -------\n        ret : String\n            The bytecode of the executable.\n\n        Notes\n        -----\n        The bytecode is in the following format:\n          func_name reg_file_size num_instructions\n\n          param1 param2 ... paramM\n\n          instruction1\n\n          instruction2\n\n          ...\n\n          instructionN\n\n        Each instruction is printed in the following format:\n          hash opcode field1 ... 
fieldX # The text format.\n\n The part starting from # is only used for visualization and debugging.\n The real serialized code doesn't contain it, therefore the deserializer\n doesn't need to deal with it as well.\n \"\"\"\n return self._get_bytecode()\n\n @property\n def constants(self):\n \"\"\"Returns a human-readable description of all the constants in the executable.\n Useful for debugging and diffing generated executables in unit tests.\"\"\"\n return self._get_constants()\n\n @property\n def virtual_devices(self):\n \"\"\"Returns a human-readable description of all the (virtual) devices in the executable.\"\"\"\n return self._get_virtual_devices()\n\n @property\n def primitive(self):\n \"\"\"Returns a human-readable dencription of all the primitives (ie PackedFuncs) in the\n executable\"\"\"\n return self._get_primitives()\n\n @property\n def globals(self):\n \"\"\"Get the globals used by the Relay VM executable.\n\n Returns\n -------\n ret : List[String]\n The globals contained in the executable.\n \"\"\"\n ret = []\n num_globals = _ffi_api.GetNumOfGlobals(self.module)\n for i in range(num_globals):\n ret.append(_ffi_api.GetGlobalFields(self.module, i))\n return ret\n\n @property\n def module(self):\n \"\"\"Return the runtime module contained in a virtual machine executable.\"\"\"\n return self.mod\n\n def get_function_params(self, func_name):\n \"\"\"Get VM Function parameters\"\"\"\n if func_name in self._function_params:\n return self._function_params[func_name]\n arity = self._get_function_arity(func_name)\n assert arity >= 0\n params = []\n for i in range(arity):\n p = self._get_function_param_name(func_name, i)\n assert p\n params.append(p)\n self._function_params[func_name] = params\n return params\n\n\nclass VirtualMachine(object):\n \"\"\"Relay VM runtime.\n\n Parameters\n ----------\n exe : Executable\n The VM executable.\n\n device : tvm.runtime.Device or List[tvm.runtime.Device]\n The device(s) on which the model will run.\n Currently at most one device per device type is supported.\n\n memory_cfg : str or Dict[tvm.runtime.Device, str], optional\n Config the type of memory allocator. The allocator type can be [\"naive\",\n \"pooled\"]. If memory_cfg is None, all devices will use pooled allocator\n by default. If memory_cfg is string, all devices will use the specified\n allocator type. If memory_cfg is a dict, each device uses the allocator\n type specified in the dict, or pooled allocator if not specified in the\n dict.\n \"\"\"\n\n NAIVE_ALLOCATOR = 1\n POOLED_ALLOCATOR = 2\n\n def __init__(self, exe, device, memory_cfg=None):\n \"\"\"\n Construct a VirtualMachine wrapper class which provides a simple\n interface over the raw C++ Module based API.\n\n Parameters\n ----------\n exe: Union[Executable, Module]\n The executable either with the wrapper Python type or the raw runtime.Module.\n\n In most cases this will be the Python wrapper class tvm.runtime.vm.Executable but\n if you instead get the underlying runtime.Module subclass (i.e `exe.mod`) you\n can directly pass it to this method.\n\n This case can occur when doing things such as RPC where TVM's module APIs\n return the raw modules, not the wrapped modules. 
This constructor will\n handle this internally.\n\n device: Union[Device, List[Device]]\n The device, or devices on which to execute the VM code.\n\n memory_cfg: Optional[str]\n The allocator behavior to use for the VM.\n\n Returns\n -------\n vm: VirtualMachine\n A VM wrapper object.\n \"\"\"\n if not isinstance(exe, Executable) and not isinstance(exe, Module):\n raise TypeError(\n \"exe is expected to be the type of Executable, \"\n + \"but received {}\".format(type(exe))\n )\n\n if not isinstance(exe, Executable):\n exe = Executable(exe)\n\n self.module = exe.mod[\"vm_load_executable\"]()\n self._exec = exe\n self._init = self.module[\"init\"]\n self._invoke = self.module[\"invoke\"]\n self._invoke_stateful = self.module[\"invoke_stateful\"]\n self._get_output = self.module[\"get_output\"]\n self._get_num_outputs = self.module[\"get_num_outputs\"]\n self._get_input_index = self.module[\"get_input_index\"]\n self._set_input = self.module[\"set_input\"]\n self._setup_device(device, memory_cfg)\n\n def _setup_device(self, dev, memory_cfg):\n \"\"\"Init devices and allocators.\"\"\"\n devs = dev\n if not isinstance(dev, (list, tuple)):\n if not isinstance(dev, tvm.runtime.Device):\n raise TypeError(\"dev is expected to be Device or List[Device]\")\n devs = [dev]\n\n # CPU is required for executing shape functions\n if not any(c.device_type % RPC_SESS_MASK == tvm.cpu().device_type for c in devs):\n devs.append(tvm.cpu())\n\n default_alloc_type = VirtualMachine.POOLED_ALLOCATOR\n if memory_cfg is None:\n memory_cfg = {}\n elif isinstance(memory_cfg, str):\n assert memory_cfg in [\"naive\", \"pooled\"]\n if memory_cfg == \"naive\":\n default_alloc_type = VirtualMachine.NAIVE_ALLOCATOR\n memory_cfg = {}\n elif not isinstance(memory_cfg, dict):\n raise TypeError(\n \"memory_cfg is expected be string or dictionary, \"\n + \"but received {}\".format(type(memory_cfg))\n )\n init_args = []\n for device in devs:\n init_args.append(device.device_type % RPC_SESS_MASK)\n init_args.append(device.device_id)\n alloc_type = memory_cfg[device] if device in memory_cfg else default_alloc_type\n init_args.append(alloc_type)\n self._init(*init_args)\n\n def set_input(self, func_name, *args, **kwargs):\n \"\"\"Set the input to a function.\n\n Parameters\n ----------\n func_name : str\n The name of the function.\n\n args : list[tvm.runtime.NDArray] or list[np.ndarray]\n The arguments to the function.\n\n kwargs: dict of str to tvm.runtime.NDArray or np.ndarray\n Named arguments to the function.\n \"\"\"\n if kwargs:\n # kwargs is a super set of the required function parameters. 
We\n # only find the ones that are needed.\n func_params = self._exec.get_function_params(func_name)\n new_args = [None] * len(func_params)\n cnt = 0\n for k in kwargs:\n if k in func_params:\n idx = func_params.index(k)\n new_args[idx] = kwargs[k]\n cnt += 1\n assert len(args) + cnt == len(func_params)\n idx = 0\n for i, arg in enumerate(new_args):\n if arg is None:\n new_args[i] = args[idx]\n idx += 1\n args = new_args\n cargs = convert(args)\n self._set_input(func_name, *cargs)\n\n def invoke(self, func_name, *args, **kwargs):\n \"\"\"Invoke a function.\n\n Parameters\n ----------\n func_name : str\n The name of the function.\n\n args : list[tvm.runtime.NDArray] or list[np.ndarray]\n The arguments to the function.\n\n kwargs: dict of str to tvm.runtime.NDArray or np.ndarray\n Named arguments to the function.\n\n Returns\n -------\n result : Object\n The output.\n \"\"\"\n if args or kwargs:\n self.set_input(func_name, *args, **kwargs)\n return self._invoke(func_name)\n\n def run(self, *args, **kwargs):\n \"\"\"Run the main function.\n\n Parameters\n ----------\n args : list[tvm.runtime.NDArray] or list[np.ndarray]\n The arguments to the function.\n\n kwargs: dict of str to tvm.runtime.NDArray or np.ndarray\n Named arguments to the function.\n\n Returns\n -------\n result : Object\n The output.\n \"\"\"\n return self.invoke(\"main\", *args, **kwargs)\n\n def invoke_stateful(self, func_name, *args, **kwargs):\n \"\"\"Invoke a function and ignore the returned result.\n\n Use this function when running over rpc because it is currently\n impossible to return a ADT object over rpc. To get the outputs, use\n :py:func`get_outputs`.\n\n Parameters\n ----------\n func_name : str\n The name of the function.\n\n args : list[tvm.runtime.NDArray] or list[np.ndarray]\n The arguments to the function.\n\n kwargs: dict of str to tvm.runtime.NDArray or np.ndarray\n Named arguments to the function.\n \"\"\"\n if args or kwargs:\n self.set_input(func_name, *args, **kwargs)\n self._invoke_stateful(func_name)\n\n def get_outputs(self):\n \"\"\"Get the outputs from a call to :py:func`invoke_stateful`.\n\n Returns\n -------\n outputs : List[NDArray]\n \"\"\"\n return [self._get_output(i) for i in range(self._get_num_outputs())]\n\n def get_input_index(self, input_name, func_name=\"main\"):\n \"\"\"Get inputs index via input name.\n Parameters\n ----------\n name : str\n The input key name\n func_name : str\n The function name\n\n Returns\n -------\n index: int\n The input index. -1 will be returned if the given input name is not found.\n \"\"\"\n return self._get_input_index(input_name, func_name)\n\n def benchmark(\n self,\n device,\n *args,\n func_name=\"main\",\n repeat=5,\n number=5,\n min_repeat_ms=None,\n end_to_end=False,\n **kwargs,\n ):\n \"\"\"Calculate runtime of a function by repeatedly calling it.\n\n Use this function to get an accurate measurement of the runtime of a function. The function\n is run multiple times in order to account for variability in measurements, processor speed\n or other external factors. Mean, median, standard deviation, min and max runtime are all\n reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that\n synchonization and data transfer operations are not counted towards the runtime. This allows\n for fair comparison of runtimes across different functions and models. The `end_to_end` flag\n switches this behavior to include data transfer operations in the runtime.\n\n The benchmarking loop looks approximately like so:\n\n .. 
code-block:: python\n\n for r in range(repeat):\n time_start = now()\n for n in range(number):\n func_name()\n time_end = now()\n total_times.append((time_end - time_start)/number)\n\n\n Parameters\n ----------\n func_name : str\n The function to benchmark\n\n repeat : int\n Number of times to run the outer loop of the timing code (see above). The output will\n contain `repeat` number of datapoints.\n\n number : int\n Number of times to run the inner loop of the timing code. This inner loop is run in\n between the timer starting and stopping. In order to amortize any timing overhead,\n `number` should be increased when the runtime of the function is small (less than a 1/10\n of a millisecond).\n\n min_repeat_ms : Optional[float]\n If set, the inner loop will be run until it takes longer than `min_repeat_ms`\n milliseconds. This can be used to ensure that the function is run enough to get an\n accurate measurement.\n\n end_to_end : bool\n If set, include time to transfer input tensors to the device and time to transfer\n returned tensors in the total runtime. This will give accurate timings for end to end\n workloads.\n\n args : Sequence[Object]\n Arguments to the function. These are cached before running timing code, so that data\n transfer costs are not counted in the runtime.\n\n kwargs : Dict[str, Object]\n Named arguments to the function. These are cached like `args`.\n\n Returns\n -------\n timing_results : BenchmarkResult\n Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to\n access the individual runtimes (in seconds).\n \"\"\"\n min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms\n if end_to_end:\n # We need to unpack keyword arguments into positional arguments\n packed_args = list(args)\n for k, v in kwargs.items():\n i = self.get_input_index(k, func_name)\n if i < 0:\n raise TypeError(f\"{func_name}() got an unexpected keyword argument '{k}'\")\n while i >= len(packed_args):\n packed_args.append(None)\n packed_args[i] = v\n return self.module.time_evaluator(\n \"invoke_return_to_device\",\n device,\n repeat=repeat,\n number=number,\n min_repeat_ms=min_repeat_ms,\n )(func_name, device.device_type % RPC_SESS_MASK, device.device_id, *packed_args)\n if args or kwargs:\n self.set_input(func_name, *args, **kwargs)\n return self.module.time_evaluator(\n \"invoke\", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms\n )(func_name)\n"
] |
[
[
"numpy.array"
]
] |
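The `set_input` method in the TVM VirtualMachine wrapper above merges positional and keyword arguments in two passes: each keyword argument is first placed at the index reported by `get_function_params`, and the remaining `None` slots are then filled from the positional arguments in order. A minimal sketch of that merging strategy in isolation (the helper name `merge_args` and the example parameter names are illustrative, not part of the TVM API):

```python
def merge_args(func_params, args, kwargs):
    """Place kwargs at their named positions, then fill the gaps from args."""
    merged = [None] * len(func_params)
    matched = 0
    for name, value in kwargs.items():
        if name in func_params:
            merged[func_params.index(name)] = value
            matched += 1
    # Same invariant the VM asserts: every parameter must be accounted for.
    assert len(args) + matched == len(func_params)
    pos = iter(args)
    return [next(pos) if slot is None else slot for slot in merged]

# e.g. merge_args(["data", "weight"], ["w_val"], {"data": "x_val"})
# returns ["x_val", "w_val"]
```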
SmaugTheTerrible/cvtest
|
[
"ebca2da96d4d03d8ef5c5f64ae997de9a7e8dde0"
] |
[
"cvtest.py"
] |
[
"import cv2\nimport numpy\nimport pyautogui\nimport threading\n\n\ndef match(template, source=None, method=cv2.TM_CCOEFF_NORMED, threshold=0.95):\n if (source is None):\n source = screenshot()\n\n res = cv2.matchTemplate(source, template, method)\n loc = numpy.where(res >= threshold)\n return zip(*loc[::-1])\n\n\ndef screenshot():\n img = pyautogui.screenshot()\n return cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)\n\n\ndef region(topLeftX, topLeftY, width, height):\n img = pyautogui.screenshot(region=(topLeftX, topLeftY, width, height))\n return cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)\n\n\ndef fromFile(path):\n res = cv2.imread(path)\n if res is None:\n raise Exception(\"Cant read image from file\")\n return res\n\n\ndef waitUntil(img, timeout=20):\n worker = MatchWorker(img)\n worker.start()\n worker.join(timeout)\n return worker.result\n\n\ndef waitWhile(img, timeout=20):\n worker = MatchWorker(img, True)\n worker.start()\n worker.join(timeout)\n return worker.result\n\n\ndef isOnScreen(template):\n res = match(template)\n return len(res)>0\n\n\ndef centers(template):\n h,w,_ = template.shape\n locs = match(template)\n for loc in locs:\n x, y = loc\n loc = x+w/2, y+h/2\n return locs\n\n\ndef click(x, y, clicks=1):\n pyautogui.click(x,y, clicks=clicks)\n\n\nclass MatchWorker(threading.Thread):\n def __init__(self, template, invert=False):\n super(MatchWorker, self).__init__()\n self.template = template\n self.result = None\n self.invert = invert\n \n def run(self):\n self.result = self.matchLoop(self.template, self.invert)\n\n def matchLoop(self, template, invert):\n while(True):\n res = match(template)\n hasResult = len(res)>0\n condition = invert != hasResult # xor\n if (condition): \n break\n \n return res\n\n\nclass Storage():\n def __init__(self, values={}):\n self._internal = values\n \n\n def __getitem__(self, key):\n return self._internal[key]\n\n\n def __setitem__(self, key, value):\n self._internal[key] = value\n"
] |
[
[
"numpy.array",
"numpy.where"
]
] |
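The helpers in cvtest.py above compose into a simple screen-automation loop: load a template, test whether it is visible, and click each match centre. A hypothetical usage sketch (assuming the file is importable as `cvtest` and that a template image `button.png` exists; both names are illustrative):

```python
import cvtest

template = cvtest.fromFile("button.png")  # illustrative path
if cvtest.isOnScreen(template):
    for x, y in cvtest.centers(template):
        cvtest.click(x, y)
else:
    # Blocks until the template shows up; result may be None if ~20 s elapse.
    matches = cvtest.waitUntil(template, timeout=20)
    print(matches)
```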
trituenhantaoio/anfis-pytorch
|
[
"7a6bf123d69b550e46abeddd5b4a776243d43aa6"
] |
[
"jang_pendulum_example.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n ANFIS in torch: Control examples from Jang's book, chapter 17.\n Section 17.6.2: Recurrent learning, inverted pendulum case study.\n or \"Self Learning of Fuzzy Controllers Based on Temporal Back Propagation\"\n IEEE Trans on Neural Networks 3(5), Sept 1992.\n @author: James Power <[email protected]> May 8 2019\n'''\n\n# The PendulumSystem module is based very roughly on:\n# #pytorch-control-flow-weight-sharing from\n# https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\n\nimport anfis\nfrom membership import make_bell_mfs\nimport fileio.astext\n\ndtype = torch.float\nnp.random.seed(0)\n\n\nclass Pendulum():\n '''\n Represents the physical model of the pendulum.\n The internal state is a pair (theta, dtheta) - or a tensor of these.\n The mass of the cart and mass/length of the pole are hardwired.\n '''\n def __init__(self, theta=0, dtheta=0):\n '''\n Set up the pendulum; the initial state is (theta, dtheta)\n Assume theta/dtheta are measured *in degrees*.\n '''\n self._state = torch.tensor((theta, dtheta), dtype=dtype).reshape(1, 2)\n self.m_c = 1.0 # mass of cart in kg\n self.m = 0.1 # mass of pole in kg\n self.len = 0.5 # half-length of pole, in m\n\n _g = 9.81 # acceleration due to gravity in m/s\n\n @property\n def theta(self):\n return self._state[:, 0]\n\n @property\n def dtheta(self):\n return self._state[:, 1]\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, new_state):\n self._state = new_state\n\n def _theta_dot_dot_radians(self, rtheta, rdtheta, force):\n '''\n The physics bit: use the diff equations to calculate ddtheta\n N.B. all angles here (theta, dtheta, ddtheta) are in radians.\n '''\n tot_mass = self.m_c + self.m\n numer = Pendulum._g * torch.sin(rtheta) + torch.cos(rtheta) * (\n (-force - (self.m * self.len * rdtheta**2 * torch.sin(rtheta)))\n / tot_mass)\n denom = self.len * ((4./3.) -\n ((self.m * torch.cos(rtheta)**2) / tot_mass))\n return (numer / denom)\n\n def theta_dot_dot(self, force):\n '''\n Calculate and return ddtheta (assume we're working in degrees).\n '''\n rtheta = self.theta * (np.pi / 180.)\n rdtheta = self.dtheta * (np.pi / 180.)\n rddtheta = self._theta_dot_dot_radians(rtheta, rdtheta, force)\n return rddtheta * (180. 
/ np.pi)\n\n def take_step(self, force, h=10e-3):\n '''\n Update theta/dtheta to new values based on given force.\n h is the step size in seconds (so, default is 10ms).\n For convenience, return the current state.\n '''\n ddtheta = self.theta_dot_dot(force.squeeze(1)) # uses current state\n delta = torch.stack((self.dtheta, ddtheta), dim=1)\n self._state = self._state + (h * delta)\n return self.state\n\n\ndef initial_anfis():\n '''\n Build and return a (non-trained) anfis model: (theta, dtheta) -> force\n Assume range for theta is (-20, 20) and dtheta is (-50, 50)\n Use 2 Bell MFs for each input, and non-hybrid learning.\n '''\n invardefs = [\n ('theta', make_bell_mfs(20, 2, [-20, 20])),\n ('dtheta', make_bell_mfs(50, 2, [-50, 50])),\n ]\n outvars = ['force']\n anf = anfis.AnfisNet('Pendulum Controller',\n invardefs, outvars, hybrid=False)\n return anf\n\n\ndef jang_traned_anfis():\n '''\n This is the trained ANFIS model from Jang's book (pg 474)\n '''\n invardefs = [\n ('theta', make_bell_mfs(-1.59, 2.34, [-19.49, 19.49])),\n ('dtheta', make_bell_mfs(85.51, 1.94, [-23.21, 23.21])),\n ]\n outvars = ['force']\n coeffs = torch.tensor([\n [0.0502, 0.1646, -10.09],\n [0.0083, 0.0119, -1.09],\n [0.0083, 0.0119, 1.09],\n [0.0502, 0.1646, 10.09],\n ], dtype=dtype).unsqueeze(1)\n anf = anfis.AnfisNet('Pendulum Controller',\n invardefs, outvars, hybrid=False)\n anf.coeff = coeffs\n return anf\n\n\nclass PendulumSystem(torch.nn.Module):\n '''\n The pendulum system consists of an ANFIS controller and a pendulum.\n We make one copy of the ANFIS controller for each time interval.\n But: only one ANFIS object, so only one set of parameters to train.\n '''\n def __init__(self, theta=0, dtheta=0):\n super(PendulumSystem, self).__init__()\n self.anfis = initial_anfis()\n self.pendulum = Pendulum(theta, dtheta)\n self.interval = 100 # Actually the number of time intervals\n\n def forward(self, x):\n '''\n Run the anfis/pendulum pairing self.interval times.\n Return a tensor of the trajectory: (theta, dtheta, force) values.\n x.shape: n_cases * 2 (== pendulum.state.shape)\n force.shape: n_cases * 1\n this_pass.shape: n_cases * 3 (= theta, dtheta, force)\n trajectory.shape: n_cases * 3 * self.interval\n '''\n # Create an empty trajectory first, and then fill in the values:\n trajectory = torch.empty((x.shape[0], 3, self.interval))\n self.pendulum.state = x\n for i in range(self.interval):\n # First run the anfis to get the force, then apply to pendulum:\n force = self.anfis(self.pendulum.state)\n self.pendulum.take_step(force)\n # Make trajectory for this pass, and store it in the result:\n this_pass = torch.cat((self.pendulum.state, force), dim=1)\n trajectory[:, :, i] = this_pass\n return trajectory\n\n\ndef loss_from(trajectory, desired_trajectory, lam=10):\n '''\n This is a more generalised loss function for the pendulum system.\n It's basically a combination of the (sum-squared) theta and force.\n We minimise force, so the force target is always 0 (no subtraction).\n The parameter lam(bda) is the weigting given to minimising force.\n trajectory.shape: n_cases * 3 * self.interval\n '''\n traj_err = torch.sum((trajectory[:, 0]-desired_trajectory)**2, dim=1)\n force_err = torch.sum(trajectory[:, 2]**2, dim=1)\n sum_sq_err = traj_err + (lam * force_err)\n # I average these over all the input cases:\n return torch.mean(sum_sq_err)\n\n\ndef loss_from_upright(trajectory, lam=10):\n '''\n This is the default loss function for the pendulum system.\n Target is zero angle and zero force.\n trajectory.shape: 
n_cases * 3 * self.interval\n '''\n # Desired trajectory is just zero always:\n desired_trajectory = torch.zeros(trajectory.shape[2])\n return loss_from(trajectory, desired_trajectory, lam)\n\n\ndef plot_errors(errors):\n '''\n Plot the given list of error rates against no. of epochs\n '''\n plt.plot(range(len(errors)), errors, '-ro', label='errors')\n plt.ylabel('Training Error')\n plt.xlabel('Epoch')\n plt.show()\n\n\ndef plot_thetas(x_data, y_pred):\n '''\n Plot the predicted values for theta (should go towards zero)\n '''\n # Plot the zero line:\n plt.hlines(y=0, xmin=0, xmax=y_pred.shape[2], linestyle=':', color='grey')\n for i in range(y_pred.shape[0]):\n init_theta = x_data[i][0]\n init_dtheta = x_data[i][1]\n legend = 'TC{}: ({}, {})'.format(i, init_theta, init_dtheta)\n thetas = y_pred[i, 0, :].tolist()\n plt.plot(range(len(thetas)), thetas, 'b', label=legend)\n plt.legend(loc='upper right')\n plt.xlabel('Time in 10ms intervals')\n plt.ylabel('Theta in degrees')\n plt.show()\n\n\ndef train_pendulum(model, x_data, optimizer,\n epochs=500, show_plots=False,\n loss_lambda=10):\n '''\n Train the given model using the given (x,y) data.\n '''\n errors = [] # Keep a list of these for plotting afterwards\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n print(x_data.shape)\n print('### Training for {} epochs, training size = {} cases'.\n format(epochs, x_data.shape[0]))\n for t in range(epochs):\n y_pred = model(x_data)\n # Compute and print loss\n loss = loss_from_upright(y_pred, loss_lambda)\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n errors.append(loss.item())\n # Print some progress information as the net is trained:\n if epochs < 30 or t % 10 == 0:\n print('epoch {:4d}: loss={:.5f}'.format(t, loss.item()))\n # End of training, so graph the results:\n if show_plots:\n plot_errors(errors)\n y_pred = model(x_data)\n plot_thetas(x_data, y_pred)\n\n\nif __name__ == '__main__':\n model = PendulumSystem()\n want_training = True\n if want_training:\n print('### TRAINING ###')\n training_data = torch.tensor([[10, 10], [-10, 0]], dtype=dtype)\n optimizer = torch.optim.Rprop(model.parameters(), lr=1e-2)\n train_pendulum(model, training_data, optimizer, 3, True)\n else: # Use the following if you want to use Jang's trained model:\n model.anfis = jang_traned_anfis()\n\n print('### TESTING ###')\n test_data = torch.tensor([[10, 20], [15, 30], [20, 40]], dtype=dtype)\n model.interval = 200\n y_pred = model(test_data)\n plot_thetas(test_data, y_pred)\n\n print('### TRAINED MODEL ###')\n fileio.astext.show(model.anfis)\n"
] |
[
[
"torch.mean",
"matplotlib.pyplot.legend",
"torch.empty",
"numpy.random.seed",
"torch.zeros",
"torch.cat",
"torch.sin",
"matplotlib.pyplot.hlines",
"torch.sum",
"torch.tensor",
"torch.stack",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"torch.cos",
"matplotlib.pyplot.ylabel"
]
] |
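`Pendulum.take_step` in the file above is a single explicit-Euler update: with step size h, the state (theta, dtheta) becomes (theta + h*dtheta, dtheta + h*ddtheta), where ddtheta comes from the cart-pole equations in `_theta_dot_dot_radians`. A framework-free restatement on plain floats, with the constants copied from the `Pendulum` class (the function name `euler_step` is illustrative, not part of the anfis package):

```python
import math

def euler_step(theta, dtheta, force, h=10e-3,
               m_c=1.0, m=0.1, length=0.5, g=9.81):
    """One explicit-Euler update of the inverted pendulum; angles in degrees."""
    rt, rdt = math.radians(theta), math.radians(dtheta)
    tot_mass = m_c + m
    numer = g * math.sin(rt) + math.cos(rt) * (
        (-force - m * length * rdt ** 2 * math.sin(rt)) / tot_mass)
    denom = length * (4.0 / 3.0 - (m * math.cos(rt) ** 2) / tot_mass)
    ddtheta = math.degrees(numer / denom)
    return theta + h * dtheta, dtheta + h * ddtheta

# Starting at rest, 10 degrees off vertical, with no control force:
print(euler_step(10.0, 0.0, force=0.0))  # theta stays ~10, dtheta grows
```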
aryanmangal769/UGV-DTU_ROS_Stack
|
[
"6a00c83d076361bdf171c1ad4ef383ad262da4e6"
] |
[
"Scripts/Prototypes/img_sub_lanes_pub.py"
] |
[
"#!/usr/bin/env python\n\n# Python libs\nimport sys, time\n\n# numpy and scipy\nimport numpy as np\nimport math\nfrom scipy.ndimage import filters\n\n# OpenCV\nimport cv2\n\n# Ros libraries\nimport roslib\nimport rospy\n\n# Ros Messages\nfrom sensor_msgs.msg import CompressedImage\nfrom sensor_msgs.msg import LaserScan\n'''\nTO understand\nMinimize all the function except the main,\nthen start reading from there \nthen start opening function as you encounter one.\n\nimagesShow() is complimentary if you want to see all the images\n\n'''\n#=========================================================================================\n#=========================================================================================\n# openCV imshow Plots of all the images\ndef imagesShow():\n\n cv2.imshow('masked_img', masked_img) #masked\n cv2.imshow('warped_img', warped_img) #wrapped\n cv2.imshow('thresh', thresh) #Binary Thresholded\n cv2.imshow('reconst_img', reconst_img) #reconstructed\n cv2.imshow('Final Image', final_img)\n\n print(\"In ImgShow\")\n print(\"Type of final Image = \",type(final_img))\n\n cv2.waitKey(0) \n cv2.destroyAllWindows()\n \n print(\"Image Show Successful !!\")\n\n#Searches white pixels in an input image\ndef whitePixelSearch(img):\n #global IMAGE_H\n #global IMAGE_W\n \n indices=[]\n for i in range(0,IMAGE_H):\n j = 0\n while j < IMAGE_W:\n if img[i,j]==255.0: # If the edge of lane is hit.\n indices.append([j+20,i]) # Assumung lane is 40px wide.\n #print(j)\n j += 100 # To skip useless search in blank space between lanes \n #print(j)\n #print(\"#################\")\n j+= 1\n #print(type(indices))\n #print(len(indices)) \n\n return indices\n\n#Creates and publishes Final Image message to rviz\ndef Img_msg_Publisher():\n\n global count #just fancy stuff (for loop counter)\n count += 1\n\n #------------------Publish Final Image to rviz-----------------------\n\n image_pub = rospy.Publisher(\"/ugvbot/image_processed/compressed\",CompressedImage, queue_size = 1)\n\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', final_img)[1]).tostring()\n \n # Publish Final Image\n image_pub.publish(msg)\n\n print(\"#######################-\"+ str(count) +\"-############################\")\n if VERBOSE :\n rospy.loginfo(\"From Image Msg Publisher\")\n print(\"Msg Format = \",msg.format)\n print(\"Shape of Image = \",final_img.shape)\n print(\"-------------------------------------------------\")\n else :\n print(\"Publishing Final Image.....Bitches!\")\n\n #rate = rospy.Rate(20)\n #rate.sleep()\n\n#Processes Image and Calls Img_msg_Publisher at end.\ndef Img_Processor(ros_data):\n\n global warped_img \n global masked_img\n global warped_img\n global thresh\n global reconst_img\n global final_img\n \n global IMAGE_H\n global IMAGE_W\n\n\n if VERBOSE :\n print(\"-------------------------------------------------\")\n rospy.loginfo(\"From Image Processor\")\n print(\"Type of Received Image = \"+ str(ros_data.format))\n print(\"Shape of Recieved Image = \",ros_data.shape)\n \n\n #### direct conversion to CV2 ####\n np_arr = np.fromstring(ros_data.data, np.uint8)\n img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:\n\n #Image Dimensions\n IMAGE_H = img.shape[0] # assuming 720\n IMAGE_W = img.shape[1] # assuming 1280 \n #print(IMAGE_W)\n\n #-----------------------Masking the Image----------------------------\n #[0,0]------->--------[W,0]\n # | |\n # ^ v\n # | |\n #[0,H]-------<-------[W,H]\n \n edges = 
np.array([[0, 0], [IMAGE_W, 0], [IMAGE_W, IMAGE_H], [0, IMAGE_H]]) #full image polygon\n\n    #ROI Start\n\n    src_H = 275\n    #src_H = 325\n\n    stencil = np.zeros_like(img[:,:,0])\n    polygon = np.array([[0, src_H], [IMAGE_W, src_H], [IMAGE_W, 400], [0, 400]]) # specify coordinates of the ROI polygon\n    cv2.fillConvexPoly(stencil, polygon, 1) # fill polygon with ones\n    masked_img = cv2.bitwise_and(img[:,:,:], img[:,:,:], mask=stencil) # masking the image\n\n    #------------------------Transforming ROI ----------------------------\n    a=500\n\n    src = np.float32([[0, src_H], [IMAGE_W, src_H], [IMAGE_W, 400], [0, 400]]) #Source Polygon coordinates\n    dst = np.float32([[0, src_H], [IMAGE_W, src_H], [IMAGE_W-a, 400], [a, 400]]) #Destination Polygon coordinates\n\n    M = cv2.getPerspectiveTransform(src, dst) # The perspective transformation matrix\n    Minv = cv2.getPerspectiveTransform(dst, src) # Inverse transformation matrix\n\n    warped_img = cv2.warpPerspective(masked_img, M, (IMAGE_W, IMAGE_H))\n    reconst_img = cv2.warpPerspective(warped_img,Minv,(IMAGE_W, IMAGE_H))\n\n    #------------------------Lanes Extraction------------------------------\n    #Making Hough Lines on ROI\n\n    _, thresh = cv2.threshold(cv2.cvtColor(warped_img, cv2.COLOR_BGR2GRAY), 80, 255, cv2.THRESH_BINARY)\n    lines = cv2.HoughLinesP(thresh, 1, np.pi/180, 30 ,minLineLength=100, maxLineGap=10)\n\n    # create a blank frame the size of the thresholded frame\n    final_img = np.zeros(thresh.shape)\n\n    # draw Hough lines\n    if( lines is None):\n        pass\n    else:\n        for line in lines:\n            x1, y1, x2, y2 = line[0]\n            cv2.line(final_img, (x1, y1), (x2, y2), (255, 255, 255), 5)\n\n    #Multiple figures\n    #imagesShow()\n\n    Img_msg_Publisher()\n\n    laser_processor()\n\n#=========================================================================================\n#=========================================================================================\n# Calculates polar coordinates of a given cartesian point w.r.t. the bot\ndef calculatePolarCord(x3, y3):\n    '''\n    The x coordinate increases from left to right,\n    while the y coordinate increases from top to bottom, which\n    leaves us with no standard coordinate system. So,\n    to do the polar angle and radius calculation, we make the image appear\n    in the 4th quadrant, where x increases left to right and y increases\n    top to bottom in the negative range,\n    so the y coordinate is negated everywhere.\n    '''\n    # C(i , -j) 0          0 B (IMAGE_W/2 , 0)---BOT's HEADING\n    #  \\           |\n    #   \\          |\n    #    \\|\n    # 0 A (IMAGE_W/2 , -IMAGE_H)---BOT's POSITION\n\n    #bot position in image (IMAGE_W/2 , -IMAGE_H)\n    x1 ,y1 = IMAGE_W/2 , -IMAGE_H\n    #Reference Point for angle\n    x2 ,y2 = IMAGE_W/2 , 0\n    # White lane coordinates in image\n    x3 ,y3 = x3 , -y3\n\n    # Find direction ratio of line AB\n    ABx = x2 - x1\n    ABy = y2 - y1\n\n    # Find direction ratio of line AC\n    ACx = x3 - x1\n    ACy = y3 - y1\n\n    # Find the dotProduct of lines AB & AC\n    dotProduct = (ABx * ACx + ABy * ACy)\n\n    # square of magnitude of line AB and AC\n    magABsq = (ABx * ABx + ABy * ABy )\n    magACsq = (ACx * ACx + ACy * ACy )\n\n    # cosine of the angle formed by line AB and AC\n    angle = dotProduct\n    angle /= math.sqrt(magABsq * magACsq)\n\n    angle = math.acos(angle)\n\n    if x3 >= IMAGE_W/2: # points right of the centre line get negative angles\n        angle = -1*angle\n\n    return [angle, math.sqrt(magACsq)]\n\n#Applies calculatePolarCord to each point in a list\ndef cartToPolar(cart):\n    indices =[]\n\n    for i in cart:\n        index = calculatePolarCord(i[0],i[1])\n        indices.append(index)\n\n    return indices\n\n#Converts distances using 
functions (from image distances to actual distances)\ndef converterForRviz(image_polar_list):\n    # y = a.exp(-b.t) <-- input image\n    # a = 1012\n    # b = 0.0156\n\n    real_polar=[]\n\n    for i in image_polar_list:\n\n        theta = i[0]\n        radius = i[1]\n\n        rcos = radius*math.cos(theta)\n\n        # Not accurate but gets the work done under --no-obstacle--only-lanes-- conditions\n        # Got the Y direction function by curve fitting,\n        # and the X distance function is just a rough estimate as it's not that important\n\n        ar_cos = 1012* math.exp(-1*0.0156*(720-rcos)) #Got by measuring how actual distances vary in the vertical dir. of the image\n\n        ar_sin = radius*math.sin(theta) #Got by measuring how actual distances vary in the horizontal dir. of the image\n        ar_sin = (ar_sin*3)/125 #change of 3m from centre to 125px left.\n\n\n        actual_theta = math.atan(ar_sin/ar_cos)\n        actual_radius = math.sqrt(ar_sin*ar_sin + ar_cos*ar_cos)\n\n        real_polar.append([actual_theta, actual_radius])\n\n    return real_polar\n\n#Creates and publishes the Laser message for lanes to rviz\ndef laser_msg_publisher(ranges_list):\n    #------------------Publish Fake Laser to rviz-----------------------\n    laser_pub = rospy.Publisher('ugvbot/fake_scan', LaserScan, queue_size=1)\n\n    angle_min=-(math.pi)/2\n    angle_max=math.pi/2\n    intensities=[]\n\n    scan_rate = 10\n    rate=rospy.Rate(scan_rate)\n\n    start_time = rospy.Time.now()\n    angle_increment=(angle_max-angle_min)/360\n    time_increment=1.0/scan_rate # float division, so this is not truncated to 0 on Python 2\n\n    msg=LaserScan()\n    msg.header.stamp = start_time\n    msg.header.frame_id = \"laser\"\n    msg.angle_min=angle_min\n    msg.angle_max=angle_max\n    msg.angle_increment=angle_increment # Angle Increment\n    msg.time_increment=time_increment # Time Increment\n    msg.range_min=0 # If range < MinRange range = 0\n    msg.range_max=100 # If range > MaxRange range = inf\n    msg.ranges=ranges_list # Range of lane pixels\n    msg.intensities=intensities # Intensities empty\n\n    # Publish fake LaserScan\n    laser_pub.publish(msg)\n\n    if VERBOSE :\n        rospy.loginfo(\"From Laser message Publisher\")\n        print(\"Angle Increment\", msg.angle_increment)\n        print(\"length of Range list = \",len(msg.ranges))\n    else :\n        print(\"Publishing fake Laser scan.....\")\n\n    rate.sleep()\n\n#Processes the Final Image and finds polar coordinates w.r.t. the bot\n#Also calls laser_msg_publisher at the end\ndef laser_processor():\n\n    if type(final_img) is np.ndarray:\n\n        # Indices of white pixels from image\n        cart = whitePixelSearch(final_img)\n        #cart_plot()\n\n        # Cartesian coordinates from image to polar coordinates of lanes w.r.t. the robot.\n        polar = cartToPolar(cart)\n        #polar_plot()\n\n        actual_polar = converterForRviz(polar)\n        #polar_plot(actual_polar)\n\n        ranges=[1000]*360 # Cross-verify length with the LaserScan definition in laser_msg_publisher\n\n        #0.5 degree resolution\n        for i in actual_polar:\n            degree = ((i[0])*180/np.pi + 90)*2\n            ranges[int(round(degree,0))] = i[1]\n\n        laser_msg_publisher(ranges)\n\n    else :\n        print(\"##################################\")\n        print(\"Unexpected final_img type in laser_processor!!\")\n        print(\"Type of final Image = \",type(final_img))\n        exit(1)\n\n#=========================================================================================\n#=========================================================================================\nif __name__ == '__main__':\n    #-----------------Global Variables Used-------------------\n    #For Image processing\n    # masked_img Masked with ROI\n    # warped_img perspective transformed\n    # thresh thresholded to 0.0 or 255.0\n    # reconst_img Reconstructed from inverse perspective\n    # final_img Image with Hough lines on thresh\n    # 
IMAGE_H Image Height 720\n # IMAGE_W Image Width 1280\n\n\n count = 0\n VERBOSE = False\n\n rospy.init_node('Lanes_Processor_N_Publisher', anonymous=False)\n rospy.Subscriber(\"/ugvbot/image_raw/compressed\",CompressedImage, Img_Processor, queue_size = 1)\n\n try:\n print(\"inside main's try\")\n #os.system('clear') #cleans the mess we made on last iteration\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down ROS Lanes_Processor_N_Publisher module\")\n"
] |
[
[
"numpy.fromstring",
"numpy.zeros_like",
"numpy.float32",
"numpy.array",
"numpy.zeros"
]
] |
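`calculatePolarCord` in the node above is a dot-product angle computation: with A the bot position (bottom centre of the image), B the heading reference (top centre) and C a lane pixel, it returns acos(AB.AC / (|AB||AC|)) together with the pixel distance |AC|, flipping the angle's sign for points right of the centre line. The same rule as a standalone sketch (the name `polar_from_pixel` is illustrative; the y negation follows the file's 4th-quadrant convention):

```python
import math

def polar_from_pixel(x3, y3, image_w=1280, image_h=720):
    """Angle (rad, negative right of heading) and pixel distance from the bot."""
    ax, ay = image_w / 2, -image_h   # A: bot position, image bottom centre
    bx, by = image_w / 2, 0          # B: heading reference, image top centre
    cx, cy = x3, -y3                 # C: lane pixel, y negated (4th quadrant)
    abx, aby = bx - ax, by - ay
    acx, acy = cx - ax, cy - ay
    cos_angle = (abx * acx + aby * acy) / math.sqrt(
        (abx ** 2 + aby ** 2) * (acx ** 2 + acy ** 2))
    angle = math.acos(cos_angle)
    if x3 >= image_w / 2:            # right of centre => negative angle
        angle = -angle
    return angle, math.hypot(acx, acy)

print(polar_from_pixel(640, 0))  # straight ahead: angle 0.0, distance 720.0 px
```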
lkbr1808/DIF-Net
|
[
"538858c8c89e1b4f37c24533ebd6651ae8eb413b"
] |
[
"modules.py"
] |
[
"'''Define basic blocks\n'''\n\nimport torch\nfrom torch import nn\nfrom torchmeta.modules import (MetaModule, MetaSequential)\n# from torchmeta.modules.utils import get_subdict\nimport numpy as np\nfrom collections import OrderedDict\nimport math\nimport torch.nn.functional as F\n\n'''Adapted from the SIREN repository https://github.com/vsitzmann/siren\n'''\n\nclass BatchLinear(nn.Linear,MetaModule):\n '''A linear meta-layer that can deal with batched weight matrices and biases, as for instance output by a\n hypernetwork.\n '''\n __doc__ = nn.Linear.__doc__\n\n def forward(self, input, params=None):\n\n if params is None:\n return nn.Linear.forward(self,input)\n\n else:\n\n bias = params.get('bias', None)\n weight = params['weight']\n\n output = input.matmul(weight.permute(*[i for i in range(len(weight.shape) - 2)], -1, -2))\n output += bias.unsqueeze(-2)\n return output\n\nclass Sine(nn.Module):\n def __init(self):\n super().__init__()\n\n def forward(self, input):\n return torch.sin(30 * input)\n\nclass FCBlock(MetaModule):\n '''A fully connected neural network that also allows swapping out the weights when used with a hypernetwork.\n Can be used just as a normal neural network though, as well.\n '''\n\n def __init__(self, in_features, out_features, num_hidden_layers, hidden_features,\n outermost_linear=False, nonlinearity='relu', weight_init=None):\n super().__init__()\n\n self.first_layer_init = None\n\n # Dictionary that maps nonlinearity name to the respective function, initialization, and, if applicable,\n # special first-layer initialization scheme\n nls_and_inits = {'sine':(Sine(), sine_init, first_layer_sine_init,last_layer_sine_init),\n 'relu':(nn.ReLU(inplace=True), init_weights_normal, None,None),\n 'sigmoid':(nn.Sigmoid(), init_weights_xavier, None,None),\n 'tanh':(nn.Tanh(), init_weights_xavier, None,None),\n 'selu':(nn.SELU(inplace=True), init_weights_selu, None,None),\n 'softplus':(nn.Softplus(), init_weights_normal, None,None),\n 'elu':(nn.ELU(inplace=True), init_weights_elu, None,None)}\n\n nl, nl_weight_init, first_layer_init,last_layer_init = nls_and_inits[nonlinearity]\n\n if weight_init is not None: # Overwrite weight init if passed\n self.weight_init = weight_init\n else:\n self.weight_init = nl_weight_init\n\n self.net = []\n self.net.append(MetaSequential(\n BatchLinear(in_features, hidden_features), nl\n ))\n\n for i in range(num_hidden_layers):\n self.net.append(MetaSequential(\n BatchLinear(hidden_features, hidden_features), nl\n ))\n\n if outermost_linear:\n self.net.append(MetaSequential(BatchLinear(hidden_features, out_features)))\n else:\n self.net.append(MetaSequential(\n BatchLinear(hidden_features, out_features), nl\n ))\n\n self.net = MetaSequential(*self.net)\n if self.weight_init is not None:\n self.net.apply(self.weight_init)\n\n if first_layer_init is not None: # Apply special initialization to first layer, if applicable.\n self.net[0].apply(first_layer_init)\n\n if last_layer_init is not None:\n self.net[-1].apply(last_layer_init)\n\n def forward(self, coords, params=None, **kwargs):\n if params is not None:\n params = self.get_subdict(params, 'net')\n\n output = self.net(coords, params = params)\n return output\n\nclass SingleBVPNet(MetaModule):\n '''A canonical representation network for a BVP.'''\n\n def __init__(self, out_features=1, type='sine', in_features=2,\n mode='mlp', hidden_features=256, num_hidden_layers=3, **kwargs):\n super().__init__()\n self.mode = mode\n self.net = FCBlock(in_features=in_features, out_features=out_features, 
num_hidden_layers=num_hidden_layers,\n hidden_features=hidden_features, outermost_linear=True, nonlinearity=type)\n print(self)\n\n def forward(self, model_input, params=None):\n\n # Enables us to compute gradients w.r.t. coordinates\n coords_org = model_input['coords'].requires_grad_(True)\n coords = coords_org\n\n # various input processing methods for different applications\n output = self.net(coords_org, self.get_subdict(params, 'net'))\n return {'model_in': coords_org, 'model_out': output}\n\n\ndef init_weights_normal(m):\n if type(m) == BatchLinear or type(m) == nn.Linear:\n if hasattr(m, 'weight'):\n nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')\n\n\ndef init_weights_selu(m):\n if type(m) == BatchLinear or type(m) == nn.Linear:\n if hasattr(m, 'weight'):\n num_input = m.weight.size(-1)\n nn.init.normal_(m.weight, std=1 / math.sqrt(num_input))\n\n\ndef init_weights_elu(m):\n if type(m) == BatchLinear or type(m) == nn.Linear:\n if hasattr(m, 'weight'):\n num_input = m.weight.size(-1)\n nn.init.normal_(m.weight, std=math.sqrt(1.5505188080679277) / math.sqrt(num_input))\n\n\ndef init_weights_xavier(m):\n if type(m) == BatchLinear or type(m) == nn.Linear:\n if hasattr(m, 'weight'):\n nn.init.xavier_normal_(m.weight)\n\n\ndef sine_init(m):\n with torch.no_grad():\n if hasattr(m, 'weight'):\n num_input = m.weight.size(-1)\n m.weight.uniform_(-np.sqrt(6 / num_input) / 30, np.sqrt(6 / num_input) / 30)\n\n\ndef first_layer_sine_init(m):\n with torch.no_grad():\n if hasattr(m, 'weight'):\n num_input = m.weight.size(-1)\n m.weight.uniform_(-1 / num_input, 1 / num_input)\n\ndef last_layer_sine_init(m):\n with torch.no_grad():\n if hasattr(m, 'weight'):\n num_input = m.weight.size(-1)\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)"
] |
[
[
"torch.nn.Linear.forward",
"torch.nn.Softplus",
"numpy.sqrt",
"torch.sin",
"torch.nn.ELU",
"torch.nn.init.xavier_normal_",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.no_grad",
"torch.nn.SELU",
"torch.nn.init.zeros_",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
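The sine initialisation in modules.py above follows the SIREN scheme: hidden-layer weights are drawn from U(-sqrt(6/fan_in)/w0, sqrt(6/fan_in)/w0) with w0 = 30 (the same factor hard-coded in `Sine.forward`), while the first layer uses the wider U(-1/fan_in, 1/fan_in). A standalone sketch of that rule on a plain `nn.Linear`, without the torchmeta dependency (the helper name `siren_init` is illustrative):

```python
import math
import torch
from torch import nn

W0 = 30.0  # frequency factor matching Sine.forward's `30 * input`

def siren_init(layer: nn.Linear, is_first: bool = False) -> None:
    """Apply the SIREN weight initialisation to one linear layer."""
    fan_in = layer.weight.size(-1)
    bound = 1.0 / fan_in if is_first else math.sqrt(6.0 / fan_in) / W0
    with torch.no_grad():
        layer.weight.uniform_(-bound, bound)

first = nn.Linear(2, 256)
siren_init(first, is_first=True)    # U(-1/2, 1/2) for a 2-d input
hidden = nn.Linear(256, 256)
siren_init(hidden)                  # U(-sqrt(6/256)/30, sqrt(6/256)/30)
```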
0x8b/HackerRank
|
[
"45e1a0e2be68950505c0a75218715bd3132a428b"
] |
[
"aoc2016/03.py"
] |
[
"#!/usr/bin/env python\n\nimport fileinput\n\nimport numpy as np\n\ndata = []\n\nfor line in fileinput.input():\n data.append(list(map(int, line.strip().split())))\n\n\ndef count(data):\n triangles = 0\n\n for a, b, c in data:\n if a + b > c and a + c > b and b + c > a:\n triangles += 1\n\n return triangles\n\n\nassert count(data) == 982\n\ndata = np.reshape(np.array(data).T, (-1, 3))\n\nassert count(data) == 1826\n"
] |
[
[
"numpy.array"
]
] |
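Part 2 of this Advent of Code puzzle reads triangle sides down the columns instead of across the rows; the `np.reshape(np.array(data).T, (-1, 3))` line above handles that by transposing the (n, 3) array so each original column becomes a row, then regrouping it in runs of three. A worked miniature (the values are illustrative, not puzzle input):

```python
import numpy as np

rows = np.array([[101, 301, 501],
                 [102, 302, 502],
                 [103, 303, 503]])
# Transpose, then regroup into consecutive triples read column-wise.
cols = np.reshape(rows.T, (-1, 3))
print(cols.tolist())
# [[101, 102, 103], [301, 302, 303], [501, 502, 503]]
```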
jzabl/mpdaf
|
[
"806baab8b793ba1cdbde4ce9ea13116f8ca327ee",
"806baab8b793ba1cdbde4ce9ea13116f8ca327ee"
] |
[
"lib/mpdaf/sdetect/tests/test_segmap.py",
"lib/mpdaf/obj/image.py"
] |
[
"\"\"\"\nCopyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon\nCopyright (c) 2019 Simon Conseil <[email protected]>\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom astropy.io import fits\nfrom glob import glob\nfrom mpdaf.obj import Image\nfrom mpdaf.sdetect import Segmap, create_masks_from_segmap\nfrom mpdaf.tests.utils import get_data_file\nfrom numpy.testing import assert_array_equal\n\ntry:\n import joblib # noqa\nexcept ImportError:\n HAS_JOBLIB = False\nelse:\n HAS_JOBLIB = True\n\n\ndef test_segmap():\n segfile = get_data_file('segmap', 'segmap.fits')\n img = Image(segfile)\n refdata = np.arange(14)\n\n for arg in (segfile, img, img.data):\n segmap = Segmap(arg)\n assert segmap.img.shape == (90, 90)\n assert str(segmap.img.data.dtype) == '>i8'\n assert np.max(segmap.img._data) == 13\n assert_array_equal(np.unique(segmap.img._data), refdata)\n\n assert_array_equal(segmap.copy().img.data, segmap.img.data)\n\n cmap = segmap.cmap()\n assert cmap.N == 14 # nb of values in the segmap\n\n\ndef test_align_segmap():\n segmap = Segmap(get_data_file('segmap', 'segmap.fits'))\n ref = Image(get_data_file('segmap', 'image.fits'))\n aligned = segmap.align_with_image(ref, truncate=True)\n assert aligned.img.shape == ref.shape\n assert (aligned.img.wcs.get_rot() - ref.wcs.get_rot()) < 1e-3\n\n\ndef test_cut_header():\n segmap = Segmap(get_data_file('segmap', 'segmap.fits'),\n cut_header_after='NAXIS2')\n assert 'RADESYS' not in segmap.img.primary_header\n assert 'RADESYS' not in segmap.img.data_header\n\n\[email protected](not HAS_JOBLIB, reason=\"requires joblib\")\ndef test_create_masks(tmpdir):\n segfile = get_data_file('segmap', 'segmap.fits')\n reffile = get_data_file('segmap', 'image.fits')\n catalog = get_data_file('segmap', 'catalog.fits')\n\n create_masks_from_segmap(\n segfile, catalog, reffile, n_jobs=1,\n masksky_name=str(tmpdir.join('mask-sky.fits')),\n maskobj_name=str(tmpdir.join('mask-source-%05d.fits')),\n idname='id', raname='ra', decname='dec', margin=5, mask_size=(10, 10))\n\n assert 
len(glob(str(tmpdir.join('mask-source*')))) == 13\n assert len(glob(str(tmpdir.join('mask-sky*')))) == 1\n\n mask = fits.getdata(str(tmpdir.join('mask-source-00001.fits')))\n assert mask.shape == (50, 50)\n assert mask.sum() == 56\n\n # test skip_existing\n create_masks_from_segmap(\n segfile, catalog, reffile, n_jobs=1, skip_existing=True,\n masksky_name=str(tmpdir.join('mask-sky.fits')),\n maskobj_name=str(tmpdir.join('mask-source-%05d.fits')),\n idname='id', raname='ra', decname='dec', margin=5, mask_size=(10, 10),\n convolve_fwhm=0)\n\n # test convolve_fwhm and callables for mask filenames\n masksky_func = lambda: str(tmpdir.join('mask2-sky.fits'))\n maskobj_func = lambda x: str(tmpdir.join('mask2-source-%05d.fits' % x))\n create_masks_from_segmap(\n segfile, catalog, reffile, n_jobs=1, skip_existing=True,\n masksky_name=masksky_func, maskobj_name=maskobj_func,\n idname='id', raname='ra', decname='dec', margin=5, mask_size=(10, 10),\n convolve_fwhm=1, psf_threshold=0.5)\n\n mask = fits.getdata(str(tmpdir.join('mask2-source-00001.fits')))\n assert mask.shape == (50, 50)\n assert mask.sum() == 106\n",
"\"\"\"\nCopyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon\nCopyright (c) 2012-2017 Laure Piqueras <[email protected]>\nCopyright (c) 2012-2017 Roland Bacon <[email protected]>\nCopyright (c) 2014-2016 Johan Richard <[email protected]>\nCopyright (c) 2014-2019 Simon Conseil <[email protected]>\nCopyright (c) 2015 Floriane Leclercq <[email protected]>\nCopyright (c) 2016 Martin Shepherd <[email protected]>\nCopyright (c) 2017 Ole Streicher <[email protected]>\nCopyright (c) 2018-2019 David Carton <[email protected]>\nCopyright (c) 2018 Yannick Roehlly <[email protected]>\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport numpy as np\nfrom numpy import ma\n\nimport astropy.units as u\nfrom astropy.io import fits\nfrom astropy.stats import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma\nfrom scipy import interpolate, signal\nfrom scipy import ndimage as ndi\nfrom scipy.ndimage.interpolation import affine_transform\nfrom scipy.optimize import leastsq\n\nfrom .arithmetic import ArithmeticMixin\nfrom .coords import WCS\nfrom .data import DataArray\nfrom .fitting import Gauss2D, Moffat2D\nfrom .objs import is_int, is_number, bounding_box, UnitMaskedArray, UnitArray\nfrom .plot import FormatCoord, get_plot_norm\n\n__all__ = ('Image', 'gauss_image', 'moffat_image', 'SpatialFrequencyLimits')\n\n\nclass Image(ArithmeticMixin, DataArray):\n\n \"\"\"Manage image, optionally including a variance and a bad pixel mask.\n\n Parameters\n ----------\n filename : str\n Possible filename (.fits, .png or .bmp).\n ext : int or (int,int) or string or (string,string)\n Number/name of the data extension or numbers/names\n of the data and variance extensions.\n wcs : `mpdaf.obj.WCS`\n World coordinates.\n unit : str or `astropy.units.Unit`\n The physical units of the data values. Defaults to\n `astropy.units.dimensionless_unscaled`.\n data : float array\n Array containing the pixel values of the image. None by default.\n var : float array\n Array containing the variance. 
None by default.\n copy : bool\n If true (default), then the data and variance arrays are copied.\n dtype : numpy.dtype\n Type of the data (int, float)\n\n Attributes\n ----------\n filename : str\n Possible FITS filename.\n primary_header : `astropy.io.fits.Header`\n FITS primary header instance.\n data_header : `astropy.io.fits.Header`\n FITS data header instance.\n wcs : `mpdaf.obj.WCS`\n World coordinates.\n unit : `astropy.units.Unit`\n Physical units of the data values.\n dtype : numpy.dtype\n Type of the data (int, float)\n\n \"\"\"\n\n _ndim_required = 2\n _has_wcs = True\n\n def __init__(self, filename=None, ext=None, wcs=None, data=None, var=None,\n unit=u.dimensionless_unscaled, copy=True, dtype=None,\n **kwargs):\n self._spflims = None\n\n super(Image, self).__init__(\n filename=filename, ext=ext, wcs=wcs, unit=unit, data=data, var=var,\n copy=copy, dtype=dtype, **kwargs)\n\n def copy(self):\n \"\"\"Return a new copy of an Image object.\"\"\"\n obj = super(Image, self).copy()\n\n # Make a deep copy of the spatial-frequency limits.\n if self._spflims is not None:\n obj._spflims = self._spflims.deepcopy()\n return obj\n\n def get_step(self, unit=None):\n \"\"\"Return the angular height and width of a pixel along the\n Y and X axes of the image array.\n\n In MPDAF, images are sampled on a regular grid of square\n pixels that represent a flat projection of the celestial\n sphere. The get_step() method returns the angular width and\n height of these pixels on the sky.\n\n See also get_axis_increments().\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n The angular units of the returned values.\n\n Returns\n -------\n out : numpy.ndarray\n (dy,dx). These are the angular height and width of pixels\n along the Y and X axes of the image. The returned values are\n either in the unit specified by the 'unit' input parameter,\n or in the unit specified by the self.unit property.\n \"\"\"\n\n if self.wcs is not None:\n return self.wcs.get_step(unit)\n\n def get_axis_increments(self, unit=None):\n \"\"\"Return the displacements on the sky that result from\n incrementing the array indexes of the image by one along the Y\n and X axes, respectively.\n\n In MPDAF, images are sampled on a regular grid of square\n pixels that represent a flat projection of the celestial\n sphere. The get_axis_increments() method returns the angular\n width and height of these pixels on the sky, with signs that\n indicate whether the angle increases or decreases as one\n increments the array indexes. To keep plots consistent,\n regardless of the rotation angle of the image on the sky, the\n returned height is always positive, but the returned width is\n negative if a plot of the image with pixel 0,0 at the bottom\n left would place east anticlockwise of north, and positive\n otherwise.\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n The angular units of the returned values.\n\n Returns\n -------\n out : numpy.ndarray\n (dy,dx). These are the angular increments of pixels along\n the Y and X axes of the image. 
The returned values are\n either in the unit specified by the 'unit' input parameter,\n or in the unit specified by the self.unit property.\n\n \"\"\"\n\n if self.wcs is not None:\n return self.wcs.get_axis_increments(unit)\n\n def get_range(self, unit=None):\n \"\"\"Return the minimum and maximum right-ascensions and declinations\n in the image array.\n\n Specifically a list is returned with the following contents:\n\n [dec_min, ra_min, dec_max, ra_max]\n\n Note that if the Y axis of the image is not parallel to the\n declination axis, then the 4 returned values will all come\n from different corners of the image. In particular, note that\n this means that the coordinates [dec_min,ra_min] and\n [dec_max,ra_max] will only coincide with pixels in the image\n if the Y axis is aligned with the declination axis. Otherwise\n they will be outside the bounds of the image.\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n The units of the returned angles.\n\n Returns\n -------\n out : numpy.ndarray\n The range of right ascensions and declinations, arranged as\n [dec_min, ra_min, dec_max, ra_max]. The returned values are\n either in the units specified in the 'unit' input parameter,\n or in the units stored in the self.unit property.\n\n\n \"\"\"\n if self.wcs is not None:\n return self.wcs.get_range(unit)\n\n def get_start(self, unit=None):\n \"\"\"Return [y,x] corresponding to pixel (0,0).\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n type of the world coordinates\n\n Returns\n -------\n out : float array\n \"\"\"\n if self.wcs is not None:\n return self.wcs.get_start(unit)\n\n def get_end(self, unit=None):\n \"\"\"Return [y,x] corresponding to pixel (-1,-1).\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n type of the world coordinates\n\n Returns\n -------\n out : float array\n \"\"\"\n if self.wcs is not None:\n return self.wcs.get_end(unit)\n\n def get_rot(self, unit=u.deg):\n \"\"\"Return the rotation angle of the image, defined such that a\n rotation angle of zero aligns north along the positive Y axis,\n and a positive rotation angle rotates north away from the Y\n axis, in the sense of a rotation from north to east.\n\n Note that the rotation angle is defined in a flat\n map-projection of the sky. It is what would be seen if\n the pixels of the image were drawn with their pixel\n widths scaled by the angular pixel increments returned\n by the get_axis_increments() method.\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n The unit to give the returned angle (degrees by default).\n\n Returns\n -------\n out : float\n The angle between celestial north and the Y axis of\n the image, in the sense of an eastward rotation of\n celestial north from the Y-axis.\n\n \"\"\"\n\n if self.wcs is not None:\n return self.wcs.get_rot(unit)\n\n def mask_region(self, center, radius, unit_center=u.deg,\n unit_radius=u.arcsec, inside=True, posangle=0.0):\n \"\"\"Mask values inside or outside a circular or rectangular region.\n\n Parameters\n ----------\n center : (float,float)\n Center (y,x) of the region, where y,x are usually celestial\n coordinates along the Y and X axes of the image, but are\n interpretted as Y,X array-indexes if unit_center is changed\n to None.\n radius : float or (float,float)\n The radius of a circular region, or the half-width and\n half-height of a rectangular region, respectively.\n unit_center : `astropy.units.Unit`\n The units of the coordinates of the center argument\n (degrees by default). 
If None, the units of the center\n argument are assumed to be pixels.\n unit_radius : `astropy.units.Unit`\n The units of the radius argument (arcseconds by default).\n If None, the units are assumed to be pixels.\n inside : bool\n If inside is True, pixels inside the region are masked.\n If inside is False, pixels outside the region are masked.\n posangle : float\n When the region is rectangular, this is the counter-clockwise\n rotation angle of the rectangle in degrees. When posangle is\n 0.0 (the default), the X and Y axes of the ellipse are along\n the X and Y axes of the image.\n\n \"\"\"\n center = np.array(center)\n\n # If the radius argument is a scalar value, this requests\n # that a circular region be masked. Delegate this to mask_ellipse().\n if np.isscalar(radius):\n return self.mask_ellipse(center=center, radius=radius,\n posangle=0.0, unit_center=unit_center,\n unit_radius=unit_radius, inside=inside)\n\n if unit_center is not None:\n center = self.wcs.sky2pix(center, unit=unit_center)[0]\n\n # Get the pixel sizes in the units of the radius argument.\n if unit_radius is None:\n step = np.array([1.0, 1.0]) # Pixel counts\n else:\n step = self.wcs.get_step(unit=unit_radius)\n\n # Treat rotated rectangles as polygons.\n if not np.isclose(posangle, 0.0):\n cos = np.cos(np.radians(posangle))\n sin = np.sin(np.radians(posangle))\n hw, hh = radius\n poly = np.array([[-hw * sin - hh * cos, -hw * cos + hh * sin],\n [-hw * sin + hh * cos, -hw * cos - hh * sin],\n [+hw * sin + hh * cos, +hw * cos - hh * sin],\n [+hw * sin - hh * cos, +hw * cos + hh * sin]])\n return self.mask_polygon(poly / step + center, unit=None,\n inside=inside)\n\n # Get Y-axis and X-axis slice objects that bound the rectangular area.\n sy, sx = bounding_box(form=\"rectangle\", center=center,\n radii=radius, shape=self.shape, step=step)[0]\n\n if inside:\n self.data[sy, sx] = np.ma.masked\n else:\n self.data[0:sy.start, :] = np.ma.masked\n self.data[sy.stop:, :] = np.ma.masked\n self.data[sy, 0:sx.start] = np.ma.masked\n self.data[sy, sx.stop:] = np.ma.masked\n\n def mask_ellipse(self, center, radius, posangle, unit_center=u.deg,\n unit_radius=u.arcsec, inside=True):\n \"\"\"Mask values inside or outside an elliptical region.\n\n Parameters\n ----------\n center : (float,float)\n Center (y,x) of the region, where y,x are usually celestial\n coordinates along the Y and X axes of the image, but are\n interpretted as Y,X array-indexes if unit_center is changed\n to None.\n radius : (float,float)\n The radii of the two orthogonal axes of the ellipse.\n When posangle is zero, radius[0] is the radius along\n the X axis of the image-array, and radius[1] is\n the radius along the Y axis of the image-array.\n posangle : float\n The counter-clockwise rotation angle of the ellipse in\n degrees. When posangle is zero, the X and Y axes of the\n ellipse are along the X and Y axes of the image.\n unit_center : `astropy.units.Unit`\n The units of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_radius : `astropy.units.Unit`\n The units of the radius argument. 
Arcseconds by default.\n (use None for radius in pixels)\n inside : bool\n If inside is True, pixels inside the described region are masked.\n If inside is False, pixels outside the described region are masked.\n\n \"\"\"\n center = np.array(center)\n if unit_center is not None:\n center = self.wcs.sky2pix(center, unit=unit_center)[0]\n\n # Get the pixel sizes in the units of the radius argument.\n if unit_radius is None:\n step = np.array([1.0, 1.0]) # Pixel counts\n else:\n step = self.wcs.get_step(unit=unit_radius)\n\n # Get the two radii in the form of a numpy array.\n if np.isscalar(radius):\n radii = np.array([radius, radius])\n else:\n radii = np.asarray(radius)\n\n # Obtain Y and X axis slice objects that select the rectangular\n # region that just encloses the rotated ellipse.\n [sy, sx], _, center = bounding_box(\n form=\"ellipse\", center=center, radii=radii,\n shape=self.shape, posangle=posangle, step=step)\n\n # Precompute the sine and cosine of the position angle.\n cospa = np.cos(np.radians(posangle))\n sinpa = np.sin(np.radians(posangle))\n\n # When the position angle is zero, such that the\n # xe and ye axes of the ellipse are along the X and Y axes\n # of the image-array, the equation of the ellipse is:\n #\n # (xe / rx)**2 + (ye / ry)**2 = 1\n #\n # Before we can use this equation with the rotated ellipse, we\n # have to rotate the pixel coordinates clockwise by the\n # counterclockwise position angle of the ellipse to align the\n # rotated axes of the ellipse along the image X and Y axes:\n #\n # xp = | cos(pa), sin(pa)| |x|\n # yp |-sin(pa), cos(pa)| |y|\n #\n # The value of k returned by the following equation will then\n # be < 1 for pixels inside the ellipse, == 1 for pixels on the\n # ellipse and > 1 for pixels outside the ellipse.\n #\n # k = (xp / rx)**2 + (yp / ry)**2\n x, y = np.meshgrid((np.arange(sx.start, sx.stop) - center[1]) * step[1],\n (np.arange(sy.start, sy.stop) - center[0]) * step[0])\n ksel = (((x * cospa + y * sinpa) / radii[0]) ** 2 +\n ((y * cospa - x * sinpa) / radii[1]) ** 2)\n\n if inside:\n self.data[sy, sx][ksel < 1] = np.ma.masked\n else:\n self.data[0:sy.start, :] = np.ma.masked\n self.data[sy.stop:, :] = np.ma.masked\n self.data[sy, 0:sx.start] = np.ma.masked\n self.data[sy, sx.stop:] = np.ma.masked\n self.data[sy, sx][ksel > 1] = np.ma.masked\n\n def mask_polygon(self, poly, unit=u.deg, inside=True):\n \"\"\"Mask values inside or outside a polygonal region.\n\n Parameters\n ----------\n poly : (float, float)\n An array of (float,float) containing a set of (p,q) or (dec,ra)\n values for the polygon vertices.\n unit : `astropy.units.Unit`\n The units of the polygon coordinates (by default in degrees).\n Use unit=None to have polygon coordinates in pixels.\n inside : bool\n If inside is True, pixels inside the polygonal region are masked.\n If inside is False, pixels outside the polygonal region are masked.\n\n \"\"\"\n\n # Convert DEC,RA (deg) values coming from poly into Y,X value (pixels)\n if unit is not None:\n poly = np.array([\n [self.wcs.sky2pix((val[0], val[1]), unit=unit)[0][0],\n self.wcs.sky2pix((val[0], val[1]), unit=unit)[0][1]]\n for val in poly])\n\n b = np.mgrid[:self.shape[0], :self.shape[1]].reshape(2, -1).T\n\n # Use a matplotlib method to create a polygon path and check if points\n # are within the polygon. 
The ouput is a boolean table.\n from matplotlib.path import Path\n polymask = Path(poly)\n c = polymask.contains_points(b)\n if not inside:\n c = ~c\n\n self._mask |= c.reshape(self.shape)\n return poly\n\n def truncate(self, y_min, y_max, x_min, x_max, mask=True, unit=u.deg,\n inplace=False):\n \"\"\"Return a sub-image that contains a specified area of the sky.\n\n The ranges x_min to x_max and y_min to y_max, specify a rectangular\n region of the sky in world coordinates. The truncate function returns\n the sub-image that just encloses this region. Note that if the world\n coordinate axes are not parallel to the array axes, the region will\n appear to be a rotated rectangle within the sub-image. In such cases,\n the corners of the sub-image will contain pixels that are outside the\n region. By default these pixels are masked. However this can be\n disabled by changing the optional mask argument to False.\n\n Parameters\n ----------\n y_min : float\n The minimum Y-axis world-coordinate of the selected\n region. The Y-axis is usually Declination, which may not\n be parallel to the Y-axis of the image array.\n y_max : float\n The maximum Y-axis world coordinate of the selected region.\n x_min : float\n The minimum X-axis world-coordinate of the selected\n region. The X-axis is usually Right Ascension, which may\n not be parallel to the X-axis of the image array.\n x_max : float\n The maximum X-axis world coordinate of the selected region.\n mask : bool\n If True, any pixels in the sub-image that remain outside the\n range x_min to x_max and y_min to y_max, will be masked.\n unit : `astropy.units.Unit`\n The units of the X and Y world-coordinates (degrees by default).\n inplace : bool\n If False, return a truncated copy of the image (the default).\n If True, truncate the original image in-place, and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n\n # Get the sky and pixel coordinates of the corners of the rectangular\n # region that is bounded by x_min..x_max and y_min..y_max.\n skycrd = np.array([[y_min, x_min],\n [y_min, x_max],\n [y_max, x_min],\n [y_max, x_max]])\n\n if unit is not None:\n pixcrd = self.wcs.sky2pix(skycrd, unit=unit)\n else:\n pixcrd = skycrd\n\n # The sides of the selected region may not be parallel with the\n # array axes. Determine the pixel bounds of a rectangular\n # region of the array that contains the requested region.\n imin = max(0, int(np.min(pixcrd[:, 0]) + 0.5))\n imax = min(self.shape[0], int(np.max(pixcrd[:, 0]) + 0.5) + 1)\n jmin = max(0, int(np.min(pixcrd[:, 1]) + 0.5))\n jmax = min(self.shape[1], int(np.max(pixcrd[:, 1]) + 0.5) + 1)\n\n # Extract the rectangular area that contains the requested region.\n subima = self[imin:imax, jmin:jmax]\n if inplace:\n self._data = subima._data\n if self._var is not None:\n self._var = subima._var\n self._mask = subima._mask\n self.wcs = subima.wcs\n out = self\n else:\n out = subima.copy()\n\n # If the region is rotated relative to the image array axes\n # then the rectangular sub-image that contains this will has\n # some pixels outside this region. 
Should these be masked?\n if mask:\n # Get the indexes of all of the pixels in the \"out\" array,\n # ordered like: [[0,0], [0,1], [1,0], [1,1], [2,0], [2,1]...]\n pixcrd = np.mgrid[:out.shape[0], :out.shape[1]].reshape(2, -1).T\n\n if unit is None:\n skycrd = pixcrd\n else:\n skycrd = np.array(out.wcs.pix2sky(pixcrd, unit=unit))\n\n # Reshape the array of coordinates to have the shape of\n # the output array.\n x = skycrd[:, 1].reshape(out.shape)\n y = skycrd[:, 0].reshape(out.shape)\n\n # Test the X and Y coordinates of each pixel against the\n # requested range of X and Y coordinates, and mask pixels\n # that are outside this range.\n test_x = np.logical_or(x < x_min, x > x_max)\n test_y = np.logical_or(y < y_min, y > y_max)\n test = np.logical_or(test_x, test_y)\n out._mask = np.logical_or(out._mask, test)\n\n # Remove any array margins that are now completely masked.\n out.crop()\n\n return out\n\n def subimage(self, center, size, unit_center=u.deg, unit_size=u.arcsec,\n minsize=2.0):\n \"\"\"Return a view on a square or rectangular part.\n\n This method returns a square or rectangular sub-image whose center and\n size are specified in world coordinates. Note that this is a view on\n the original map and that both will be modified at the same time. If\n you need to modify only the sub-image, copy() the result of the\n method.\n\n Parameters\n ----------\n center : (float,float)\n The center (dec, ra) of the square region. If this position\n is not within the parent image, None is returned.\n size : float or (float,float)\n The width of a square region, or the width and height of\n a rectangular region.\n unit_center : `astropy.units.Unit`\n The units of the center coordinates.\n Degrees are assumed by default. To specify the center\n in pixels, assign None to unit_center.\n unit_size : `astropy.units.Unit`\n The units of the size and minsize arguments.\n Arcseconds are assumed by default (use None to specify\n sizes in pixels).\n minsize : float\n The minimum width of the output image along both the Y and\n X axes. 
This function returns None if size is smaller than\n minsize, or if the part of the square that lies within the\n parent image is smaller than minsize along either axis.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n\n # If just one size is given, use it for both axes.\n if np.isscalar(size):\n size = np.array([size, size])\n else:\n size = np.asarray(size)\n if size[0] <= 0 or size[1] <= 0:\n raise ValueError('Size must be positive')\n\n# # Require the center to be within the parent image.\n# if not self.inside(center, unit_center):\n# raise ValueError('The center must be within the image')\n\n # Convert the center position from world-coordinates to pixel indexes.\n center = np.asarray(center)\n if unit_center is not None:\n center = self.wcs.sky2pix(center, unit=unit_center)[0]\n\n # Get the pixel sizes in the units of the size argument.\n if unit_size is None:\n step = np.array([1.0, 1.0]) # Pixel counts\n else:\n step = self.wcs.get_step(unit=unit_size)\n\n # Convert the minimum size from world coordinates to pixel counts,\n # taking account of the possibility that pixels can be rectangular.\n minsize /= step\n\n # Convert the width and height of the region to radii, and\n # get Y-axis and X-axis slice objects that select this region.\n radius = size / 2.\n [sy, sx], [uy, ux], center = bounding_box(\n form=\"rectangle\", center=center, radii=radius,\n shape=self.shape, step=step)\n\n if (sx.start >= self.shape[1] or\n sx.stop < 0 or\n sx.start == sx.stop or\n sy.start >= self.shape[0] or\n sy.stop < 0 or\n sy.start == sy.stop):\n raise ValueError('Sub-image boundaries are outside the cube: '\n 'center: {}, shape: {}, size: {}'\n .format(center, self.shape, size))\n\n # Require that the image be at least minsize x minsize pixels.\n if (sy.stop - sy.start + 1) < minsize[0] or \\\n (sx.stop - sx.start + 1) < minsize[1]:\n # Should we raise an exception instead ?\n self.logger.warning('extracted image is too small')\n return\n\n # Extract the requested part of the image.\n res = self[sy, sx]\n\n # If the image region was not clipped at the edges of the parent cube,\n # then return the subcube.\n if sy == uy and sx == ux:\n return res\n\n # Since the subimage is smaller than requested, due to clipping,\n # create new data and variance arrays of the required size.\n shape = (uy.stop - uy.start, ux.stop - ux.start)\n data = np.zeros(shape, dtype=self.data.dtype)\n if self._var is None:\n var = None\n else:\n var = np.zeros(shape)\n\n # If no mask is currently in use, start with every pixel of\n # the new array filled with nans. 
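\n # (So for floating-point data the padding margins of the enlarged\n # array read as NaN in the result, while integer data falls back on\n # the masked-array fill value.)\n # 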
Otherwise create a mask that\n # initially flags all pixels.\n if self._mask is ma.nomask:\n mask = ma.nomask\n data[:] = (np.nan if self.dtype.kind == 'f'\n else self.data.fill_value)\n if var is not None:\n var[:] = np.nan\n else:\n mask = np.ones(shape, dtype=bool)\n\n # Calculate the slices where the clipped subcube should go in\n # the new arrays.\n slices = (slice(sy.start - uy.start, sy.stop - uy.start),\n slice(sx.start - ux.start, sx.stop - ux.start))\n\n # Copy the clipped subcube into unclipped arrays.\n data[slices] = res._data[:]\n if var is not None:\n var[slices] = res._var[:]\n if (mask is not None) and (mask is not ma.nomask):\n mask[slices] = res._mask[:]\n\n # Create a new WCS object for the unclipped subcube.\n wcs = res.wcs\n wcs.set_crpix1(wcs.wcs.wcs.crpix[0] + slices[1].start)\n wcs.set_crpix2(wcs.wcs.wcs.crpix[1] + slices[0].start)\n wcs.naxis1 = shape[1]\n wcs.naxis2 = shape[0]\n\n # Create the new unclipped sub-cube.\n return Image(wcs=wcs, unit=self.unit, copy=False,\n data=data, var=var, mask=mask,\n data_header=fits.Header(self.data_header),\n primary_header=fits.Header(self.primary_header),\n filename=self.filename)\n\n def _rotate(self, theta=0.0, interp='no', reshape=False, order=1,\n pivot=None, unit=u.deg, regrid=None, flux=False, cutoff=0.25):\n\n # In general it isn't possible to both anchor a point in the\n # image while reshaping the image so that it fits.\n if reshape and pivot is not None:\n raise ValueError(\"The pivot and reshape options can't be combined\")\n\n # Turn off the sampling filter when orders of less than 2 are selected.\n prefilter = order > 1\n\n # Convert the rotation angle to radians.\n angle = (theta * unit).to(u.rad).value\n\n # Get the current rotation angle of the image in radians.\n oldrot = self.wcs.get_rot(unit=u.rad)\n\n # Where should north end up after we have rotated the image?\n newrot = oldrot + angle\n\n # Get the current pixel size.\n oldinc = self.wcs.get_axis_increments()\n\n # If no value has been specified for the regrid option, regrid\n # unless asked not to reshape the array.\n if regrid is None:\n regrid = reshape\n\n # Have we been asked to adjust pixel dimensions to avoid undersampling\n # and oversampling?\n if regrid:\n\n # Determine the spatial frequencies that need to be sampled\n # along the rotated Y and X axes.\n newfmax = self.get_spatial_fmax(np.rad2deg(newrot))\n\n # Calculate the pixel increments along the X and Y axes\n # that will be needed to adequately sample these\n # frequencies.\n newinc = 0.5 / newfmax * np.sign(oldinc)\n\n else:\n # Keep pixel sizes fixed?\n newinc = oldinc\n\n # Get the coordinate reference pixel of the input image,\n # arranged as a column vector in python (Y,X) order. 
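\n # (For example, an image with CRPIX1 = CRPIX2 = 1.0 gives\n # oldcrpix = [[0.0], [0.0]], i.e. the reference pixel sits at\n # python index [0, 0].)\n # 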
Note that\n # crpix contains FITS pixel indexes which are 1 greater than\n # the corresponding python pixel indexes.\n oldcrpix = np.array([[self.wcs.get_crpix2() - 1],\n [self.wcs.get_crpix1() - 1]])\n\n # Create a similar matrix that would scale a column vector in\n # (X,Y) axis order by the rotated X-axis and Y-axis pixel\n # increments.\n newscale = np.array([[newinc[1], 0.0],\n [0.0, newinc[0]]])\n\n # Get the current WCS coordinate transformation matrix (which\n # transforms pixel coordinates to intermediate sky\n # coordinates).\n oldcd = self.wcs.get_cd()\n\n # Create a rotation matrix that multiplies the sky by the\n # above angle.\n sinq = np.sin(newrot)\n cosq = np.cos(newrot)\n sky_mrot = np.array([[cosq, -sinq],\n [sinq, cosq]])\n\n # Compute the coordinate transformation matrix that will\n # pertain to the output image. We can interpolate to any grid,\n # so take the opportunity to zero any shear terms that were in\n # the original CD matrix, and just create a CD matrix that\n # rotates and scales the sky.\n newcd = np.dot(sky_mrot, newscale)\n\n # To fill the pixels of the output image we need a coordinate\n # transformation matrix to transform pixel indexes of the\n # rotated image back to pixel indexes of the input image. To\n # do this, we apply the new CD matrix to convert the rotated\n # indexes to intermediate sky coordinates, then apply the\n # inverse of the old CD matrix, to convert these back to\n # indexes of the original image.\n wcs_remap = np.dot(np.linalg.inv(oldcd), newcd)\n\n # The above matrix was computed from the WCS CD matrix, which\n # is designed to multiply a column vector in FORTRAN (X,Y)\n # axis order. Rearrange it to the equivalent matrix for\n # multiplying a column vector in python (Y,X) axis order.\n new2old = np.array([[wcs_remap[1, 1], wcs_remap[1, 0]],\n [wcs_remap[0, 1], wcs_remap[0, 0]]])\n\n # Also compute the inverse of this, so that we can convert\n # from input image indexes to rotated image indexes.\n old2new = np.linalg.inv(new2old)\n\n # Have we been asked to reshape the image array to just encompass\n # the rotated image?\n if reshape:\n\n # Determine where the corners of the input image end up in the\n # output image with CRPIX set to [0,0].\n corners = np.array(\n [[0, 0, self.shape[0] - 1, self.shape[0] - 1], # Y indexes\n [0, self.shape[1] - 1, 0, self.shape[1] - 1]], # X indexes\n dtype=float)\n pix = np.dot(old2new, (corners - oldcrpix))\n\n # Get the ranges of indexes occupied by the input image in the\n # rotated image.\n ymin = min(pix[0, :])\n ymax = max(pix[0, :])\n xmin = min(pix[1, :])\n xmax = max(pix[1, :])\n\n # Calculate the indexes of the coordinate reference pixel of\n # the rotated image, such that pixel [xmin,ymin] is moved to\n # array index [0,0]. 
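\n # (Worked example: if the rotated corners span ymin = -49.0 and\n # xmin = 0.0, then newcrpix = [[49.0], [0.0]], which slides the\n # lower corner of the rotated footprint onto array index [0, 0].)\n # 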
Use (Y,X) axis ordering.\n newcrpix = np.array([[-ymin], [-xmin]])\n\n # Calculate the dimensions of the output image in (Y,X) order.\n # The dimensions are ymax-ymin+1 rounded up, and xmax-xmin+1\n # rounded up.\n newdims = np.array([int(ymax - ymin + 1.5),\n int(xmax - xmin + 1.5)])\n\n # If not asked to reshape the image array, keep the image\n # dimensions the same, and choose the reference pixel such\n # that the rotation appears to occur around a specified pixel,\n # or the central pixel of the image.\n else:\n newdims = np.asarray(self.shape)\n\n # If no pivot pixel has been specified, substitute the\n # central pixel of the input image.\n if pivot is None:\n pivot = np.asarray(self.shape, dtype=float) / 2.0\n else:\n pivot = np.asarray(pivot, dtype=float)\n\n # Convert the pivot indexes to a column vector.\n pivot = pivot[np.newaxis, :].T\n\n # If the new coordinate reference pixel were zero, where\n # would the pivot pixel end up if we rotated the image\n # around oldcrpix?\n pix = np.dot(old2new, (pivot - oldcrpix))\n\n # Calculate the indexes of the coordinate reference pixel of\n # the rotated image, such that pixel pix is moved to\n # pivot. Use (Y,X) axis ordering.\n newcrpix = pivot - pix\n\n # The affine_transform() function calculates the pixel index\n # of the input image that corresponds to a given pixel index\n # of the rotated image, as follows:\n #\n # oldpixel = new2old * newpixel + offset\n #\n # The coordinate reference pixels of the rotated and input\n # images must refer to the same position on the sky, so:\n #\n # oldcrpix = new2old * newcrpix + offset\n #\n # Thus the value of 'offset' has to be:\n #\n # offset = oldcrpix - new2old * newcrpix\n\n offset = oldcrpix - np.dot(new2old, newcrpix)\n\n # Get a copy of the current image array with masked values filled.\n newdata = self._prepare_data(interp)\n\n # For each pixel of the rotated image, use the new2old affine\n # transformation matrix to determine where that pixel\n # originates in the input image, then interpolate a value from\n # the pixels of the input image surrounding that point.\n newdata = affine_transform(newdata, matrix=new2old,\n offset=offset.flatten(), cval=0.0,\n output_shape=newdims, output=float,\n order=order, prefilter=prefilter)\n\n # Zero the current data array and then fill its masked pixels\n # with floating point 1.0s, so that we can rotate this in the\n # the same way as the data to see where the masked areas end up.\n self._data[:, :] = 0.0\n newmask = np.ma.filled(self.data, 1.0)\n\n # Rotate the array of 1s that represent masked pixels, and fill\n # corners that weren't mapped from the input array with 1s, so\n # that we end up flagging them too.\n newmask = affine_transform(newmask, matrix=new2old,\n offset=offset.flatten(), cval=1.0,\n output_shape=newdims, output=float,\n order=order, prefilter=prefilter)\n\n # Create a new boolean mask in which all pixels that had an\n # integrated contribution of more than 'cutoff' originally\n # masked pixels are masked. 
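\n # (For instance, with the default cutoff of 0.25 an output pixel that\n # drew 30% of its interpolated value from masked input pixels gets\n # flagged, while one that drew only 20% is kept.)\n # 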
Note that setting the cutoff to\n # the \"obvious\" value of zero results in lots of pixels being\n # masked that are far away from any masked pixels, due to\n # precision errors in the affine_transform() function.\n # Limit the minimum value of the cutoff to avoid this.\n newmask = np.greater(newmask, max(cutoff, 1e-6))\n\n # If the image has an associated array of variances, rotate it too.\n if self._var is not None:\n newvar = affine_transform(self._var, matrix=new2old,\n offset=offset.flatten(), cval=0.0,\n output_shape=newdims, output=float,\n order=order, prefilter=prefilter)\n else:\n newvar = None\n\n # Compute the number of old pixel areas per new pixel, if the\n # pixel dimensions have been changed.\n if regrid:\n n = newinc.prod() / oldinc.prod()\n\n # Scale the flux per pixel by the multiplicative increase in the\n # area of a pixel?\n if flux:\n\n # Scale the pixel fluxes by the increase in the area.\n newdata *= n\n\n # Each output pixel is an interpolation between the\n # nearest neighboring pixels, so the variance is unchanged\n # by resampling. Scaling the pixel values by n, however,\n # increases the variances by n**2.\n if newvar is not None:\n newvar *= n**2\n\n # Install the rotated data array, mask and variances.\n self._data = newdata\n self._mask = newmask\n self._var = newvar\n\n # Install the new world-coordinate transformation matrix, along\n # with the new reference pixel.\n self.wcs.set_cd(newcd)\n self.wcs.naxis1 = newdims[1]\n self.wcs.naxis2 = newdims[0]\n\n # Record the new value of the coordinate reference pixel,\n # being careful to convert from python 0-relative pixel\n # indexes to FITS 1-relative pixel indexes.\n self.wcs.set_crpix1(newcrpix[1] + 1)\n self.wcs.set_crpix2(newcrpix[0] + 1)\n\n # If allowed to reshape the array, crop away any entirely\n # masked margins.\n if reshape:\n self.crop()\n\n def rotate(self, theta=0.0, interp='no', reshape=False, order=1,\n pivot=None, unit=u.deg, regrid=None, flux=False, cutoff=0.25,\n inplace=False):\n \"\"\"Rotate the sky within an image in the sense of a rotation from\n north to east.\n\n For example if the image rotation angle that is currently\n returned by image.get_rot() is zero, image.rotate(10.0) will\n rotate the northward direction of the image 10 degrees\n eastward of where it was, and self.get_rot() will thereafter\n return 10.0.\n\n Uses `scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n theta : float\n The angle to rotate the image (degrees). Positive\n angles rotate features in the image in the sense of a\n rotation from north to east.\n interp : 'no' | 'linear' | 'spline'\n If 'no', replace masked data with the median value of the\n image. This is the default.\n If 'linear', replace masked values using a linear\n interpolation between neighboring values.\n if 'spline', replace masked values using a spline\n interpolation between neighboring values.\n reshape : bool\n If True, the size of the output image array is adjusted\n so that the input image is contained completely in the\n output. The default is False.\n order : int\n The order of the prefilter that is applied by the affine\n transform function. Prefiltering is not really needed for\n band-limited images, but this option is retained for\n backwards compatibility with an older version of the\n image.rotate method. 
In general orders > 1 tend to\n generate ringing at sharp edges, such as those of CCD\n saturation spikes, so this argument is best left with\n its default value of 1.\n pivot : float,float or None\n When the reshape option is True, or the pivot argument is\n None, the image is rotated around its center.\n Alternatively, when the reshape option is False, the pivot\n argument can be used to indicate which pixel index [y,x]\n the image will be rotated around. Integer pixel indexes\n specify the centers of pixels. Non-integer values can be\n used to indicate positions between pixel centers.\n\n On the sky, the rotation always occurs around the\n coordinate reference position of the observation. However\n the rotated sky is then mapped onto the pixel array of the\n image in such a way as to keep the sky position of the\n pivot pixel at the same place. This makes the image appear\n to rotate around that pixel.\n unit : `astropy.units.Unit`\n The angular units of the rotation angle, theta.\n regrid : bool\n When this option is True, the pixel sizes along each axis\n are adjusted to avoid undersampling or oversampling any\n direction in the original image that would otherwise be\n rotated onto a lower or higher resolution axis. This is\n particularly important for images whose pixels have\n different angular dimensions along the X and Y axes, but\n it can also be important for images with square pixels,\n because the diagonal of an image with square pixels has\n higher resolution than the axes of that image.\n\n If this option is left with its default value of None,\n then it is given the value of the reshape option.\n flux : bool\n This tells the function whether the pixel units of the\n image are flux densities (flux=True), such as\n erg/s/cm2/Hz, or whether they are per-steradian brightness\n units (flux=False), such as erg/s/cm2/Hz/steradian. 
It\n needs to know this when it changes the pixel size, because\n when pixel sizes change, resampled flux densities need to\n be corrected for the change in the area per pixel, where\n resampled brightnesses don't.\n cutoff : float\n Mask each output pixel where at least this fraction of the\n pixel was interpolated from dummy values given to masked\n input pixels.\n inplace : bool\n If False, return a rotated copy of the image (the default).\n If True, rotate the original image in-place, and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n\n res = self if inplace else self.copy()\n res._rotate(theta=theta, interp=interp, reshape=reshape, order=order,\n pivot=pivot, unit=unit, regrid=regrid, flux=flux,\n cutoff=cutoff)\n return res\n\n def norm(self, typ='flux', value=1.0):\n \"\"\"Normalize the total flux in place to a given value (1 by default).\n\n Parameters\n ----------\n typ : 'flux' | 'sum' | 'max'\n If 'flux', the flux is normalized and\n the pixel area is taken into account.\n\n If 'sum', the flux is normalized to the sum\n of flux independently of pixel size.\n\n If 'max', the flux is normalized so that\n the maximum intensity will be 'value'.\n value : float\n Normalized value (default 1).\n \"\"\"\n if typ == 'flux':\n norm = value / (self.get_step().prod() * self.data.sum())\n elif typ == 'sum':\n norm = value / self.data.sum()\n elif typ == 'max':\n norm = value / self.data.max()\n else:\n raise ValueError('Error in typ: only flux, sum and max are permitted')\n self._data *= norm\n if self._var is not None:\n self._var *= (norm * norm)\n\n def background(self, niter=3, sigma=3.0):\n \"\"\"Compute the image background with sigma-clipping.\n\n Returns the background value and its standard deviation.\n\n Parameters\n ----------\n niter : int\n Number of iterations.\n sigma : float\n Number of sigma used for the clipping.\n\n Returns\n -------\n out : (float, float)\n The background value and its standard deviation.\n \"\"\"\n tab = self.data.compressed()\n\n for n in range(niter + 1):\n tab = tab[tab <= (tab.mean() + sigma * tab.std())]\n return tab.mean(), tab.std()\n\n def peak_detection(self, nstruct, niter, threshold=None):\n \"\"\"Return a list of peak locations.\n\n Parameters\n ----------\n nstruct : int\n Size of the structuring element used for the erosion.\n niter : int\n Number of iterations used for the erosion and the dilation.\n threshold : float\n Threshold value. 
If None, it is initialized with the background value.\n\n Returns\n -------\n out : np.array\n\n \"\"\"\n if threshold is None:\n background, std = self.background()\n threshold = background + 10 * std\n\n def _struct(n):\n struct = np.zeros([n, n])\n for i in range(0, n):\n dist = abs(i - (n // 2))\n struct[i][dist: abs(n - dist)] = 1\n return struct\n\n selec = self.data > threshold\n selec.fill_value = False\n struct = _struct(nstruct)\n selec = ndi.binary_erosion(selec, structure=struct, iterations=niter)\n selec = ndi.binary_dilation(selec, structure=struct, iterations=niter)\n selec = ndi.binary_fill_holes(selec)\n structure = ndi.generate_binary_structure(2, 2)\n label = ndi.measurements.label(selec, structure)\n pos = ndi.measurements.center_of_mass(self.data, label[0],\n np.arange(label[1]) + 1)\n return np.array(pos)\n\n def peak(self, center=None, radius=0, unit_center=u.deg,\n unit_radius=u.arcsec, dpix=2, background=None, plot=False):\n \"\"\"Find image peak location.\n\n Uses `scipy.ndimage.measurements.maximum_position` and\n `scipy.ndimage.measurements.center_of_mass`.\n\n Parameters\n ----------\n center : (float,float)\n Center (y,x) of the explored region.\n If center is None, the full image is explored.\n radius : float or (float,float)\n Radius defining the explored region.\n unit_center : `astropy.units.Unit`\n Type of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_radius : `astropy.units.Unit`\n Radius unit.\n Arcseconds by default (use None for radius in pixels)\n dpix : int\n Half size of the window (in pixels) to compute the center of\n gravity.\n background : float\n Background value. If None, it is computed.\n plot : bool\n If True, the peak center is overplotted on the image.\n\n Returns\n -------\n out : dict {'y', 'x', 'p', 'q', 'data'}\n Containing the peak position and the peak intensity.\n\n \"\"\"\n if center is None or radius == 0:\n d = self.data\n imin = 0\n jmin = 0\n else:\n if is_number(radius):\n radius = (radius, radius)\n\n if unit_center is not None:\n center = self.wcs.sky2pix(center, unit=unit_center)[0]\n if unit_radius is not None:\n radius = radius / self.wcs.get_step(unit=unit_radius)\n\n imin = max(0, int(center[0] - radius[0]))\n imax = min(self.shape[0], int(center[0] + radius[0] + 1))\n jmin = max(0, int(center[1] - radius[1]))\n jmax = min(self.shape[1], int(center[1] + radius[1] + 1))\n\n d = self.data[imin:imax, jmin:jmax]\n if np.shape(d)[0] == 0 or np.shape(d)[1] == 0:\n raise ValueError('Coord area outside image limits')\n\n ic, jc = ndi.measurements.maximum_position(d)\n if dpix == 0:\n di = 0\n dj = 0\n else:\n if background is None:\n background = self.background()[0]\n di, dj = ndi.measurements.center_of_mass(\n d[max(0, ic - dpix):ic + dpix + 1,\n max(0, jc - dpix):jc + dpix + 1] - background)\n ic = imin + max(0, ic - dpix) + di\n jc = jmin + max(0, jc - dpix) + dj\n\n # Guard against the center-of-gravity refinement pushing the peak\n # position outside the image array; in that case no peak is returned.\n iic, jjc = int(round(ic)), int(round(jc))\n if (iic < 0) or (jjc < 0) or (iic >= self.data.shape[0]) or (jjc >= self.data.shape[1]):\n return None\n\n [[dec, ra]] = self.wcs.pix2sky([[ic, jc]])\n maxv = self.data[int(round(ic)), int(round(jc))]\n if plot:\n self._ax.plot(jc, ic, 'r+')\n try:\n _str = 'center (%g,%g) radius (%g,%g) dpix %i peak: %g %g' % \\\n (center[0], center[1], radius[0], radius[1], dpix, jc, ic)\n except Exception:\n _str = 'dpix %i peak: %g %g' % (dpix, ic, jc)\n self._ax.title(_str)\n\n return {'x': ra, 'y': dec, 'p': ic, 'q': jc, 'data': maxv}\n\n def fwhm(self, 
center=None, radius=0, unit_center=u.deg,\n unit_radius=u.arcsec):\n \"\"\"Compute the FWHM.\n\n Parameters\n ----------\n center : (float,float)\n Center of the explored region.\n If center is None, the full image is explored.\n radius : float or (float,float)\n Radius defining the explored region.\n unit_center : `astropy.units.Unit`\n Type of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_radius : `astropy.units.Unit`\n Radius unit. Arcseconds by default (use None for radius in pixels)\n\n Returns\n -------\n out : array of float\n [fwhm_y,fwhm_x], returned in unit_radius (arcseconds by default).\n\n \"\"\"\n if center is None or radius == 0:\n img = self\n else:\n size = ((radius * 2, radius * 2) if is_number(radius)\n else (radius[0] * 2, radius[1] * 2))\n img = self.subimage(center, size, unit_center=unit_center,\n unit_size=unit_radius)\n\n width = img.moments(unit=unit_radius)\n return width / 2 * gaussian_sigma_to_fwhm\n\n def ee(self, center=None, radius=0, unit_center=u.deg,\n unit_radius=u.arcsec, frac=False, cont=0):\n \"\"\"Compute ensquared/encircled energy.\n\n Parameters\n ----------\n center : (float,float)\n Center of the explored region.\n If center is None, the full image is explored.\n radius : float or (float,float)\n Radius defining the explored region.\n If float, it defines a circular region (encircled energy).\n If (float,float), it defines a rectangular region (ensquared\n energy).\n unit_center : `astropy.units.Unit`\n Type of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_radius : `astropy.units.Unit`\n Radius unit. Arcseconds by default (use None for radius in pixels)\n frac : bool\n If frac is True, result is given relative to the total energy of\n the full image.\n cont : float\n Continuum value.\n\n Returns\n -------\n out : float\n Ensquared/encircled flux.\n\n \"\"\"\n if center is None or radius == 0:\n if frac:\n return 1.\n else:\n return (self.data - cont).sum()\n else:\n if is_number(radius):\n circular = True\n radius2 = radius * radius\n radius = (radius, radius)\n else:\n circular = False\n\n if unit_center is not None:\n center = self.wcs.sky2pix(center, unit=unit_center)[0]\n if unit_radius is not None:\n radius = radius / self.wcs.get_step(unit=unit_radius)\n radius2 = radius[0] * radius[1]\n\n imin = max(0, center[0] - radius[0])\n imax = min(center[0] + radius[0] + 1, self.shape[0])\n jmin = max(0, center[1] - radius[1])\n jmax = min(center[1] + radius[1] + 1, self.shape[1])\n ima = self[imin:imax, jmin:jmax]\n\n if circular:\n xaxis = np.arange(ima.shape[0], dtype=float) \\\n - ima.shape[0] / 2.\n yaxis = np.arange(ima.shape[1], dtype=float) \\\n - ima.shape[1] / 2.\n gridx = np.empty(ima.shape, dtype=float)\n gridy = np.empty(ima.shape, dtype=float)\n for j in range(ima.shape[1]):\n gridx[:, j] = xaxis\n for i in range(ima.shape[0]):\n gridy[i, :] = yaxis\n r2 = gridx * gridx + gridy * gridy\n ksel = np.where(r2 < radius2)\n if frac:\n return (ima.data[ksel] - cont).sum() \\\n / (self.data - cont).sum()\n else:\n return (ima.data[ksel] - cont).sum()\n else:\n if frac:\n return (ima.data - cont).sum() / (self.data - cont).sum()\n else:\n return (ima.data - cont).sum()\n\n def eer_curve(self, center=None, unit_center=u.deg, unit_radius=u.arcsec,\n etot=None, cont=0):\n \"\"\"Return the enclosed energy as a function of radius.\n\n The enclosed energy ratio (EER) shows how much light is concentrated\n within a certain radius around the image center.\n
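\n A short usage sketch (``ima`` stands for any `~mpdaf.obj.Image`\n instance and ``plt`` for matplotlib.pyplot; both are assumptions,\n not objects defined in this module)::\n\n radius, eer = ima.eer_curve(cont=0.0)\n plt.plot(radius, eer)\n\n 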
Parameters\n ----------\n center : (float,float)\n Center of the explored region.\n If center is None, center of the image is used.\n unit_center : `astropy.units.Unit`\n Type of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_radius : `astropy.units.Unit`\n Radius units (arcseconds by default).\n etot : float\n Total energy used to compute the ratio.\n If etot is not set, it is computed from the full image.\n cont : float\n Continuum value.\n\n Returns\n -------\n out : (float array, float array)\n Radius array, EER array\n \"\"\"\n if center is None:\n i = self.shape[0] // 2\n j = self.shape[1] // 2\n elif unit_center is None:\n i = center[0]\n j = center[1]\n else:\n pixcrd = self.wcs.sky2pix([center[0], center[1]],\n nearest=True, unit=unit_center)\n i = pixcrd[0][0]\n j = pixcrd[0][1]\n\n nmax = min(self.shape[0] - i, self.shape[1] - j, i, j)\n if etot is None:\n etot = (self.data - cont).sum()\n if nmax <= 1:\n raise ValueError('Coord area outside image limits')\n ee = np.empty(nmax)\n for d in range(0, nmax):\n ee[d] = (self.data[i - d:i + d + 1, j - d:j + d + 1] - cont).sum() / etot\n\n radius = np.arange(0, nmax)\n if unit_radius is not None:\n step = np.mean(self.get_step(unit=unit_radius))\n radius = radius * step\n\n return radius, ee\n\n def ee_size(self, center=None, unit_center=u.deg, etot=None, frac=0.9,\n cont=0, unit_size=u.arcsec):\n \"\"\"Compute the size of the square centered on (y,x) containing the\n fraction of the energy.\n\n Parameters\n ----------\n center : (float,float)\n Center (y,x) of the explored region.\n If center is None, center of the image is used.\n etot : float\n Total energy used to compute the ratio.\n If etot is not set, it is computed from the full image.\n frac : float in ]0,1]\n Fraction of energy.\n cont : float\n Continuum value.\n unit_center : `astropy.units.Unit`\n Type of the center coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_size : `astropy.units.Unit`\n Size unit. 
Arcseconds by default (use None for size in pixels).\n\n Returns\n -------\n out : float array\n \"\"\"\n if center is None:\n i = self.shape[0] // 2\n j = self.shape[1] // 2\n elif unit_center is None:\n i = center[0]\n j = center[1]\n else:\n pixcrd = self.wcs.sky2pix([[center[0], center[1]]],\n unit=unit_center)\n i = int(pixcrd[0][0] + 0.5)\n j = int(pixcrd[0][1] + 0.5)\n\n nmax = min(self.shape[0] - i, self.shape[1] - j, i, j)\n if etot is None:\n etot = (self.data - cont).sum()\n\n if nmax <= 1:\n if unit_size is None:\n return np.array([1, 1])\n else:\n return self.get_step(unit_size)\n for d in range(1, nmax):\n ee2 = (self.data[i - d:i + d + 1, j - d:j + d + 1] - cont).sum() / etot\n if ee2 > frac:\n break\n d -= 1\n ee1 = (self.data[i - d:i + d + 1, j - d:j + d + 1] - cont).sum() / etot\n d += (frac - ee1) / (ee2 - ee1) # interpolate\n d *= 2\n if unit_size is None:\n return np.array([d, d])\n else:\n step = self.get_step(unit_size)\n return np.array([d * step[0], d * step[1]])\n\n def _interp(self, grid, spline=False):\n \"\"\"Return the interpolated values corresponding to the grid points.\n\n Parameters\n ----------\n grid :\n Pixel positions where the interpolation is evaluated.\n spline : bool\n If False, linear interpolation (uses\n `scipy.interpolate.griddata`), or if True: spline\n interpolation (uses `scipy.interpolate.bisplrep` and\n `scipy.interpolate.bisplev`).\n\n \"\"\"\n if self.mask is np.ma.nomask:\n x, y = np.mgrid[:self.shape[0], :self.shape[1]].reshape(2, -1)\n data = self._data\n else:\n x, y = np.where(~self._mask)\n data = self._data[x, y]\n\n grid = np.array(grid)\n\n if spline:\n if self.var is not None:\n var = self.var.filled(np.inf)\n weight = 1 / np.sqrt(np.abs(var[x, y]))\n else:\n weight = None\n\n tck = interpolate.bisplrep(x, y, data, w=weight)\n res = interpolate.bisplev(grid[0], grid[1], tck)\n return res\n else:\n # FIXME - check if this is still needed :\n # scipy 0.9 griddata - interpolate.interp2d segfaults when there\n # are too many data points\n # f = interpolate.interp2d(x, y, data)\n res = interpolate.griddata((x, y), data, grid.T, method='linear')\n return res\n\n def _interp_data(self, spline=False):\n \"\"\"Return data array with interpolated values for masked pixels.\n\n Parameters\n ----------\n spline : bool\n False: bilinear interpolation (it uses\n `scipy.interpolate.griddata`), True: spline interpolation (it\n uses `scipy.interpolate.bisplrep` and\n `scipy.interpolate.bisplev`).\n\n \"\"\"\n if not self._mask.any():\n return self._data\n else:\n ksel = np.where(self._mask)\n data = self._data.__copy__()\n data[ksel] = self._interp(ksel, spline)\n return data\n\n def _prepare_data(self, interp='no'):\n \"\"\"Return a copy of the data array in which masked values\n have been filled, either with the median value of the image,\n or by interpolating neighboring pixels.\n\n Parameters\n ----------\n interp : 'no' | 'linear' | 'spline'\n If 'no', replace masked data with the median image value.\n If 'linear', replace masked values using a linear\n interpolation between neighboring values.\n If 'spline', replace masked values using a spline\n interpolation between neighboring values.\n\n Returns\n -------\n out : numpy.ndarray\n A patched copy of the data array.\n\n \"\"\"\n\n if interp == 'linear':\n data = self._interp_data(spline=False)\n elif interp == 'spline':\n data = self._interp_data(spline=True)\n else:\n data = np.ma.filled(self.data, np.ma.median(self.data))\n return data\n\n def moments(self, unit=u.arcsec):\n \"\"\"Return [width_y, width_x] first moments of the 
2D gaussian.\n\n Parameters\n ----------\n unit : `astropy.units.Unit`\n Unit of the returned moments (arcseconds by default).\n If None, moments will be in pixels.\n\n Returns\n -------\n out : float array\n\n \"\"\"\n total = np.abs(self.data).sum()\n P, Q = np.indices(self.data.shape)\n # python convention: reverse x,y numpy.indices\n p = np.argmax((Q * np.abs(self.data)).sum(axis=1) / total)\n q = np.argmax((P * np.abs(self.data)).sum(axis=0) / total)\n\n # FIXME: check from where does this formula comes. Should be equivalent\n # to scipy.stats.moment(..., moment=2) ??\n col = self.data[int(p), :]\n width_q = np.sqrt(np.abs((np.arange(col.size) - p) * col).sum() /\n np.abs(col).sum())\n row = self.data[:, int(q)]\n width_p = np.sqrt(np.abs((np.arange(row.size) - q) * row).sum() /\n np.abs(row).sum())\n mom = np.array([width_p, width_q])\n if unit is not None:\n mom *= self.wcs.get_step(unit=unit)\n return mom\n\n def _prepare_fit_parameters(self, pos_min, pos_max, weight=True,\n center=None, unit_center=u.deg,\n fwhm=None, unit_fwhm=u.arcsec):\n pmin, qmin = 0, 0\n pmax, qmax = self.shape\n\n if unit_center is None:\n if pos_min is not None:\n pmin, qmin = pos_min\n if pos_max is not None:\n pmax, qmax = pos_max\n else:\n if pos_min is not None:\n pmin, qmin = self.wcs.sky2pix(pos_min, unit=unit_center,\n nearest=True)[0]\n if pos_max is not None:\n pmax, qmax = self.wcs.sky2pix(pos_max, unit=unit_center,\n nearest=True)[0]\n if pmin > pmax:\n pmin, pmax = pmax, pmin\n if qmin > qmax:\n qmin, qmax = qmax, qmin\n\n pmin = int(max(0, pmin))\n qmin = int(max(0, qmin))\n pmax = int(pmax)\n qmax = int(qmax)\n ima = self[pmin:pmax, qmin:qmax]\n\n N = ima.data.count()\n if N == 0:\n raise ValueError('empty sub-image')\n data = ima.data.compressed()\n p, q = np.where(~ima._mask)\n\n # weight\n if ima.var is not None and weight:\n wght = 1.0 / np.sqrt(np.abs(ima.var[p, q].filled(np.inf)))\n else:\n wght = np.ones(N)\n\n # initial gaussian peak position\n if center is None:\n imax = data.argmax()\n center = np.array([p[imax], q[imax]])\n elif unit_center is not None:\n center = ima.wcs.sky2pix(center, unit=unit_center)[0]\n else:\n center = np.array(center)\n center[0] -= pmin\n center[1] -= qmin\n\n # initial moment value\n if fwhm is None:\n width = ima.moments(unit=None)\n fwhm = width * gaussian_sigma_to_fwhm\n else:\n fwhm = np.asarray(fwhm) / self.wcs.get_step(unit=unit_fwhm)\n\n return ima, pmin, pmax, qmin, qmax, data, wght, p, q, center, fwhm\n\n def gauss_fit(self, pos_min=None, pos_max=None, center=None, flux=None,\n fwhm=None, circular=False, cont=0, fit_back=True, rot=0,\n peak=False, factor=1, weight=True, plot=False,\n unit_center=u.deg, unit_fwhm=u.arcsec, maxiter=0,\n verbose=True, full_output=0):\n \"\"\"Perform Gaussian fit on image.\n\n Parameters\n ----------\n pos_min : (float,float)\n Minimum y and x values. Their unit is given by the unit_center\n parameter (degrees by default).\n pos_max : (float,float)\n Maximum y and x values. Their unit is given by the unit_center\n parameter (degrees by default).\n center : (float,float)\n Initial gaussian center (y_peak,x_peak) If None it is estimated.\n The unit is given by the unit_center parameter (degrees by\n default).\n flux : float\n Initial integrated gaussian flux or gaussian peak value if peak is\n True. If None, peak value is estimated.\n fwhm : (float,float)\n Initial gaussian fwhm (fwhm_y,fwhm_x). 
If None, they are estimated.\n The unit is given by ``unit_fwhm`` (arcseconds by default).\n circular : bool\n True: circular gaussian, False: elliptical gaussian\n cont : float\n Continuum value, 0 by default.\n fit_back : bool\n False: continuum value is fixed,\n True: continuum value is a fit parameter.\n rot : float\n Initial rotation in degrees.\n If None, rotation is fixed to 0.\n peak : bool\n If True, flux contains a gaussian peak value.\n factor : int\n If factor<=1, gaussian value is computed in the center of each\n pixel. If factor>1, for each pixel, gaussian value is the sum of\n the gaussian values on the factor*factor pixels divided by the\n pixel area.\n weight : bool\n If weight is True, the weight is computed as the inverse of\n variance.\n unit_center : `astropy.units.Unit`\n Type of the center and position coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_fwhm : `astropy.units.Unit`\n FWHM unit. Arcseconds by default (use None for radius in pixels)\n maxiter : int\n The maximum number of iterations during the sum of square\n minimization.\n plot : bool\n If True, the gaussian is plotted.\n verbose : bool\n If True, the Gaussian parameters are printed at the end of the\n method.\n full_output : bool\n True (non-zero) to return a `mpdaf.obj.Gauss2D` object containing\n the gaussian image.\n\n Returns\n -------\n out : `mpdaf.obj.Gauss2D`\n\n \"\"\"\n ima, pmin, pmax, qmin, qmax, data, wght, p, q, center, fwhm = \\\n self._prepare_fit_parameters(\n pos_min, pos_max, weight=weight,\n center=center, unit_center=unit_center,\n fwhm=fwhm, unit_fwhm=unit_fwhm)\n\n # initial gaussian integrated flux\n if flux is None:\n peak = ima._data[int(center[0]), int(center[1])] - cont\n elif peak is True:\n peak = flux - cont\n\n N = len(p)\n width = fwhm * gaussian_fwhm_to_sigma\n flux = peak * np.sqrt(2 * np.pi * (width[0] ** 2)) \\\n * np.sqrt(2 * np.pi * (width[1] ** 2))\n\n if circular:\n rot = None\n if not fit_back:\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n cont + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(p - v[1]) ** 2 / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(q - v[3]) ** 2 / (2 * v[2] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], center[1]]\n else:\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n v[4] + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(p - v[1]) ** 2 / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(q - v[3]) ** 2 / (2 * v[2] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], center[1], cont]\n else:\n if not fit_back:\n if rot is None:\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n cont + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(p - v[1]) ** 2 / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[4] ** 2))) \\\n * np.exp(-(q - v[3]) ** 2 / (2 * v[4] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], center[1], width[1]]\n else:\n # rotation angle in rad\n rot = np.pi * rot / 180.0\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n cont + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-((p - v[1]) * np.cos(v[5])\n - (q - v[3]) * np.sin(v[5])) ** 2\n / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[4] ** 2))) \\\n * np.exp(-((p - v[1]) * np.sin(v[5])\n + (q - v[3]) * np.cos(v[5])) ** 2\n / (2 * v[4] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], 
center[1], width[1], rot]\n else:\n if rot is None:\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n v[5] + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-(p - v[1]) ** 2 / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[4] ** 2))) \\\n * np.exp(-(q - v[3]) ** 2 / (2 * v[4] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], center[1],\n width[1], cont]\n else:\n # rotation angle in rad\n rot = np.pi * rot / 180.0\n # 2d gaussian function\n gaussfit = lambda v, p, q: \\\n v[6] + v[0] * (1 / np.sqrt(2 * np.pi * (v[2] ** 2))) \\\n * np.exp(-((p - v[1]) * np.cos(v[5])\n - (q - v[3]) * np.sin(v[5])) ** 2\n / (2 * v[2] ** 2)) \\\n * (1 / np.sqrt(2 * np.pi * (v[4] ** 2))) \\\n * np.exp(-((p - v[1]) * np.sin(v[5])\n + (q - v[3]) * np.cos(v[5])) ** 2\n / (2 * v[4] ** 2))\n # initial guesses for Gaussian Fit\n v0 = [flux, center[0], width[0], center[1],\n width[1], rot, cont]\n\n # Minimize the sum of squares\n if factor > 1:\n factor = int(factor)\n deci = np.ones((factor, factor)) \\\n * np.arange(factor)[:, np.newaxis] \\\n / float(factor) + 1. / float(factor * 2) - 0.5\n fp = (p[:, np.newaxis] + deci.ravel()[np.newaxis, :]).ravel()\n fq = (q[:, np.newaxis] + deci.T.ravel()[np.newaxis, :]).ravel()\n pixcrd = np.array(list(zip(fp, fq)))\n\n e_gauss_fit = lambda v, p, q, data, w: \\\n w * (((gaussfit(v, p, q)).reshape(N, factor * factor).sum(1)\n / factor / factor).T.ravel() - data)\n v, covar, info, mesg, success = \\\n leastsq(e_gauss_fit, v0[:],\n args=(pixcrd[:, 0], pixcrd[:, 1], data, wght),\n maxfev=maxiter, full_output=1)\n else:\n e_gauss_fit = lambda v, p, q, data, w: \\\n w * (gaussfit(v, p, q) - data)\n v, covar, info, mesg, success = \\\n leastsq(e_gauss_fit, v0[:], args=(p, q, data, wght),\n maxfev=maxiter, full_output=1)\n\n if success not in [1, 2, 3, 4]:\n self._logger.info(mesg)\n\n # calculate the errors from the estimated covariance matrix\n chisq = sum(info[\"fvec\"] * info[\"fvec\"])\n dof = len(info[\"fvec\"]) - len(v)\n if covar is not None:\n err = np.array([np.sqrt(np.abs(covar[i, i]))\n * np.sqrt(np.abs(chisq / dof))\n for i in range(len(v))])\n else:\n err = None\n\n # center in pixel in the input image\n v[1] += int(pmin)\n v[3] += int(qmin)\n\n # plot\n # (does not work when a colorbar is displayed)\n if plot:\n pp = np.arange(pmin, pmax, float(pmax - pmin) / 100)\n qq = np.arange(qmin, qmax, float(qmax - qmin) / 100)\n ff = np.empty((np.shape(pp)[0], np.shape(qq)[0]))\n for i in range(np.shape(pp)[0]):\n ff[i, :] = gaussfit(v, pp[i], qq[:])\n self._ax.contour(qq, pp, ff, 5)\n\n # Gauss2D object in pixels\n flux = v[0]\n p_peak = v[1]\n q_peak = v[3]\n if circular:\n if fit_back:\n cont = v[4]\n p_width = np.abs(v[2])\n q_width = p_width\n rot = 0\n else:\n if fit_back:\n if rot is None:\n cont = v[5]\n else:\n cont = v[6]\n if rot is None:\n p_width = np.abs(v[2])\n q_width = np.abs(v[4])\n rot = 0\n else:\n if np.abs(v[2]) > np.abs(v[4]):\n p_width = np.abs(v[2])\n q_width = np.abs(v[4])\n rot = (v[5] * 180.0 / np.pi) % 180\n else:\n p_width = np.abs(v[4])\n q_width = np.abs(v[2])\n rot = (v[5] * 180.0 / np.pi + 90) % 180\n p_fwhm = p_width * gaussian_sigma_to_fwhm\n q_fwhm = q_width * gaussian_sigma_to_fwhm\n peak = flux / np.sqrt(2 * np.pi * (p_width ** 2)) \\\n / np.sqrt(2 * np.pi * (q_width ** 2))\n # error\n if err is not None:\n err_flux = err[0]\n err_p_peak = err[1]\n err_q_peak = err[3]\n if circular:\n if fit_back:\n err_cont = err[4]\n else:\n err_cont = 0\n err_p_width = np.abs(err[2])\n err_q_width = err_p_width\n 
err_rot = 0\n else:\n if fit_back:\n try:\n err_cont = err[6]\n except Exception:\n err_cont = err[5]\n else:\n err_cont = 0\n\n if np.abs(v[2]) > np.abs(v[4]) or rot == 0:\n err_p_width = np.abs(err[2])\n err_q_width = np.abs(err[4])\n else:\n err_p_width = np.abs(err[4])\n err_q_width = np.abs(err[2])\n\n try:\n err_rot = err[4] * 180.0 / np.pi\n except Exception:\n err_rot = 0\n err_p_fwhm = err_p_width * gaussian_sigma_to_fwhm\n err_q_fwhm = err_q_width * gaussian_sigma_to_fwhm\n err_peak = (err_flux * p_width * q_width - flux\n * (err_p_width * q_width + err_q_width * p_width)) \\\n / (2 * np.pi * p_width * p_width * q_width * q_width)\n else:\n err_flux = np.NAN\n err_p_peak = np.NAN\n err_p_width = np.NAN\n err_p_fwhm = np.NAN\n err_q_peak = np.NAN\n err_q_width = np.NAN\n err_q_fwhm = np.NAN\n err_rot = np.NAN\n err_peak = np.NAN\n err_cont = np.NAN\n\n if unit_center is not None:\n # Gauss2D object in degrees/arcseconds\n center = self.wcs.pix2sky([p_peak, q_peak], unit=unit_center)[0]\n\n err_center = np.array([err_p_peak, err_q_peak]) * \\\n self.wcs.get_step(unit=unit_center)\n else:\n center = (p_peak, q_peak)\n err_center = (err_p_peak, err_q_peak)\n\n step = self.wcs.get_step(unit=unit_fwhm)\n fwhm = np.array([p_fwhm, q_fwhm]) * step\n err_fwhm = np.array([err_p_fwhm, err_q_fwhm]) * step\n\n gauss = Gauss2D(center, flux, fwhm, cont, rot, peak, err_center,\n err_flux, err_fwhm, err_cont, err_rot, err_peak)\n\n if verbose:\n gauss.print_param()\n if full_output:\n ima = gauss_image(shape=self.shape, wcs=self.wcs, gauss=gauss,\n unit_center=unit_center, unit_fwhm=unit_fwhm)\n gauss.ima = ima\n return gauss\n\n def moffat_fit(self, pos_min=None, pos_max=None, center=None, fwhm=None,\n flux=None, n=2.0, circular=False, cont=0, fit_back=True,\n rot=0, peak=False, factor=1, weight=True, plot=False,\n unit_center=u.deg, unit_fwhm=u.arcsec,\n verbose=True, full_output=0, fit_n=True, maxiter=0):\n \"\"\"Perform moffat fit on image.\n\n Parameters\n ----------\n\n pos_min : (float,float)\n Minimum y and x values. Their unit is given by the unit_center\n parameter (degrees by default).\n pos_max : (float,float)\n Maximum y and x values. Their unit is given by the unit_center\n parameter (degrees by default).\n center : (float,float)\n Initial moffat center (y_peak,x_peak). If None it is estimated.\n The unit is given by the unit_center parameter (degrees by\n default).\n flux : float\n Initial integrated gaussian flux or gaussian peak value if peak is\n True. If None, peak value is estimated.\n fwhm : (float,float)\n Initial gaussian fwhm (fwhm_y,fwhm_x). 
If None, they are estimated.\n Their unit is given by the unit_fwhm parameter (arcseconds by\n default).\n n : int\n Initial atmospheric scattering coefficient.\n circular : bool\n True: circular moffat, False: elliptical moffat\n cont : float\n continuum value, 0 by default.\n fit_back : bool\n False: continuum value is fixed,\n True: continuum value is a fit parameter.\n rot : float\n Initial angle position in degree.\n peak : bool\n If true, flux contains a gaussian peak value.\n factor : int\n If factor<=1, gaussian is computed in the center of each pixel.\n If factor>1, for each pixel, gaussian value is the sum of the\n gaussian values on the factor*factor pixels divided by the pixel\n area.\n weight : bool\n If weight is True, the weight is computed as the inverse of\n variance.\n plot : bool\n If True, the gaussian is plotted.\n unit_center : `astropy.units.Unit`\n type of the center and position coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_fwhm : `astropy.units.Unit`\n FWHM unit. Arcseconds by default (use None for radius in pixels)\n full_output : bool\n True to return a `mpdaf.obj.Moffat2D` object containing the\n moffat image.\n fit_n : bool\n False: n value is fixed,\n True: n value is a fit parameter.\n maxiter : int\n The maximum number of iterations during the sum of square\n minimization.\n\n Returns\n -------\n out : `mpdaf.obj.Moffat2D`\n\n \"\"\"\n ima, pmin, pmax, qmin, qmax, data, wght, p, q, center, fwhm = \\\n self._prepare_fit_parameters(\n pos_min, pos_max, weight=weight,\n center=center, unit_center=unit_center,\n fwhm=fwhm, unit_fwhm=unit_fwhm)\n\n N = len(p)\n a = fwhm[0] / (2 * np.sqrt(2 ** (1.0 / n) - 1.0))\n e = fwhm[0] / fwhm[1]\n\n # initial gaussian integrated flux\n if flux is None:\n I = ima.data.data[int(center[0]), int(center[1])] - cont\n elif peak is True:\n I = flux - cont\n else:\n I = flux * (n - 1) / (np.pi * a * a * e)\n\n def moffat(c, x, y, amplitude, x_0, y_0, alpha, beta, e):\n \"\"\"Two dimensional Moffat model function\"\"\"\n rr_gg = (((x - x_0) / alpha) ** 2 + ((y - y_0) / alpha / e) ** 2)\n return c + amplitude * (1 + rr_gg) ** (-beta)\n\n # def ellpt_moffat(c, x, y, amplitude, x_0, y_0, alpha, beta, e, theta):\n # \"\"\"Two dimensional elliptical Moffat model function\"\"\"\n # cost = np.cos(theta)\n # sint = np.sin(theta)\n # xdiff = x - x_0\n # ydiff = y - y_0\n # rr_gg = (((xdiff * cost - ydiff * sint) / alpha) ** 2 +\n # ((xdiff * sint + ydiff * cost) / alpha / e) ** 2)\n # return c + amplitude * (1 + rr_gg) ** (-beta)\n\n if circular:\n rot = None\n if not fit_back:\n if fit_n:\n moffatfit = lambda v, p, q: moffat(\n cont, p, q, v[0], v[1], v[2], v[3], v[4], 1)\n v0 = [I, center[0], center[1], a, n]\n else:\n moffatfit = lambda v, p, q: moffat(\n cont, p, q, v[0], v[1], v[2], v[3], n, 1)\n v0 = [I, center[0], center[1], a]\n else:\n # 2d moffat function\n if fit_n:\n moffatfit = lambda v, p, q: moffat(\n v[5], p, q, v[0], v[1], v[2], v[3], v[4], 1)\n v0 = [I, center[0], center[1], a, n, cont]\n else:\n moffatfit = lambda v, p, q: moffat(\n v[4], p, q, v[0], v[1], v[2], v[3], n, 1)\n v0 = [I, center[0], center[1], a, cont]\n else:\n if not fit_back:\n if rot is None:\n if fit_n:\n moffatfit = lambda v, p, q: moffat(\n cont, p, q, v[0], v[1], v[2], v[3], v[4], v[5])\n v0 = [I, center[0], center[1], a, n, e]\n else:\n moffatfit = lambda v, p, q: moffat(\n cont, p, q, v[0], v[1], v[2], v[3], n, v[5])\n v0 = [I, center[0], center[1], a, e]\n else:\n # rotation angle in rad\n rot = np.pi * rot / 180.0\n 
if fit_n:\n # 2d moffat function\n moffatfit = lambda v, p, q: cont + v[0] \\\n * (1 + (((p - v[1]) * np.cos(v[6]) - (q - v[2])\n * np.sin(v[6])) / v[3]) ** 2\n + (((p - v[1]) * np.sin(v[6]) + (q - v[2])\n * np.cos(v[6])) / v[3] / v[5]) ** 2) ** (-v[4])\n # initial guesses\n v0 = [I, center[0], center[1], a, n, e, rot]\n else:\n # 2d moffat function\n moffatfit = lambda v, p, q: cont + v[0] \\\n * (1 + (((p - v[1]) * np.cos(v[5]) - (q - v[2])\n * np.sin(v[5])) / v[3]) ** 2\n + (((p - v[1]) * np.sin(v[5]) + (q - v[2])\n * np.cos(v[5])) / v[3] / v[4]) ** 2) ** (-n)\n # initial guesses\n v0 = [I, center[0], center[1], a, e, rot]\n else:\n if rot is None:\n if fit_n:\n moffatfit = lambda v, p, q: moffat(\n v[6], p, q, v[0], v[1], v[2], v[3], v[4], v[5])\n v0 = [I, center[0], center[1], a, n, e, cont]\n else:\n moffatfit = lambda v, p, q: moffat(\n v[5], p, q, v[0], v[1], v[2], v[3], n, v[4])\n v0 = [I, center[0], center[1], a, e, cont]\n else:\n # rotation angle in rad\n rot = np.pi * rot / 180.0\n if fit_n:\n # 2d moffat function\n moffatfit = lambda v, p, q: v[7] + v[0] \\\n * (1 + (((p - v[1]) * np.cos(v[6])\n - (q - v[2]) * np.sin(v[6])) / v[3]) ** 2\n + (((p - v[1]) * np.sin(v[6])\n + (q - v[2]) * np.cos(v[6])) / v[3] / v[5]) ** 2) ** (-v[4])\n # initial guesses\n v0 = [I, center[0], center[1], a, n, e, rot, cont]\n else:\n # 2d moffat function\n moffatfit = lambda v, p, q: v[6] + v[0] \\\n * (1 + (((p - v[1]) * np.cos(v[5])\n - (q - v[2]) * np.sin(v[5])) / v[3]) ** 2\n + (((p - v[1]) * np.sin(v[5])\n + (q - v[2]) * np.cos(v[5])) / v[3] / v[4]) ** 2) ** (-n)\n # initial guesses\n v0 = [I, center[0], center[1], a, e, rot, cont]\n\n # Minimize the sum of squares\n if factor > 1:\n factor = int(factor)\n deci = np.ones((factor, factor)) \\\n * np.arange(factor)[:, np.newaxis] / float(factor) \\\n + 1 / float(factor * 2)\n fp = (p[:, np.newaxis] + deci.ravel()[np.newaxis, :]).ravel()\n fq = (q[:, np.newaxis] + deci.T.ravel()[np.newaxis, :]).ravel()\n pixcrd = np.array(list(zip(fp, fq)))\n\n e_moffat_fit = lambda v, p, q, data, w: \\\n w * (((moffatfit(v, p, q)).reshape(N, factor * factor).sum(1)\n / factor / factor).T.ravel() - data)\n v, covar, info, mesg, success = \\\n leastsq(e_moffat_fit, v0[:], args=(pixcrd[:, 0], pixcrd[:, 1],\n data, wght),\n maxfev=maxiter, full_output=1)\n else:\n e_moffat_fit = lambda v, p, q, data, w: \\\n w * (moffatfit(v, p, q) - data)\n v, covar, info, mesg, success = \\\n leastsq(e_moffat_fit, v0[:],\n args=(p, q, data, wght),\n maxfev=maxiter, full_output=1)\n\n if success not in [1, 2, 3, 4]:\n self._logger.warning(mesg)\n\n # calculate the errors from the estimated covariance matrix\n chisq = sum(info[\"fvec\"] * info[\"fvec\"])\n dof = len(info[\"fvec\"]) - len(v)\n if covar is not None:\n err = np.array([np.sqrt(np.abs(covar[i, i])) *\n np.sqrt(np.abs(chisq / dof))\n for i in range(len(v))])\n else:\n err = np.zeros_like(v)\n err[:] = np.abs(v[:] - v0[:])\n\n # center in pixel in the input image\n v[1] += int(pmin)\n v[2] += int(qmin)\n\n if plot:\n pp = np.arange(pmin, pmax, float(pmax - pmin) / 100)\n qq = np.arange(qmin, qmax, float(qmax - qmin) / 100)\n ff = np.empty((np.shape(pp)[0], np.shape(qq)[0]))\n for i in range(np.shape(pp)[0]):\n ff[i, :] = moffatfit(v, pp[i], qq[:])\n self._ax.contour(qq, pp, ff, 5)\n\n # Moffat2D object in pixels\n I, p_peak, q_peak = v[:3]\n a = np.abs(v[3])\n v = list(v[4:])\n\n # v0 = [I, center[0], center[1], a, n, e, rot, cont]\n if fit_back:\n # If present, cont is always the last parameter\n cont = v.pop()\n\n if fit_n:\n n = v.pop(0)\n\n _fwhm = a * (2 * np.sqrt(2 ** (1.0 / n) - 1.0))\n\n if circular:\n rot = 0\n fwhm = (_fwhm, _fwhm)\n else:\n e = v.pop(0)\n if e < 1:\n fwhm = (_fwhm, _fwhm * e)\n else:\n fwhm = (_fwhm * e, _fwhm)\n if rot is None:\n rot = 0\n else:\n if e < 1:\n rot = (v[0] * 180.0 / np.pi) % 180\n else:\n rot = (v[0] * 180.0 / np.pi + 90) % 180\n\n flux = I / (n - 1) * (np.pi * a * a * e)\n\n if err is not None:\n err_I, err_p_peak, err_q_peak = err[:3]\n err_a = err[3]\n if fit_n:\n err_n = err[4]\n err_fwhm = err_a * n\n if circular:\n err_e = 0\n err_rot = 0\n err_fwhm = np.array([err_fwhm, err_fwhm])\n if fit_back:\n err_cont = err[5]\n else:\n err_cont = 0\n err_flux = err_I * err_n * err_a * err_a\n else:\n err_e = err[5]\n if err_e != 0:\n err_fwhm = np.array([err_fwhm, err_fwhm / err_e])\n else:\n err_fwhm = np.array([err_fwhm, err_fwhm])\n if rot is None:\n err_rot = 0\n if fit_back:\n err_cont = err[6]\n else:\n err_cont = 0\n else:\n err_rot = err[6] * 180.0 / np.pi\n if fit_back:\n err_cont = err[7]\n else:\n err_cont = 0\n err_flux = err_I * err_n * err_a * err_a * err_e\n else:\n err_n = 0\n err_fwhm = err_a * n\n if circular:\n err_e = 0\n err_rot = 0\n err_fwhm = np.array([err_fwhm, err_fwhm])\n if fit_back:\n err_cont = err[4]\n else:\n err_cont = 0\n err_flux = err_I * err_n * err_a * err_a\n else:\n err_e = err[4]\n if err_e != 0:\n err_fwhm = np.array([err_fwhm, err_fwhm / err_e])\n else:\n err_fwhm = np.array([err_fwhm, err_fwhm])\n if rot is None:\n err_rot = 0\n if fit_back:\n err_cont = err[5]\n else:\n err_cont = 0\n else:\n err_rot = err[5] * 180.0 / np.pi\n if fit_back:\n err_cont = err[6]\n else:\n err_cont = 0\n err_flux = err_I * err_n * err_a * err_a * err_e\n else:\n err_I = np.NAN\n err_p_peak = np.NAN\n err_q_peak = np.NAN\n err_a = np.NAN\n err_n = np.NAN\n err_e = np.NAN\n err_rot = np.NAN\n err_cont = np.NAN\n err_fwhm = (np.NAN, np.NAN)\n err_flux = np.NAN\n\n if unit_center is None:\n center = (p_peak, q_peak)\n err_center = (err_p_peak, err_q_peak)\n else:\n # Moffat2D object in degrees/arcseconds\n center = self.wcs.pix2sky([p_peak, q_peak], unit=unit_center)[0]\n err_center = np.array([err_p_peak, err_q_peak]) * \\\n self.wcs.get_step(unit=unit_center)\n\n fwhm = np.array(fwhm)\n\n if unit_fwhm is not None:\n step0 = self.wcs.get_step(unit=unit_fwhm)[0]\n a = a * step0\n err_a = err_a * step0\n fwhm = fwhm * step0\n err_fwhm = err_fwhm * step0\n\n result = Moffat2D(center, flux, fwhm, cont, n,\n rot, I, err_center, err_flux, err_fwhm,\n err_cont, err_n, err_rot, err_I)\n\n if verbose:\n result.print_param()\n if full_output:\n ima = moffat_image(shape=self.shape, wcs=self.wcs, moffat=result,\n unit_center=unit_center, unit_fwhm=unit_fwhm)\n result.ima = ima\n return result\n\n def rebin(self, factor, margin='center', inplace=False):\n \"\"\"Combine neighboring pixels to reduce the size of an image by\n integer factors along each axis.\n\n Each output pixel is the mean of n pixels, where n is the\n product of the reduction factors in the factor argument.\n\n Parameters\n ----------\n factor : int or (int,int)\n The integer reduction factor along the y and x array axes.\n Note the conventional python ordering of the axes.\n margin : 'center'|'right'|'left'|'origin'\n When the dimensions of the input image are not integer\n multiples of the reduction factor, the image is truncated\n to remove just enough pixels that its dimensions are\n multiples of the reduction factor. This subimage is then\n rebinned in place of the original image. 
The margin\n parameter determines which pixels of the input image are\n truncated, and which remain.\n\n The options are:\n 'origin' or 'left':\n The starts of the axes of the output image are\n coincident with the starts of the axes of the input\n image.\n 'center':\n The center of the output image is aligned with the\n center of the input image, within one pixel along\n each axis.\n 'right':\n The ends of the axes of the output image are\n coincident with the ends of the axes of the input\n image.\n inplace : bool\n If False, return a rebinned copy of the image (the default).\n If True, rebin the original image in-place, and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n\n # Delegate the rebinning to the generic DataArray function.\n res = self._rebin(factor, margin, inplace)\n\n # If the spatial frequency band-limits of the image have been\n # reduced by the changes in the Y and X sampling intervals,\n # record this.\n res.update_spatial_fmax(0.5 / res.wcs.get_step())\n\n return res\n\n def resample(self, newdim, newstart, newstep, flux=False,\n order=1, interp='no', unit_start=u.deg, unit_step=u.arcsec,\n antialias=True, inplace=False, window=\"blackman\"):\n \"\"\"Resample an image of the sky to select its angular resolution and\n to specify which sky position appears at the center of pixel [0,0].\n\n This function is a simplified interface to the `mpdaf.obj.Image.regrid`\n function, which it calls with the following arguments::\n\n regrid(newdim, newstart, [0.0, 0.0],\n [abs(newstep[0]), -abs(newstep[1])],\n flux=flux, order=order, interp=interp, unit_pos=unit_start,\n unit_inc=unit_step, inplace=inplace)\n\n When this function is used to resample an image to a lower\n resolution, a low-pass anti-aliasing filter is applied to the\n image before it is resampled, to remove all spatial frequencies\n above half the new sampling rate. This is required to satisfy\n the Nyquist sampling constraint. It prevents high\n spatial-frequency noise and edges from being folded into lower\n frequency artefacts in the resampled image. The removal of\n this noise improves the signal to noise ratio of the resampled\n image.\n\n Parameters\n ----------\n newdim : int or (int,int)\n The desired new dimensions. Python notation: (ny,nx)\n newstart : float or (float, float)\n The sky position (dec,ra) that should appear at the center\n of pixel [0,0].\n\n If None, the value of self.get_start() is substituted,\n so that the sky position that appears at the center of pixel\n [0,0] is unchanged by the resampling operation.\n newstep : float or (float, float)\n The desired angular size of the image pixels on the sky.\n The size is expressed as either one number to request\n square pixels on the sky with that width and height, or\n two numbers that specify the height and width of\n rectangular pixels on the sky. In the latter case, the two\n numbers are the size along the Y axis of the image array\n followed by the size along the X axis.\n flux : bool\n This tells the function whether the pixel units of the\n image are flux densities (flux=True), such as\n erg/s/cm2/Hz, or whether they are per-steradian brightness\n units (flux=False), such as erg/s/cm2/Hz/steradian. It\n needs to know this when it changes the pixel size, because\n when pixel sizes change, resampled flux densities need to\n be corrected for the change in the area per pixel, where\n resampled brightnesses don't.\n order : int\n The order of the spline interpolation. This can take any\n value from 0-5. 
The default is 1 (linear interpolation).\n            When this function is used to lower the resolution of\n            an image, the low-pass anti-aliasing filter that is applied\n            makes linear interpolation sufficient.\n            Conversely, when this function is used to increase the\n            image resolution, order=3 might be useful. Higher\n            orders than this will tend to introduce ringing artefacts.\n        interp : 'no' | 'linear' | 'spline'\n            If 'no', replace masked data with the median image value.\n            If 'linear', replace masked values using a linear\n            interpolation between neighboring values.\n            If 'spline', replace masked values using a spline\n            interpolation between neighboring values.\n        unit_start : `astropy.units.Unit`\n            The angular units of the newstart coordinates. Degrees by default.\n        unit_step : `astropy.units.Unit`\n            The angular units of the step argument. Arcseconds by default.\n        antialias : bool\n            By default, when the resolution of an image axis is about\n            to be reduced, a low pass filter is first applied to suppress\n            high spatial frequencies that cannot be represented by the\n            reduced sampling interval. If this is not done, high-frequency\n            noise and sharp edges get folded back to lower frequencies,\n            where they increase the noise level of the image and introduce\n            ringing artefacts next to sharp edges, such as CCD saturation\n            spikes. This filtering can be disabled by passing False to\n            the antialias argument.\n        inplace : bool\n            If False, return a resampled copy of the image (the default).\n            If True, resample the original image in-place, and return that.\n        window : str\n            The type of window function to use for antialiasing\n            in the Fourier plane. The following windows are supported:\n\n            blackman\n                This window suppresses ringing better than any other\n                window, at the expense of lowered image resolution. In\n                the image plane, the PSF of this window is\n                approximately gaussian, with a standard deviation of\n                around 0.96*newstep, and a FWHM of about 2.3*newstep.\n\n            gaussian\n                A truncated gaussian window. This has a smaller PSF\n                than the blackman window, however gaussians never fall\n                to zero, so either significant ringing will be seen due\n                to truncation of the gaussian, or low-level aliasing\n                will occur, depending on the spatial frequency coverage\n                of the image beyond the folding frequency. It can be a\n                good choice for images that only contain smoothly\n                varying features. It is equivalent to a convolution of\n                the image with both an airy profile and a gaussian of\n                standard deviation 0.724*newstep (FWHM 1.704*newstep).\n\n            rectangle\n                This window simply zeros all spatial frequencies above\n                the highest that can be correctly sampled by the new\n                pixel size. 
This gives the best resolution of any of\n                the windows, but this is marred by the strong sidelobes\n                of the resulting airy-profile, especially near bright\n                point sources and CCD saturation lines.\n\n        Returns\n        -------\n        out : `~mpdaf.obj.Image`\n            The resampled image.\n\n        """\n        # Convert newstep to the newinc argument used by regrid(), being\n        # careful to preserve the signs of the existing coordinate increments.\n        step_signs = np.sign(self.get_axis_increments())\n        if is_number(newstep):\n            newinc = step_signs * abs(newstep)\n        else:\n            newinc = step_signs * abs(np.asarray(newstep))\n\n        # Convert newstart to the refpos,refpix arguments expected by regrid().\n        refpix = None if newstart is None else [0.0, 0.0]\n\n        return self.regrid(newdim, newstart, refpix, newinc, flux=flux,\n                           order=order, interp=interp, unit_pos=unit_start,\n                           unit_inc=unit_step, antialias=antialias,\n                           inplace=inplace, window=window)\n\n    def regrid(self, newdim, refpos, refpix, newinc, flux=False, order=1,\n               interp='no', unit_pos=u.deg, unit_inc=u.arcsec, antialias=True,\n               inplace=False, cutoff=0.25, window="blackman"):\n        """Resample an image of the sky to select its angular resolution,\n        to specify the position of the sky in the image array, and\n        optionally to reflect one or more of its axes.\n\n        This function can be used to decrease or increase the\n        resolution of an image. It can also shift the contents of an\n        image to place a specific (dec,ra) position at a specific\n        fractional pixel position. Finally, it can be used to invert\n        the direction of one or both of the array axes on the sky.\n\n        When this function is used to resample an image to a lower\n        resolution, a low-pass anti-aliasing filter is applied to the\n        image before it is resampled, to remove all spatial\n        frequencies above half the new sampling rate. This is required\n        to satisfy the Nyquist sampling constraint. It prevents high\n        spatial-frequency noise and edges from being aliased to lower\n        frequency artefacts in the resampled image. The removal of\n        this noise improves the signal to noise ratio of the resampled\n        image.\n\n        Parameters\n        ----------\n        newdim : int or (int,int)\n            The desired new dimensions. Python notation: (ny,nx)\n        refpos : (float, float)\n            The sky position (dec,ra) to place at the pixel specified\n            by the refpix argument.\n\n            If refpix and refpos are both None, the sky position at\n            the bottom left corner of the input image is placed at the\n            bottom left corner of the output image. Note that refpix\n            and refpos must either both be given values, or both\n            be None.\n        refpix : (float, float)\n            The [Y, X] indexes of the output pixel where the sky\n            position, refpos, should be placed. Y and X are\n            interpreted as floating point indexes, where integer\n            values indicate pixel centers and integer values +/- 0.5\n            indicate the edges of pixels.\n\n            If refpix and refpos are both None, the sky position at\n            the bottom left corner of the input image is placed at the\n            bottom left corner of the output image. Note that refpix\n            and refpos must either both be given values, or both\n            be None.\n        newinc : float or (float, float)\n            The signed increments of the angle on the sky from one\n            pixel to the next, given as either a single increment for\n            both image axes, or two numbers (dy,dx) for the Y and X\n            axes respectively.\n\n            The signs of these increments are interpreted as described\n            in the documentation of the Image.get_axis_increments()\n            function. 
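A sketch with a hypothetical\n            Image ``im``, keeping the conventional increment signs::\n\n                ny, nx = im.shape\n                im2 = im.regrid((ny, nx), None, None, (0.2, -0.2))\n\n            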
In particular, note that dy is typically\n positive and dx is usually negative, such that when the\n image is plotted, east appears anticlockwise of north, and\n east is towards the left of the plot when the image\n rotation angle is zero.\n\n If either of the signs of the two newinc numbers is\n different from the sign of the increments of the original\n image (queryable with image.get_axis_increments()), then\n the image will be reflected about that axis. In this case\n the value of the refpix argument should be chosen with\n care, because otherwise the sampled part of the image may\n end up being reflected outside the limits of the image\n array, and the result will be a blank image.\n\n If only one number is given for newinc then both axes\n are given the same resolution, but the signs of the\n increments are kept the same as the pixel increments\n of the original image.\n flux : bool\n This tells the function whether the pixel units of the\n image are flux densities (flux=True), such as\n erg/s/cm2/Hz, or whether they are per-steradian brightness\n units (flux=False), such as erg/s/cm2/Hz/steradian. It\n needs to know this when it changes the pixel size, because\n when pixel sizes change, resampled flux densities need to\n be corrected for the change in the area per pixel, where\n resampled brightnesses don't.\n order : int\n The order of the spline interpolation. This can take any\n value from 0-5. The default is 1 (linear interpolation).\n When this function is used to lower the resolution of\n an image, the low-pass anti-aliasing filter that is applied,\n makes linear interpolation sufficient.\n Conversely, when this function is used to increase the\n image resolution, order=3 might be useful. Higher\n orders than this will tend to introduce ringing artefacts.\n interp : 'no' | 'linear' | 'spline'\n If 'no', replace masked data with the median image value.\n If 'linear', replace masked values using a linear\n interpolation between neighboring values.\n if 'spline', replace masked values using a spline\n interpolation between neighboring values.\n unit_pos : `astropy.units.Unit`\n The units of the refpos coordinates. Degrees by default.\n unit_inc : `astropy.units.Unit`\n The units of newinc. Arcseconds by default.\n antialias : bool\n By default, when the resolution of an image axis is about\n to be reduced, a low pass filter is first applied to suppress\n high spatial frequencies that can not be represented by the\n reduced sampling interval. If this is not done, high-frequency\n noise and sharp edges get folded back to lower frequencies,\n where they increase the noise level of the image and introduce\n ringing artefacts next to sharp edges, such as CCD saturation\n spikes. This filtering can be disabled by passing False to\n the antialias argument.\n inplace : bool\n If False, return a resampled copy of the image (the default).\n If True, resample the original image in-place, and return that.\n cutoff : float\n Mask each output pixel where at least this fraction of the\n pixel was interpolated from dummy values given to masked\n input pixels.\n window : str\n The type of window function to use for antialiasing\n in the Fourier plane. The following windows are supported:\n\n blackman\n This window suppresses ringing better than any other\n window, at the expense of lowered image resolution. 
In\n the image plane, the PSF of this window is\n approximately gaussian, with a standard deviation of\n around 0.96*newstep, and a FWHM of about 2.3*newstep.\n\n gaussian\n A truncated gaussian window. This has a smaller PSF\n than the blackman window, however gaussians never fall\n to zero, so either significant ringing will be seen due\n to truncation of the gaussian, or low-level aliasing\n will occur, depending on the spatial frequency coverage\n of the image beyond the folding frequency. It can be a\n good choice for images that only contain smoothly\n varying features. It is equivalent to a convolution of\n the image with both an airy profile and a gaussian of\n standard deviation 0.724*newstep (FWHM 1.704*newstep).\n\n rectangle\n This window simply zeros all spatial frequencies above\n the highest that can be correctly sampled by the new\n pixel size. This gives the best resolution of any of\n the windows, but this is marred by the strong sidelobes\n of the resulting airy-profile, especially near bright\n point sources and CCD saturation lines.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n The resampled image is returned.\n\n \"\"\"\n if is_int(newdim):\n newdim = (newdim, newdim)\n newdim = np.asarray(newdim, dtype=int)\n\n if refpos is None and refpix is None:\n # If neither refpos nor refpix have values, substitute values\n # that will place the current sky position of the bottom left\n # corner of the image at the bottom left corner of the output\n # image.\n\n # Use the pixel index of the bottom left corner of the image.\n refpix = np.array([-0.5, -0.5])\n refpos = self.wcs.pix2sky(refpix)\n elif refpos is not None and refpix is not None:\n # Were refpos and refpix both given values?\n\n # If necessary convert refpos to a numpy array and convert\n # it's units to the current WCS units.\n refpos = np.asarray(refpos, dtype=float)\n if unit_pos is not None:\n refpos = UnitArray(refpos, unit_pos, self.wcs.unit)\n\n # If necessary convert refpix to a floating point numpy array.\n refpix = np.asarray(refpix, dtype=float)\n else:\n # Complain if just one of refpos and refpix is None.\n raise ValueError('The refpos and refpix arguments should both be '\n 'None or both have values.')\n\n # Get the current index increments of the 2 axes.\n oldinc = self.wcs.get_axis_increments()\n\n # Use a common increment for both axes? If so, give them\n # the same size, but with signs matching the current\n # pixel increments.\n if is_number(newinc):\n size = abs(newinc)\n newinc = (size * np.sign(oldinc[0]), size * np.sign(oldinc[1]))\n\n # Ensure that newinc is an array of values that have the\n # same units as the WCS object.\n newinc = np.asarray(newinc, dtype=float)\n if unit_inc is not None:\n newinc = UnitArray(newinc, unit_inc, self.wcs.unit)\n\n # Get a copy of the data array with masked values filled.\n data = self._prepare_data(interp)\n\n # If the angular pixel increments along either axis are being\n # increased, then low-pass filter the data along that axis to\n # prevent aliasing in the resampled data.\n if antialias:\n data, newfmax = _antialias_filter_image(\n data, abs(oldinc), abs(newinc), self.get_spatial_fmax(),\n window)\n else:\n newfmax = 0.5 / abs(newinc)\n\n # For each pixel in the output image, the affine_transform\n # function calculates the index of the equivalent pixel in the\n # input image, and interpolates a value of the output pixel\n # from the surrounding pixels of the input image. 
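(This is presumably\n        # scipy.ndimage's affine_transform; its 'matrix' argument will be\n        # the new2old matrix defined below and its 'offset' argument the\n        # constant term of the mapping.) 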
It calculates\n        # the input index from the output index as follows:\n        #\n        #  oldpixel = new2old * newpixel + offset\n        #\n        # where new2old is a 2x2 affine transform matrix designed to\n        # multiply a column vector in axis order (Y,X). In our case\n        # the matrix is:\n        #\n        #  new2old = |newinc[0]/oldinc[0],          0          |\n        #            |         0         , newinc[1]/oldinc[1]|\n        #\n        # This scales an output index by newinc to calculate the\n        # corresponding angular offset of that pixel from the origin\n        # of the output array, then divides this by oldinc to compute\n        # the equivalent index offset in the input array.\n        new2old = np.array([[newinc[0] / oldinc[0], 0],\n                            [0, newinc[1] / oldinc[1]]])\n\n        # Also work out the inverse, so that we can convert from\n        # pixels in the current image to the equivalent pixel of the\n        # resampled image.\n        old2new = np.linalg.inv(new2old)\n\n        # We have been asked to locate sky position 'refpos' at pixel\n        # 'refpix' of the resampled array. Reproducing the equation\n        # from above:\n        #\n        #  oldpixel = new2old * newpixel + offset\n        #\n        # In this case oldpixel is the pixel index of the input array\n        # where the sky position 'refpos' is currently located, and\n        # newpixel is refpix.\n        #\n        #  sky2pix(refpos) = new2old * refpix + offset\n        #\n        # Thus the appropriate value for the offset parameter of\n        # affine_transform() is:\n        #\n        #  offset = sky2pix(refpos) - new2old * refpix\n        offset = (self.wcs.sky2pix(refpos).T[:, :1] -\n                  np.dot(new2old, refpix[np.newaxis, :].T))\n\n        # For each pixel of the output image, map its index to the\n        # equivalent index of the input image and interpolate a value\n        # for the new pixel from there.\n        data = affine_transform(data, new2old, offset.flatten(),\n                                output_shape=newdim, order=order,\n                                prefilter=order >= 3)\n\n        # Create a floating point version of the mask in which masked\n        # elements are 1.0 and unmasked elements are 0.0.\n        mask = self._mask.astype(float)\n\n        # Resample the floating point version of the mask array.\n        mask = affine_transform(mask, new2old, offset.flatten(), cval=1.0,\n                                output_shape=newdim, output=float)\n\n        # Create new boolean mask in which all pixels that had an\n        # integrated contribution of more than 'cutoff' originally\n        # masked pixels are masked. Note that setting the cutoff to\n        # the "obvious" value of zero results in lots of pixels being\n        # masked that are far away from any masked pixels, due to\n        # precision errors in the affine_transform() function.\n        # Limit the minimum value of the cutoff to avoid this.\n        mask = np.greater(mask, max(cutoff, 1.0e-6))\n\n        # Also repeat the procedure for the array of variances, if any.\n        if self._var is not None:\n            var = affine_transform(self._var, new2old, offset.flatten(),\n                                   output_shape=newdim, order=order,\n                                   prefilter=order >= 3)\n\n        else:\n            var = None\n\n        # Compute the absolute changes in the size of the pixels\n        # along the X and Y axes.\n        xs = abs(newinc[1] / oldinc[1])\n        ys = abs(newinc[0] / oldinc[0])\n\n        # Compute the number of input pixels per output pixel.\n        n = xs * ys\n\n        # Scale the flux per pixel by the multiplicative increase in the\n        # area of a pixel?\n\n        if flux:\n            # Scale the pixel fluxes by the increase in the area.\n            data *= n\n\n            # The variances of the output pixels depend on whether an\n            # anti-aliasing filter was applied, as follows.\n            #\n            # 1. An anti-aliasing filter is applied before resampling\n            #    when increasing the pixel size. This filter\n            #    effectively averages together n neighboring\n            #    pixels. 
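(For example, a twofold\n            #    size increase along both axes gives n = 4.) 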
The affine_transform() samples these averages\n # when it interpolates the output pixel values, so the\n # output pixels are effectively the average of n\n # independent pixels of the input image. Multiplying\n # these pixel values by n, then turns each output pixel\n # value into the sum of n pixels. The variance of a\n # sum of n samples of variance v, is n*v.\n # 2. No anti-aliasing filter is applied when decreasing\n # the pixel size, so in this case affine_transform()\n # samples raw pixel values. The variances of these\n # output pixels are thus identical to those of the\n # input pixels. If we then multiply this by n, then the\n # variance of each output pixel is n**2 times the\n # variance of the input pixels.\n # 3. If the pixel sizes along one axis are increased,\n # while those of the other axis are decreased, then we\n # have a mix of the above two cases.\n\n if var is not None:\n # Scale the variance according to the prescription described\n # above.\n var *= ((xs if xs > 1.0 and antialias else xs**2) *\n (ys if ys > 1.0 and antialias else ys**2))\n\n # If we haven't been asked to scale the fluxes by the increase\n # in the area of a pixel, the effect on the variances are as\n # explained above, but without the flux scaling. If\n # anti-aliasing was applied to both axes, then each output\n # pixel is effectively the average of n input pixels, and the\n # variance of a mean of n samples of variance v is v/n. If no\n # anti-aliasing was applied, then there is no change to the\n # variance.\n\n else:\n if var is not None and (xs > 1.0 or ys > 1.0):\n var *= ((1 / xs if xs > 1.0 and antialias else 1.0) *\n (1 / ys if ys > 1.0 and antialias else 1.0))\n\n # Get the coordinate reference pixel of the input image,\n # arranged as a column vector in python (Y,X) order. Note that\n # crpix contains FITS pixel indexes which are 1 greater than\n # the corresponding python pixel indexes.\n oldcrpix = np.array([[self.wcs.get_crpix2() - 1],\n [self.wcs.get_crpix1() - 1]])\n\n # Compute the updated value of the coordinate reference pixel\n # in (Y,X) axis order.\n newcrpix = np.dot(old2new, (oldcrpix - offset))\n\n # Make a copy of the WCS object of the image to use as a template\n # for the WCS object of the resampled image.\n wcs = self.wcs.copy()\n\n # Install the new increments and image dimensions.\n wcs.set_axis_increments(newinc)\n wcs.naxis1 = newdim[1]\n wcs.naxis2 = newdim[0]\n\n # Record the new value of the coordinate reference pixel,\n # being careful to convert from python 0-relative pixel\n # indexes to FITS 1-relative pixel indexes.\n wcs.set_crpix1(newcrpix[1] + 1)\n wcs.set_crpix2(newcrpix[0] + 1)\n\n # Install the resampled data, mask and variance arrays, either\n # within self, or in a new Image object.\n out = self if inplace else self.clone()\n out._data = data\n out._mask = mask\n out._var = var\n out.wcs = wcs\n\n # If the spatial frequency band-limits of the image have been\n # reduced by the changes in the Y and X sampling intervals,\n # record this.\n out.update_spatial_fmax(newfmax)\n\n return out\n\n def align_with_image(self, other, flux=False, inplace=False, cutoff=0.25,\n antialias=True, window=\"blackman\"):\n \"\"\"Resample the image to give it the same orientation, position,\n resolution and size as a given image.\n\n The image is first rotated to give it the same orientation on\n the sky as the other image. 
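(A typical call might be\n        ``aligned = im.align_with_image(other_im)`` for a hypothetical pair\n        of Images ``im`` and ``other_im``.) 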
This rotation step also\n        eliminates any shear terms from the original image, so that\n        its pixels can be correctly drawn on a rectangular grid.\n\n        Secondly the image is resampled. This changes its resolution,\n        shifts the image such that the same points on the sky appear\n        in the same pixels as in the other image, and changes the\n        dimensions of the image array to match that of the other\n        image.\n\n        The rotation and resampling processes are performed as\n        separate steps because the anti-aliasing filter that needs to\n        be applied in the resampling step is difficult to implement\n        before the axes have been rotated to the final orientation.\n\n        Parameters\n        ----------\n        other : `~mpdaf.obj.Image`\n            The image to be aligned with.\n        flux : bool\n            This tells the function whether the pixel units of the\n            image are flux densities (flux=True), such as\n            erg/s/cm2/Hz, or whether they are per-steradian brightness\n            units (flux=False), such as erg/s/cm2/Hz/steradian. It\n            needs to know this when it changes the pixel size, because\n            when pixel sizes change, resampled flux densities need to\n            be corrected for the change in the area per pixel, where\n            resampled brightnesses don't.\n        inplace : bool\n            If False, return an aligned copy of the image (the default).\n            If True, align the original image in-place, and return that.\n        cutoff : float\n            Mask each output pixel where at least this fraction of the\n            pixel was interpolated from dummy values given to masked\n            input pixels.\n        antialias : bool\n            By default, when the resolution of an image axis is about\n            to be reduced, a low pass filter is first applied to suppress\n            high spatial frequencies that cannot be represented by the\n            reduced sampling interval. If this is not done, high-frequency\n            noise and sharp edges get folded back to lower frequencies,\n            where they increase the noise level of the image and introduce\n            ringing artefacts next to sharp edges, such as CCD saturation\n            spikes and bright unresolved stars. This filtering can be\n            disabled by passing False to the antialias argument.\n        window : str\n            The type of window function to use for antialiasing\n            in the Fourier plane. The following windows are supported:\n\n            blackman\n                This window suppresses ringing better than any other\n                window, at the expense of lowered image resolution. In\n                the image plane, the PSF of this window is\n                approximately gaussian, with a standard deviation of\n                around 0.96*newstep, and a FWHM of about 2.3*newstep.\n\n            gaussian\n                A truncated gaussian window. This has a smaller PSF\n                than the blackman window, however gaussians never fall\n                to zero, so either significant ringing will be seen due\n                to truncation of the gaussian, or low-level aliasing\n                will occur, depending on the spatial frequency coverage\n                of the image beyond the folding frequency. It can be a\n                good choice for images that only contain smoothly\n                varying features. It is equivalent to a convolution of\n                the image with both an airy profile and a gaussian of\n                standard deviation 0.724*newstep (FWHM 1.704*newstep).\n\n            rectangle\n                This window simply zeros all spatial frequencies above\n                the highest that can be correctly sampled by the new\n                pixel size. 
This gives the best resolution of any of\n                the windows, but this is marred by the strong sidelobes\n                of the resulting airy-profile, especially near bright\n                point sources and CCD saturation lines.\n\n        """\n\n        # Do nothing if the images are already aligned.\n        if self.wcs.isEqual(other.wcs):\n            return self if inplace else self.copy()\n\n        # Determine the ranges of right-ascension and declination\n        # covered by the target image grid plus an extra pixel at\n        # each edge.\n        pixsky = other.wcs.pix2sky([[-1, -1],\n                                    [other.shape[0], -1],\n                                    [-1, other.shape[1]],\n                                    [other.shape[0], other.shape[1]]],\n                                   unit=u.deg)\n        dec_min, ra_min = pixsky.min(axis=0)\n        dec_max, ra_max = pixsky.max(axis=0)\n\n        # Truncate the input image to just enclose the above ranges of\n        # right-ascension and declination.\n        out = self.truncate(dec_min, dec_max, ra_min, ra_max, mask=False,\n                            unit=u.deg, inplace=inplace)\n\n        # Rotate the image to have the same orientation as the other\n        # image. Note that the rotate function has a side effect of\n        # correcting the image for shear terms in the CD matrix, so we\n        # perform this step even if no rotation is otherwise needed.\n        out._rotate(other.wcs.get_rot() - out.wcs.get_rot(), reshape=True,\n                    regrid=True, flux=flux, cutoff=cutoff)\n\n        # Get the pixel index and Dec,Ra coordinate at the center of\n        # the image that we are aligning with.\n        centerpix = np.asarray(other.shape) / 2.0\n        centersky = other.wcs.pix2sky(centerpix)[0]\n\n        # Re-sample the rotated image to have the same axis\n        # increments, offset and number of pixels as the image that we\n        # are aligning it with.\n        out.regrid(other.shape, centersky, centerpix,\n                   other.wcs.get_axis_increments(unit=u.deg),\n                   flux, unit_inc=u.deg, inplace=True, cutoff=cutoff,\n                   antialias=antialias, window=window)\n        return out\n\n    def estimate_coordinate_offset(self, ref, nsigma=1.0):\n        """Given a reference image of the sky that is expected to\n        overlap with the current image, attempt to fit for any offset\n        between the sky coordinate system of the current image and\n        that of the reference image. The returned value is designed to\n        be added to the coordinate reference pixel values of self.wcs.\n\n        This function performs the following steps:\n\n        1. The align_with_image() method is called to resample the\n           reference image onto the same coordinate grid as the\n           current image.\n\n        2. The two images are then cross-correlated, after zeroing all\n           background values in the images below nsigma standard\n           deviations above the mean.\n\n        3. The peak in the cross-correlation image is found and its\n           sub-pixel position is estimated by a simple quadratic\n           interpolation. This position, relative to the center of the\n           cross-correlation image, gives the average position offset\n           between similar features in the two images.\n\n        Parameters\n        ----------\n        ref : `~mpdaf.obj.Image`\n            The image of the sky that is to be used as the coordinate\n            reference. The sky coverage of this image should overlap\n            with that of self. 
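(A minimal sketch of the intended\n            use, with hypothetical Images ``im`` and ``refim``:\n            ``dy, dx = im.estimate_coordinate_offset(refim)``, after which\n            dy and dx can be added to crpix2 and crpix1 of im.wcs.) 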
Ideally the resolution of this image\n should be at least as good as the resolution of self.\n nsigma : float\n Only values that exceed this many standard deviations\n above the mean of each image will be used.\n\n Returns\n -------\n out : float,float\n The pixel offsets that would need to be added to the\n coordinate reference pixel values, crpix2 and crpix1, of\n self.wcs to make the features in self line up with those\n in the reference image.\n\n \"\"\"\n\n # Resample the reference sky image onto the same coordinate\n # grid as our image.\n ref = ref.align_with_image(self)\n\n # Before cross-correlating the images we need to make sure\n # that any areas that are masked in one image are also masked\n # in the other. Otherwise if one image has a very bright\n # source in an area that is masked in the other, then this\n # will produce false correlations.\n #\n # First get the union of the masked areas of the two images.\n mask = np.ma.mask_or(self._mask, ref._mask)\n\n # Place both image arrays into masked array containers that\n # share the above mask.\n sdata = np.ma.array(data=self._data, mask=mask)\n rdata = np.ma.array(data=ref._data, mask=mask)\n\n # Get copies of the above arrays with masked pixels filled\n # with the median values of the images.\n sdata = np.ma.filled(sdata, np.ma.median(sdata))\n rdata = np.ma.filled(rdata, np.ma.median(rdata))\n\n # When we cross-correlate the images, any constant or noisy\n # background will bias the result towards the origin of the\n # correlation, so remove most of the noisy background by\n # zeroing all values that are less than nsigma standard\n # deviations above the mean.\n mask = sdata < sdata.mean() + nsigma * sdata.std()\n sdata[mask] = 0\n mask = rdata < rdata.mean() + nsigma * rdata.std()\n rdata[mask] = 0\n\n # Sometimes a bright artefact or a bright star with\n # appreciable proper motion biases the correlation. To avoid\n # this take the log of the thresholded data to prevent very\n # bright features from dominating the correlation.\n sdata = np.log(1.0 + sdata)\n rdata = np.log(1.0 + rdata)\n\n # Cross correlate our image with the reference image, by\n # convolving our image with an axis-reversed version of the\n # reference image. Use mode=\"same\" to only keep the inner half\n # of the array. We don't expect the peak to be outside this\n # area, and this avoids edge effects where there is incomplete\n # data.\n cc = signal.fftconvolve(sdata, rdata[::-1, ::-1], mode=\"same\")\n\n # Find the position of the maximum value in the correlation image.\n py, px = np.unravel_index(np.argmax(cc), cc.shape)\n\n # Quadratically interpolate a more precise peak position from three\n # points along the X and Y axes, centered on the position found above.\n py2 = py - 1 + _find_quadratic_peak(cc[py - 1: py + 2, px])\n px2 = px - 1 + _find_quadratic_peak(cc[py, px - 1: px + 2])\n\n # Compute the offset of the peak relative to the central pixel\n # of the correlation image. This yields the offset between the\n # two images.\n dy = py2 - float(cc.shape[0] // 2)\n dx = px2 - float(cc.shape[1] // 2)\n\n return dy, dx\n\n def adjust_coordinates(self, ref, nsigma=1.0, inplace=False):\n \"\"\"Given a reference image of the sky that is expected to\n overlap with the current image, attempt to fit for any offset\n between the sky coordinate system of the current image and\n that of the reference image. 
Apply this offset to the\n        coordinates of the current image, to bring it into line with\n        the reference image.\n\n        This function calls self.estimate_coordinate_offset() to\n        fit for the offset between the coordinate systems of the\n        two images, then adjusts the coordinate reference pixel of\n        the current image to bring its coordinates into line with\n        those of the reference image.\n\n        Parameters\n        ----------\n        ref : `~mpdaf.obj.Image`\n            The image of the sky that is to be used as the coordinate\n            reference. The sky coverage of this image should overlap\n            with that of self. Ideally the resolution of this image\n            should be at least as good as the resolution of self.\n        nsigma : float\n            Only values that exceed this many standard deviations\n            above the mean of each image will be used.\n        inplace : bool\n            If False, return a shifted copy of the image (the default).\n            If True, shift the original image in-place, and return that.\n\n        Returns\n        -------\n        out : `~mpdaf.obj.Image`\n            A version of self in which the sky coordinates have been\n            shifted to match those of the reference image.\n\n        """\n\n        out = self if inplace else self.copy()\n\n        # Determine the pixel offset of features in the current\n        # image relative to features in the reference image.\n        dy, dx = out.estimate_coordinate_offset(ref, nsigma)\n\n        # Offset the WCS of the current image by the pixel shift found above.\n        out.wcs.set_crpix1(out.wcs.get_crpix1() + dx)\n        out.wcs.set_crpix2(out.wcs.get_crpix2() + dy)\n\n        # Calculate the resulting shift in sky coordinates, for display\n        # to the user.\n        units = u.arcsec if self.wcs.unit is u.deg else self.wcs.unit\n        offset = np.array([-dy, -dx]) * self.wcs.get_axis_increments(units)\n        self._logger.info("Shifted the coordinates by dy=%.3g dx=%.3g %s" %\n                          (offset[0], offset[1], units))\n        return out\n\n    def gaussian_filter(self, sigma=3, interp='no', inplace=False):\n        """Return an image with a Gaussian filter applied to the current\n        image.\n\n        Uses `scipy.ndimage.gaussian_filter`.\n\n        Parameters\n        ----------\n        sigma : float\n            Standard deviation for the Gaussian kernel.\n        interp : 'no' | 'linear' | 'spline'\n            If 'no', masked values are replaced by the data median value.\n            If 'linear', masked values are replaced by a linear\n            interpolation between neighboring values.\n            If 'spline', masked values are replaced by a spline\n            interpolation between neighboring values.\n        inplace : bool\n            If False, return a filtered copy of the image (the default).\n            If True, filter the original image in-place, and return that.\n\n        Returns\n        -------\n        out : `~mpdaf.obj.Image`\n\n        """\n\n        out = self if inplace else self.copy()\n\n        # Get a copy of the data array with masked values filled.\n        data = out._prepare_data(interp)\n        out._data = ndi.gaussian_filter(data, sigma)\n        if out._var is not None:\n            out._var = ndi.gaussian_filter(out._var, sigma)\n        return out\n\n    def segment(self, shape=(2, 2), minsize=20, minpts=None,\n                background=20, interp='no', median=None):\n        """Segment the image into a number of smaller images.\n\n        Returns a list of images. 
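A sketch of a typical call, with\n        ``im`` a hypothetical Image::\n\n            subimages = im.segment(minsize=10, background=50)\n\n        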
Uses\n        `scipy.ndimage.generate_binary_structure`,\n        `scipy.ndimage.grey_dilation`, `scipy.ndimage.measurements.label`, and\n        `scipy.ndimage.measurements.find_objects`.\n\n        Parameters\n        ----------\n        shape : (int,int)\n            Shape used for connectivity.\n        minsize : int\n            Minimum size of the images.\n        minpts : int\n            Minimum number of points in the object.\n        background : float\n            Under this value, flux is considered as background.\n        interp : 'no' | 'linear' | 'spline'\n            If 'no', masked values are replaced by the data median value.\n            If 'linear', masked values are replaced by a linear\n            interpolation between neighboring values.\n            If 'spline', masked values are replaced by a spline\n            interpolation between neighboring values.\n        median : (int,int) or None\n            If not None (default), size of the window to apply a median filter\n            on the image.\n\n        Returns\n        -------\n        out : list of `Image`\n\n        """\n        data = self._prepare_data(interp)\n        if median is not None:\n            data = np.ma.array(ndi.median_filter(data, median),\n                               mask=self._mask)\n        expanded = ndi.grey_dilation(data, (minsize, minsize))\n        expanded[expanded < background] = 0\n\n        structure = ndi.generate_binary_structure(shape[0], shape[1])\n        labels, nlabels = ndi.measurements.label(expanded, structure)\n        slices = ndi.measurements.find_objects(labels)\n\n        return [self[slices[i]] for i in range(nlabels)\n                if minpts is None or len(data[labels == i + 1]) >= minpts]\n\n    def add_gaussian_noise(self, sigma, interp='no'):\n        """Add Gaussian noise to the image, in place.\n\n        Parameters\n        ----------\n        sigma : float\n            Standard deviation.\n        interp : 'no' | 'linear' | 'spline'\n            If 'no', masked values are replaced by the data median value.\n            If 'linear', masked values are replaced by a linear\n            interpolation between neighboring values.\n            If 'spline', masked values are replaced by a spline\n            interpolation between neighboring values.\n        """\n        # Get a copy of the data array with masked values filled.\n        data = self._prepare_data(interp)\n        self._data = np.random.normal(data, sigma)\n\n        if self._var is None:\n            self._var = np.ones((self.shape)) * sigma * sigma\n        else:\n            # Adding independent noise of standard deviation sigma adds\n            # sigma**2 to the variance of each pixel.\n            self._var += sigma * sigma\n\n    def inside(self, coord, unit=u.deg):\n        """Return True if coord is inside the image.\n\n        Parameters\n        ----------\n        coord : (float,float)\n            Coordinates (y,x).\n        unit : `astropy.units.Unit`\n            Type of the coordinates (degrees by default)\n\n        Returns\n        -------\n        out : bool\n        """\n        if unit is not None:\n            pixcrd = self.wcs.sky2pix([coord[0], coord[1]], unit=unit)[0]\n        else:\n            pixcrd = coord\n        if (pixcrd >= -self.wcs.get_step(unit=unit) / 100).all() and \\\n           (pixcrd < self.shape + self.wcs.get_step(unit=unit) / 100).all():\n            return True\n        else:\n            return False\n\n    def convolve(self, other, inplace=False):\n        """Convolve an Image with a 2D array or another Image, using the\n        discrete convolution equation.\n\n        This function, which uses the discrete convolution equation, is\n        usually slower than Image.fftconvolve(). However it can be faster when\n        other.data.size is small, and it always uses much less memory, so it\n        is sometimes the only practical choice.\n\n        Masked values in self.data and self.var are replaced with zeros before\n        the convolution is performed, but they are masked again after the\n        convolution.\n\n        If self.var exists, the variances are propagated using the equation:\n\n            result.var = self.var (*) other**2\n\n        where (*) indicates convolution. 
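(Here ``other**2`` denotes the\n        element-wise square of the values in ``other``.) 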
This equation can be derived by\n applying the usual rules of error-propagation to the discrete\n convolution equation.\n\n The speed of this function scales as O(Nd x No) where\n Nd=self.data.size and No=other.data.size.\n\n Uses `scipy.signal.convolve`.\n\n Parameters\n ----------\n other : Image or numpy.ndarray\n The 2D array with which to convolve the image in self.data.\n This array can be an image of the same size as self, or it\n can be a smaller image, such as a small gaussian to use to\n smooth the larger image.\n\n When ``other`` contains a symmetric filtering function, such\n as a two-dimensional gaussian, the center of the function\n should be placed at the center of pixel:\n\n ``(other.shape - 1) // 2``\n\n If other is an MPDAF Image object, note that only its data\n array is used. Masked values in this array are treated\n as zero. Any variances found in other.var are ignored.\n inplace : bool\n If False (the default), return the results in a new Image.\n If True, record the result in self and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n # Delegate the task to DataArray._convolve()\n return self._convolve(signal.convolve, other=other, inplace=inplace)\n\n def fftconvolve(self, other, inplace=False):\n \"\"\"Convolve an Image with a 2D array or another Image, using the\n Fourier convolution theorem.\n\n This function, which performs the convolution by multiplying the\n Fourier transforms of the two images, is usually much faster than\n Image.convolve(), except when other.data.size is small. However it\n uses much more memory, so Image.convolve() is sometimes a better\n choice.\n\n Masked values in self.data and self.var are replaced with zeros before\n the convolution is performed, but they are masked again after the\n convolution.\n\n If self.var exists, the variances are propagated using the equation:\n\n result.var = self.var (*) other**2\n\n where (*) indicates convolution. This equation can be derived by\n applying the usual rules of error-propagation to the discrete\n convolution equation.\n\n The speed of this function scales as O(Nd x log(Nd)) where\n Nd=self.data.size. It temporarily allocates a pair of arrays that\n have the sum of the shapes of self.shape and other.shape, rounded up\n to a power of two along each axis. This can involve a lot of memory\n being allocated. For this reason, when other.shape is small,\n Image.convolve() may be more efficient than Image.fftconvolve().\n\n Uses `scipy.signal.fftconvolve`.\n\n Parameters\n ----------\n other : Image or numpy.ndarray\n The 2D array with which to convolve the image in self.data. This\n array can be an image of the same size as self, or it can be a\n smaller image, such as a small 2D gaussian to use to smooth the\n larger image.\n\n When ``other`` contains a symmetric filtering function, such as a\n two-dimensional gaussian, the center of the function should be\n placed at the center of pixel:\n\n ``(other.shape - 1) // 2``\n\n If other is an MPDAF Image object, note that only its data array\n is used. Masked values in this array are treated as zero. 
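(For a kernel of shape\n            (ky, kx), that center pixel is ((ky - 1) // 2, (kx - 1) // 2).) 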
Any\n variances found in other.var are ignored.\n inplace : bool\n If False (the default), return the results in a new Image.\n If True, record the result in self and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n # Delegate the task to DataArray._convolve()\n return self._convolve(signal.fftconvolve, other=other, inplace=inplace)\n\n def fftconvolve_gauss(self, center=None, flux=1., fwhm=(1., 1.),\n peak=False, rot=0., factor=1, unit_center=u.deg,\n unit_fwhm=u.arcsec, inplace=False):\n \"\"\"Return the convolution of the image with a 2D gaussian.\n\n Parameters\n ----------\n center : (float,float)\n Gaussian center (y_peak, x_peak). If None the center of the image\n is used. The unit is given by the unit_center parameter (degrees\n by default).\n flux : float\n Integrated gaussian flux or gaussian peak value if peak is True.\n fwhm : (float,float)\n Gaussian fwhm (fwhm_y,fwhm_x). The unit is given by the unit_fwhm\n parameter (arcseconds by default).\n peak : bool\n If true, flux contains a gaussian peak value.\n rot : float\n Angle position in degree.\n factor : int\n If factor<=1, gaussian value is computed in the center of each\n pixel. If factor>1, for each pixel, gaussian value is the sum of\n the gaussian values on the factor*factor pixels divided by the\n pixel area.\n unit_center : `astropy.units.Unit`\n type of the center and position coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_fwhm : `astropy.units.Unit`\n FWHM unit. Arcseconds by default (use None for radius in pixels)\n inplace : bool\n If False, return a convolved copy of the image (default value).\n If True, convolve the original image in-place, and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n ima = gauss_image(self.shape, wcs=self.wcs, center=center,\n flux=flux, fwhm=fwhm, peak=peak, rot=rot,\n factor=factor, gauss=None, unit_center=unit_center,\n unit_fwhm=unit_fwhm, cont=0, unit=self.unit)\n\n # Normalize the total flux of the Gaussian.\n ima.norm(typ='sum')\n return self.fftconvolve(ima, inplace=inplace)\n\n def fftconvolve_moffat(self, center=None, flux=1., a=1.0, q=1.0,\n n=2, peak=False, rot=0., factor=1,\n unit_center=u.deg, unit_a=u.arcsec, inplace=False):\n \"\"\"Return the convolution of the image with a 2D moffat.\n\n Parameters\n ----------\n center : (float,float)\n Gaussian center (y_peak, x_peak). If None the center of the image\n is used. The unit is given by the unit_center parameter (degrees\n by default).\n flux : float\n Integrated gaussian flux or gaussian peak value if peak is True.\n a : float\n Half width at half maximum of the image in the absence of\n atmospheric scattering. 1 by default. The unit is given by the\n unit_a parameter (arcseconds by default).\n q : float\n Axis ratio, 1 by default.\n n : int\n Atmospheric scattering coefficient. 2 by default.\n rot : float\n Angle position in degree.\n factor : int\n If factor<=1, moffat value is computed in the center of each pixel.\n If factor>1, for each pixel, moffat value is the sum\n of the moffat values on the factor*factor pixels\n divided by the pixel area.\n peak : bool\n If true, flux contains a gaussian peak value.\n unit_center : `astropy.units.Unit`\n type of the center and position coordinates.\n Degrees by default (use None for coordinates in pixels).\n unit_a : `astropy.units.Unit`\n a unit. 
Arcseconds by default (use None for radius in pixels)\n inplace : bool\n If False, return a convolved copy of the image (default value).\n If True, convolve the original image in-place, and return that.\n\n Returns\n -------\n out : `~mpdaf.obj.Image`\n\n \"\"\"\n fwhmy = a * (2 * np.sqrt(2 ** (1.0 / n) - 1.0))\n fwhmx = fwhmy / q\n ima = moffat_image(self.shape, wcs=self.wcs, factor=factor,\n center=center, flux=flux, fwhm=(fwhmy, fwhmx), n=n,\n rot=rot, peak=peak, unit_center=unit_center,\n unit_fwhm=unit_a, unit=self.unit)\n ima.norm(typ='sum')\n return self.fftconvolve(ima, inplace=inplace)\n\n def correlate2d(self, other, interp='no'):\n \"\"\"Return the cross-correlation of the image with an array/image\n\n Uses `scipy.signal.correlate2d`.\n\n Parameters\n ----------\n other : 2d-array or Image\n Second Image or 2d-array.\n interp : 'no' | 'linear' | 'spline'\n if 'no', data median value replaced masked values.\n if 'linear', linear interpolation of the masked values.\n if 'spline', spline interpolation of the masked values.\n\n \"\"\"\n if not isinstance(other, DataArray):\n # Get a copy of the data array with masked values filled.\n data = self._prepare_data(interp)\n\n res = self.copy()\n res._data = signal.correlate2d(data, other, mode='same',\n boundary='symm')\n if res._var is not None:\n res._var = signal.correlate2d(res._var, other, mode='same',\n boundary='symm')\n return res\n elif other.ndim == 2:\n # Get copies of the data arrays with masked values filled.\n data = self._prepare_data(interp)\n other_data = other._prepare_data(interp)\n other_data = UnitMaskedArray(other_data, other.unit, self.unit)\n res = self.copy()\n res._data = signal.correlate2d(data, other_data, mode='same')\n\n if res._var is not None:\n res._var = signal.correlate2d(res._var, other_data,\n mode='same')\n return res\n else:\n raise IOError('Operation forbidden')\n\n def plot(self, title=None, scale='linear', vmin=None, vmax=None,\n zscale=False, colorbar=None, var=False, show_xlabel=False,\n show_ylabel=False, ax=None, unit=u.deg, use_wcs=False, **kwargs):\n \"\"\"Plot the image with axes labeled in pixels.\n\n If either axis has just one pixel, plot a line instead of an image.\n\n Colors are assigned to each pixel value as follows. First each\n pixel value, ``pv``, is normalized over the range ``vmin`` to ``vmax``,\n to have a value ``nv``, that goes from 0 to 1, as follows::\n\n nv = (pv - vmin) / (vmax - vmin)\n\n This value is then mapped to another number between 0 and 1 which\n determines a position along the colorbar, and thus the color to give\n the displayed pixel. The mapping from normalized values to colorbar\n position, color, can be chosen using the scale argument, from the\n following options:\n\n - 'linear': ``color = nv``\n - 'log': ``color = log(1000 * nv + 1) / log(1000 + 1)``\n - 'sqrt': ``color = sqrt(nv)``\n - 'arcsinh': ``color = arcsinh(10*nv) / arcsinh(10.0)``\n\n A colorbar can optionally be drawn. If the colorbar argument is given\n the value 'h', then a colorbar is drawn horizontally, above the plot.\n If it is 'v', the colorbar is drawn vertically, to the right of the\n plot.\n\n By default the image is displayed in its own plot. 
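(As a worked example, with\n        vmin=0 and vmax=100 a pixel value of 25 gives nv = 0.25, which the\n        'sqrt' stretch maps to sqrt(0.25) = 0.5, half way along the colorbar.) 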
Alternatively,\n        to make it a subplot of a larger figure, a suitable\n        ``matplotlib.axes.Axes`` object can be passed via the ``ax`` argument.\n        Note that unless matplotlib interactive mode has previously been enabled\n        by calling ``matplotlib.pyplot.ion()``, the plot window will not appear\n        until the next time that ``matplotlib.pyplot.show()`` is called. So to\n        arrange that a new window appears as soon as ``Image.plot()`` is\n        called, do the following before the first call to ``Image.plot()``::\n\n            import matplotlib.pyplot as plt\n            plt.ion()\n\n        Parameters\n        ----------\n        title : str\n            An optional title for the figure (None by default).\n        scale : 'linear' | 'log' | 'sqrt' | 'arcsinh'\n            The stretch function to use when mapping pixel values to\n            colors (the default is 'linear'). The pixel values are\n            first normalized to range from 0 for values <= vmin,\n            to 1 for values >= vmax, then the stretch algorithm maps\n            these normalized values, nv, to a position p from 0 to 1\n            along the colorbar, as follows:\n\n            - linear: p = nv\n            - log: p = log(1000 * nv + 1) / log(1000 + 1)\n            - sqrt: p = sqrt(nv)\n            - arcsinh: p = arcsinh(10*nv) / arcsinh(10.0)\n        vmin : float\n            Pixels that have values <= vmin are given the color\n            at the dark end of the color bar. Pixel values between\n            vmin and vmax are given colors along the colorbar according\n            to the mapping algorithm specified by the scale argument.\n        vmax : float\n            Pixels that have values >= vmax are given the color\n            at the bright end of the color bar. If None, vmax is\n            set to the maximum pixel value in the image.\n        zscale : bool\n            If True, vmin and vmax are automatically computed\n            using the IRAF zscale algorithm.\n        colorbar : str\n            If 'h', a horizontal colorbar is drawn above the image.\n            If 'v', a vertical colorbar is drawn to the right of the image.\n            If None (the default), no colorbar is drawn.\n        var : bool\n            If True, the variance array is shown in place of the data array.\n        ax : matplotlib.axes.Axes\n            An optional Axes instance in which to draw the image,\n            or None to have one created using ``matplotlib.pyplot.gca()``.\n        unit : `astropy.units.Unit`\n            The units to use for displaying world coordinates\n            (degrees by default). 
In the interactive plot, when\n the mouse pointer is over a pixel in the image the\n coordinates of the pixel are shown using these units,\n along with the pixel value.\n use_wcs : bool\n If True, use `astropy.visualization.wcsaxes` to get axes\n with world coordinates.\n kwargs : matplotlib.artist.Artist\n Optional extra keyword/value arguments to be passed to\n the ``ax.imshow()`` function.\n\n Returns\n -------\n out : matplotlib AxesImage\n\n \"\"\"\n import matplotlib.pyplot as plt\n cax = None\n # Default X and Y axes are labeled in pixels.\n xlabel = 'q (pixel)'\n ylabel = 'p (pixel)'\n\n if ax is None:\n if use_wcs:\n ax = plt.subplot(projection=self.wcs.wcs)\n xlabel = 'ra'\n ylabel = 'dec'\n else:\n ax = plt.gca()\n elif use_wcs:\n self._logger.warning(\n 'use_wcs does not work when giving also an axis (ax)')\n\n if var:\n data_plot = self.var\n else:\n data_plot = self.data\n\n # If either axis has just one pixel, plot it as a line-graph.\n if self.shape[1] == 1:\n # Plot a column as a line-graph\n yaxis = np.arange(self.shape[0], dtype=float)\n ax.plot(yaxis, data_plot)\n xlabel = 'p (pixel)'\n ylabel = self.unit\n elif self.shape[0] == 1:\n # Plot a row as a line-graph\n xaxis = np.arange(self.shape[1], dtype=float)\n ax.plot(xaxis, data_plot.T)\n xlabel = 'q (pixel)'\n ylabel = self.unit\n else:\n # Plot a 2D image.\n\n # get image normalization\n norm = get_plot_norm(data_plot, vmin=vmin, vmax=vmax,\n zscale=zscale, scale=scale)\n\n # Display the image.\n cax = ax.imshow(data_plot, interpolation='nearest',\n origin='lower', norm=norm, **kwargs)\n\n # Create a colorbar\n import matplotlib.axes as maxes\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax)\n if colorbar == \"h\":\n cax2 = divider.append_axes(\"top\", size=\"5%\", pad=0.2,\n axes_class=maxes.Axes)\n cbar = plt.colorbar(cax, cax=cax2, orientation='horizontal')\n for t in cbar.ax.xaxis.get_major_ticks():\n t.tick1On = True\n t.tick2On = True\n t.label1On = False\n t.label2On = True\n elif colorbar == \"v\":\n cax2 = divider.append_axes(\"right\", size=\"5%\", pad=0.05,\n axes_class=maxes.Axes)\n plt.colorbar(cax, cax=cax2)\n\n # Keep the axis to allow other functions to overplot\n # the image with contours etc.\n self._ax = ax\n\n # Label the axes if requested.\n if show_xlabel:\n ax.set_xlabel(xlabel)\n if show_ylabel:\n ax.set_ylabel(ylabel)\n if title is not None:\n ax.set_title(title)\n\n # Change the way that plt.show() displays coordinates when the pointer\n # is over the image, such that world coordinates are displayed with the\n # specified unit, and pixel values are displayed with their native\n # units.\n ax.format_coord = FormatCoord(self, data_plot)\n self._unit = unit\n return cax\n\n def get_spatial_fmax(self, rot=None):\n \"\"\"Return the spatial-frequency band-limits of the image along\n the Y and X axes.\n\n See the documentation of set_spatial_fmax() for an explanation\n of what the band-limits are used for.\n\n If no band limits have been specified yet, this function has the\n side-effect of setting them to the band-limits dictated by the\n sampling interval of the image array. 
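(These default limits are the\n        Nyquist folding frequencies of the pixel grid.) 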
Specifically, an X axis\n with a sampling interval of dx can sample spatial frequencies of\n up to 0.5/dx cycles per unit of dx without aliasing.\n\n Parameters\n ----------\n rot : float or None\n Either None, to request band-limits that pertain to the\n Y and X axes of the current image without any rotation,\n or, if the band-limits pertain to a rotated version of\n the image, the rotation angle of its Y axis westward of north\n (degrees). This is defined such that if image.wcs.get_rot()\n is passed to this function, the band limits for the Y and\n X axes of the current image axes will be returned.\n\n Returns\n -------\n out : numpy.ndarray\n The spatial-frequency band-limits of the image along\n the Y and X axes of the image in cycles per self.wcs.unit.\n\n \"\"\"\n\n # If no image angle was provided, get the current rotation angle.\n\n if rot is None:\n rot = self.wcs.get_rot()\n\n # If no band-limits have been specified, initialize them to the\n # limits currently dictated by the sampling intervals of the image.\n\n if self._spflims is None:\n self.set_spatial_fmax(0.5 / self.get_step(), self.wcs.get_rot())\n\n # Return the frequency limits that pertain to the specified\n # rotation angle.\n\n return self._spflims.get_fmax(rot)\n\n def update_spatial_fmax(self, newfmax, rot=None):\n \"\"\"Update the spatial-frequency band-limits recorded for the\n current image.\n\n See the documentation of set_spatial_fmax() for an explanation\n of what the band-limits are used for.\n\n If either of the new limits is less than an existing\n band-limit, and the rotation angle of the new limits is\n the same as the angle of the recorded limits, then the smaller\n limits replace the originals.\n\n If either of the new limits is smaller than the existing\n limits, but the rotation angle for the new limits differs from\n the recorded limits, then both of the original limits are\n discarded and replaced by the new ones at the specified angle.\n\n Parameters\n ----------\n newfmax : numpy.ndarray\n The frequency limits along the Y and X axes, respectively,\n specified in cycles per the angular unit in self.wcs.unit.\n rot : float or None\n Either None, to specify band-limits that pertain to the Y\n and X axes of the current image without any rotation, or,\n if the band-limits pertain to a rotated version of the\n image, the rotation angle of its Y axis westward of north\n (degrees). 
This is defined such that if\n image.wcs.get_rot() is passed to this function, the\n band-limit newfmax[0] will be along the Y axis of the\n image and newfmax[1] will be along its X axis.\n\n \"\"\"\n\n # If no image rotation angle was specified, assume the\n # current angle.\n\n if rot is None:\n rot = self.wcs.get_rot()\n\n # If no band-limits have been set yet, record the new limits.\n\n if self._spflims is None:\n self.set_spatial_fmax(newfmax, rot)\n else:\n\n # Get the existing spatial-frequency band limits at the\n # specified angle.\n\n oldfmax = self._spflims.get_fmax(rot)\n\n # Are either of the new limits smaller than the old ones?\n\n if np.any(newfmax < oldfmax):\n\n # If the rotation angle of the recorded limits is the\n # same as the rotation angle of the new limits, keep\n # existing axis limits that are smaller than the new\n # limits.\n\n if np.isclose(rot, self._spflims.rot):\n newfmax = np.minimum(newfmax, oldfmax)\n\n # Record the new limits.\n\n self.set_spatial_fmax(newfmax, rot)\n\n def set_spatial_fmax(self, newfmax=None, rot=None):\n \"\"\"Specify the spatial-frequency band-limits of the image along\n the Y and X axis. This function completely replaces any existing\n band-limits. See also update_spatial_fmax().\n\n The recorded limits are used to avoid redundantly performing\n anti-aliasing measures such as low-pass filtering an image\n before resampling to a lower resolution, or decreasing pixel\n sizes before rotating high resolution axes onto low resolution\n axes.\n\n Parameters\n ----------\n newfmax : numpy.ndarray\n The new frequency limits along the Y and X axes or a\n band-limiting ellipse, specified in cycles per the angular\n unit in self.wcs.unit.\n rot : float or None\n Either None, to specify band-limits that pertain to the Y\n and X axes of the current image without any rotation, or,\n if the band-limits pertain to a rotated version of the\n image, the rotation angle of its Y axis westward of north\n (degrees). This is defined such that if\n image.wcs.get_rot() is passed to this function, the\n band-limit newfmax[0] will be along the Y axis of the\n image and newfmax[1] will be along its X axis.\n\n \"\"\"\n\n if rot is None:\n rot = self.wcs.get_rot()\n self._spflims = SpatialFrequencyLimits(newfmax, rot)\n\n\ndef gauss_image(shape=(101, 101), wcs=None, factor=1, gauss=None,\n center=None, flux=1., fwhm=(1., 1.), peak=False, rot=0.,\n cont=0, unit_center=u.deg, unit_fwhm=u.arcsec,\n unit=u.dimensionless_unscaled):\n \"\"\"Create a new image from a 2D gaussian.\n\n Parameters\n ----------\n shape : int or (int,int)\n Lengths of the image in Y and X with python notation: (ny,nx).\n (101,101) by default. If wcs object contains dimensions, shape is\n ignored and wcs dimensions are used.\n wcs : `mpdaf.obj.WCS`\n World coordinates.\n factor : int\n If factor<=1, gaussian value is computed in the center of each pixel.\n If factor>1, for each pixel, gaussian value is the sum of the gaussian\n values on the factor*factor pixels divided by the pixel area.\n gauss : `mpdaf.obj.Gauss2D`\n Object that contains all Gaussian parameters. If it is present, the\n following parameters are not used.\n center : (float,float)\n Gaussian center (y_peak, x_peak). If None the center of the image is\n used. 
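(In pixel\n        terms this default is (np.array(shape) - 1) / 2.0, as in the code\n        below.) 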
The unit is given by the unit_center parameter (degrees by\n        default).\n    flux : float\n        Integrated gaussian flux or gaussian peak value if peak is True.\n    fwhm : (float,float)\n        Gaussian fwhm (fwhm_y,fwhm_x).\n        The unit is given by the unit_fwhm parameter (arcseconds by default).\n    peak : bool\n        If True, flux contains a gaussian peak value.\n    rot : float\n        Angle position in degrees.\n    cont : float\n        Continuum value. 0 by default.\n    unit_center : `astropy.units.Unit`\n        Type of the center and position coordinates.\n        Degrees by default (use None for coordinates in pixels).\n    unit_fwhm : `astropy.units.Unit`\n        FWHM unit. Arcseconds by default (use None for radius in pixels)\n\n    Returns\n    -------\n    out : `~mpdaf.obj.Image`\n\n    """\n    if is_int(shape):\n        shape = (shape, shape)\n    shape = np.array(shape)\n\n    wcs = wcs or WCS()\n    if wcs.naxis1 == 1. and wcs.naxis2 == 1.:\n        wcs.naxis1 = shape[1]\n        wcs.naxis2 = shape[0]\n    else:\n        if wcs.naxis1 != 0. or wcs.naxis2 != 0.:\n            shape[1] = wcs.naxis1\n            shape[0] = wcs.naxis2\n\n    if gauss is not None:\n        center = gauss.center\n        flux = gauss.flux\n        fwhm = gauss.fwhm\n        peak = False\n        rot = gauss.rot\n        cont = gauss.cont\n\n    if center is None:\n        center = (np.array(shape) - 1) / 2.0\n    else:\n        if unit_center is not None:\n            center = wcs.sky2pix(center, unit=unit_center)[0]\n\n    if unit_fwhm is not None:\n        fwhm = np.array(fwhm) / wcs.get_step(unit=unit_fwhm)\n\n    if fwhm[1] == 0 or fwhm[0] == 0:\n        raise ValueError('fwhm equal to 0')\n    p_width = fwhm[0] * gaussian_fwhm_to_sigma\n    q_width = fwhm[1] * gaussian_fwhm_to_sigma\n\n    # rotation angle in rad\n    theta = np.pi * rot / 180.0\n\n    if peak is True:\n        norm = flux * 2 * np.pi * p_width * q_width\n    else:\n        norm = flux\n\n    def gauss(p, q):\n        cost = np.cos(theta)\n        sint = np.sin(theta)\n        xdiff = p - center[0]\n        ydiff = q - center[1]\n        return (\n            norm / (2 * np.pi * p_width * q_width) *\n            np.exp(-(xdiff * cost - ydiff * sint) ** 2 / (2 * p_width ** 2)) *\n            np.exp(-(xdiff * sint + ydiff * cost) ** 2 / (2 * q_width ** 2))\n        )\n\n    if factor > 1:\n        if rot == 0:\n            from scipy import special\n\n            X, Y = np.meshgrid(range(shape[0]), range(shape[1]))\n            pixcrd_min = np.array(list(zip(X.ravel(), Y.ravel()))) - 0.5\n            xmin = (pixcrd_min[:, 1] - center[1]) / np.sqrt(2.0) / q_width\n            ymin = (pixcrd_min[:, 0] - center[0]) / np.sqrt(2.0) / p_width\n\n            pixcrd_max = np.array(list(zip(X.ravel(), Y.ravel()))) + 0.5\n            xmax = (pixcrd_max[:, 1] - center[1]) / np.sqrt(2.0) / q_width\n            ymax = (pixcrd_max[:, 0] - center[0]) / np.sqrt(2.0) / p_width\n\n            dx = pixcrd_max[:, 1] - pixcrd_min[:, 1]\n            dy = pixcrd_max[:, 0] - pixcrd_min[:, 0]\n            data = norm * 0.25 / dx / dy \\\n                * (special.erf(xmax) - special.erf(xmin)) \\\n                * (special.erf(ymax) - special.erf(ymin))\n            data = np.reshape(data, (shape[1], shape[0])).T\n        else:\n            yy, xx = np.mgrid[:shape[0] * factor, :shape[1] * factor] / factor\n            data = gauss(yy, xx)\n            # Average each factor x factor block of the oversampled grid.\n            data = data.reshape(shape[0], factor,\n                                shape[1], factor).sum(axis=(1, 3))\n            data /= factor ** 2\n    else:\n        yy, xx = np.mgrid[:shape[0], :shape[1]]\n        data = gauss(yy, xx)\n\n    return Image(data=data + cont, wcs=wcs, unit=unit, copy=False, dtype=None)\n\n\ndef moffat_image(shape=(101, 101), wcs=None, factor=1, moffat=None,\n                 center=None, flux=1., fwhm=(1., 1.), peak=False, n=2,\n                 rot=0., cont=0, unit_center=u.deg, unit_fwhm=u.arcsec,\n                 unit=u.dimensionless_unscaled):\n    """Create a new image from a 2D Moffat function.\n\n    Parameters\n    
----------\n    shape : int or (int,int)\n        Lengths of the image in Y and X with python notation: (ny,nx).\n        (101,101) by default. If wcs object contains dimensions, shape is\n        ignored and wcs dimensions are used.\n    wcs : `mpdaf.obj.WCS`\n        World coordinates.\n    factor : int\n        If factor<=1, moffat value is computed in the center of each pixel.\n        If factor>1, for each pixel, moffat value is the sum\n        of the moffat values on the factor*factor pixels divided\n        by the pixel area.\n    moffat : `mpdaf.obj.Moffat2D`\n        Object that contains all Moffat parameters.\n        If it is present, the following parameters are not used.\n    center : (float,float)\n        Peak center (y_peak, x_peak). The unit is given by the parameter\n        unit_center (degrees by default). If None the center of the image is\n        used.\n    flux : float\n        Integrated Moffat flux, or Moffat peak value\n        if peak is True.\n    fwhm : (float,float)\n        Moffat fwhm (fwhm_y,fwhm_x).\n        The unit is given by the parameter unit_fwhm (arcseconds by default)\n    peak : bool\n        If true, flux contains a Moffat peak value.\n    n : int\n        Atmospheric scattering coefficient. 2 by default.\n    rot : float\n        Angle position in degrees.\n    cont : float\n        Continuum value. 0 by default.\n    unit_center : `astropy.units.Unit`\n        type of the center and position coordinates.\n        Degrees by default (use None for coordinates in pixels).\n    unit_fwhm : `astropy.units.Unit`\n        FWHM unit. Arcseconds by default (use None for radius in pixels)\n\n    Returns\n    -------\n    out : `~mpdaf.obj.Image`\n\n    \"\"\"\n    n = float(n)\n    if is_int(shape):\n        shape = (shape, shape)\n    shape = np.array(shape)\n\n    wcs = wcs or WCS()\n    if wcs.naxis1 == 1. and wcs.naxis2 == 1.:\n        wcs.naxis1 = shape[1]\n        wcs.naxis2 = shape[0]\n    else:\n        if wcs.naxis1 != 0. or wcs.naxis2 != 0.:\n            shape[1] = wcs.naxis1\n            shape[0] = wcs.naxis2\n\n    if moffat is not None:\n        center = moffat.center\n        flux = moffat.flux\n        fwhm = moffat.fwhm\n        peak = False\n        n = moffat.n\n        rot = moffat.rot\n        cont = moffat.cont\n\n    fwhm = np.array(fwhm)\n    a = fwhm[0] / (2 * np.sqrt(2 ** (1.0 / n) - 1.0))\n    e = fwhm[1] / fwhm[0]\n\n    if unit_fwhm is not None:\n        a = a / wcs.get_step(unit=unit_fwhm)[0]\n\n    if peak:\n        norm = flux\n    else:\n        norm = flux * (n - 1) / (np.pi * a * a * e)\n\n    if center is None:\n        center = np.array([(shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0])\n    else:\n        if unit_center is not None:\n            center = wcs.sky2pix(center, unit=unit_center)[0]\n\n    # rotation angle in rad\n    theta = np.pi * rot / 180.0\n\n    def moffat(p, q):\n        cost = np.cos(theta)\n        sint = np.sin(theta)\n        xdiff = p - center[0]\n        ydiff = q - center[1]\n        return (\n            norm * (1 +\n                    ((xdiff * cost - ydiff * sint) / a) ** 2 +\n                    ((xdiff * sint + ydiff * cost) / a / e) ** 2) ** (-n)\n        )\n\n    if factor > 1:\n        X, Y = np.meshgrid(range(shape[0] * factor),\n                           range(shape[1] * factor))\n        factor = float(factor)\n        pixcrd = np.array(list(zip(X.ravel() / factor, Y.ravel() / factor)))\n        data = moffat(pixcrd[:, 0], pixcrd[:, 1])\n        # reshape requires integer dimensions, so cast factor back to int here\n        data = (data.reshape(shape[1], int(factor), shape[0], int(factor))\n                .sum(1).sum(2) / factor / factor).T\n    else:\n        yy, xx = np.mgrid[:shape[0], :shape[1]]\n        data = moffat(yy, xx)\n\n    return Image(data=data + cont, wcs=wcs, unit=unit, copy=False, dtype=None)\n\n\ndef _antialias_filter_image(data, oldstep, newstep, oldfmax=None,\n                            window=\"blackman\"):\n    \"\"\"Apply an anti-aliasing prefilter to an image to prepare\n    it for subsampling.\n\n    Parameters\n    ----------\n    data : numpy.ndarray\n        The 2D image to be filtered.\n    oldstep: float or (float, float)\n        The cell size of the input image. 
This can be a single\n number for both the X and Y axes, or it can be two\n numbers in an iterable, ordered like (ystep,xstep)\n newstep: float or (float, float)\n The cell size of the output image. This can be a single\n number for both the X and Y axes, or it can be two\n numbers in an iterable, ordered like (ystep,xstep)\n oldfmax : float,float or None\n When an image has previously been filtered, this\n argument can be used to indicate the frequency cutoffs\n that were applied at that time along the Y and X axes,\n respectively, in units of cycles per the unit of oldstep\n and newstep. Image axes that have already been sufficiently\n filtered will then not be refiltered redundantly. If no\n band-limits have previously been established, pass this\n argument as None.\n window : str\n The type of window function to use to filter the\n FFT, chosen from:\n\n blackman\n This window suppresses ringing better than any other\n window, at the expense of lowered image resolution. In\n the image plane, the PSF of this window is\n approximately gaussian, with a standard deviation of\n around 0.96*newstep, and a FWHM of about 2.3*newstep.\n\n gaussian\n A truncated gaussian window. This has a smaller PSF\n than the blackman window, however gaussians never fall\n to zero, so either significant ringing will be seen due\n to truncation of the gaussian, or low-level aliasing\n will occur, depending on the spatial frequency coverage\n of the image beyond the folding frequency. It can be a\n good choice for images that only contain smoothly\n varying features. It is equivalent to a convolution of\n the image with both an airy profile and a gaussian of\n standard deviation 0.724*newstep (FWHM 1.704*newstep).\n\n rectangle\n This window simply zeros all spatial frequencies above\n the highest that can be correctly sampled by the new\n pixel size. This gives the best resolution of any of\n the windows, but this is marred by the strong sidelobes\n of the resulting airy-profile, especially near bright\n point sources and CCD saturation lines.\n\n Returns\n -------\n out : numpy.ndarray, numpy.ndarray\n The filtered version of the 2D input image, followed by\n a 2-element array that contains the new band-limits\n along the Y and X axes, respectively.\n\n \"\"\"\n\n # Convert oldstep into a numpy array of two float elements.\n if is_number(oldstep):\n oldstep = (oldstep, oldstep)\n oldstep = abs(np.asarray(oldstep, dtype=float))\n\n # Convert newstep into a numpy array of two float elements.\n if is_number(newstep):\n newstep = (newstep, newstep)\n newstep = abs(np.asarray(newstep, dtype=float))\n\n # If no band-limits have been specified, substitute the\n # band-limits dictated by the current sampling interval.\n if oldfmax is None:\n oldfmax = 0.5 / oldstep\n else:\n oldfmax = np.minimum(oldfmax, 0.5 / oldstep)\n\n # Calculate the maximum frequencies that will be sampled by\n # the new pixel sizes along the Y and X axes.\n newfmax = 0.5 / newstep\n\n # Which axes need to be filtered?\n filter_axes = newfmax < oldfmax\n\n # Return the original image if neither axis needs filtering.\n if np.all(np.logical_not(filter_axes)):\n return data, oldfmax\n\n # Get the extent of the input image as a pair of slices.\n image_slice = (slice(0, data.shape[0]), slice(0, data.shape[1]))\n\n # FFT algorithms can be extremely slow for arrays whose\n # dimensions are not powers of 2. 
The conventional way to avoid this\n # is to copy the image into a new array whose dimensions\n # are powers of 2, and fill the extra pixels with zeros.\n\n shape = 2**(np.ceil(np.log(np.asarray(data.shape)) /\n np.log(2.0))).astype(int)\n if data.shape[0] != shape[0] or data.shape[1] != shape[1]:\n tmp = np.zeros(shape)\n tmp[image_slice] = data\n data = tmp\n\n # Get the new dimensions of the zero-padded image.\n ny, nx = shape\n\n # Obtain the FFT of the image.\n fft = np.fft.rfft2(data)\n del data\n\n # The new pixel sizes along the X and Y axes can only correctly\n # sample spatial frequencies up to the values in newfmax. Set the\n # cutoff frequencies for the window functions along the x and y\n # axes to those frequencies.\n fycut, fxcut = newfmax\n\n # Create an array which, for each pixel in the FFT image, holds\n # the radial spatial-frequency of the pixel center, divided by\n # the cutoff frequency. These values will later be used to index\n # the 1D window-function.\n\n wr = np.sqrt((np.fft.rfftfreq(nx, oldstep[1]) / fxcut)**2 +\n (np.fft.fftfreq(ny, oldstep[0]) / fycut)[np.newaxis, :].T**2)\n\n # Get the requested window function as a function of frequency\n # divided by its cutoff frequency.\n\n if window is None or window == \"blackman\":\n winfn = lambda r: np.where(r <= 1.0,\n 0.42 + 0.5 * np.cos(np.pi * r) +\n 0.08 * np.cos(2 * np.pi * r),\n 0.0)\n\n # For the gaussian window the standard deviation, sigma, is\n # as a fraction of the normalized cutoff frequency. Note that\n # in the image plane the corresponding gaussian standard\n # deviation should be newstep/(pi*sigma).\n\n elif window == \"gaussian\":\n sigma = 0.44\n winfn = lambda r: np.exp(-0.5 * (r / sigma)**2)\n\n # For the rectangular window, just multiply all pixels below the\n # cutoff frequency by one, and the rest by zero.\n\n elif window == \"rectangle\":\n winfn = lambda r: np.where(r <= 1.0, 1.0, 0.0)\n\n # Apply the window function to the FFT to remove frequencies above the\n # cutoff frequencies.\n\n fft *= winfn(wr)\n del wr\n\n # Perform an inverse Fourier transform to get the filtered image\n data = np.fft.irfft2(fft)\n del fft\n\n # Crop the antialiased image to remove the zero-padded pixels, and\n # return this along with the new spatial-frequency limits.\n return data[image_slice], np.where(filter_axes, newfmax, oldfmax)\n\n\ndef _find_quadratic_peak(y):\n \"\"\"Given an array of 3 numbers in which the first and last numbers are\n less than the central number, determine the array index at which a\n quadratic curve through the 3 points reaches its peak value.\n\n Parameters\n ----------\n y : float,float,float\n The values of the curve at x=0,1,2 respectively. Note that y[1]\n must be greater than both y[0] and y[2]. Otherwise +/- infinity\n will be returned.\n\n Returns\n -------\n xpeak : float\n The floating point array index of the peak of the quadratic. 
This\n        will always be in the range 0.0 to 2.0, provided that y[0]<y[1] and\n        y[2]<y[1].\n\n    \"\"\"\n\n    # Given the three equations:\n    #\n    # a * x0**2 + b * x0 + c = y0\n    # a * x1**2 + b * x1 + c = y1\n    # a * x2**2 + b * x2 + c = y2\n    #\n    # a, b, and c are given by:\n    #\n    # a = 0.5 * y0 - y1 + 0.5 * y2\n    # b = -1.5 * y0 + 2.0 * y1 - 0.5 * y2\n    # c = y0\n\n    a = 0.5 * y[0] - y[1] + 0.5 * y[2]\n    b = -1.5 * y[0] + 2.0 * y[1] - 0.5 * y[2]\n\n    # Quadratic curves peak at: x = -b / (2*a)\n    return -b / (2 * a)\n\n\nclass SpatialFrequencyLimits:\n\n    \"\"\"Keep track of the spatial-frequency limits of an image.\n\n    Recording these limits makes it possible to determine, before an image\n    is resampled, whether anything needs to be done to avoid undersampling\n    and generating aliasing artefacts in the output image.\n\n    The band-limits are recorded as an ellipse. Most telescopes have circularly\n    symmetric PSFs and thus circularly symmetric spatial-frequency band limits,\n    but this spatial-frequency profile may become elliptical if an image is\n    resampled to have a lower resolution along one axis.\n\n    The ellipse is defined in its own X,Y coordinate system as follows::\n\n        xe(t)=xs*cos(t)\n        ye(t)=ys*sin(t)\n\n    The ye axis of the ellipse is at self.rot degrees west of north in the\n    image. For the Y axis of a coordinate system where Y is rot degrees west of\n    north, the ellipse thus has to be rotated by ``psi = (rot - self.rot)``\n    degrees anticlockwise to calculate the X and Y values of the ellipse in\n    that coordinate system::\n\n        |x(t)| = |cos(psi), -sin(psi)| |xe(t)|\n        |y(t)|   |sin(psi),  cos(psi)| |ye(t)|\n\n    Parameters\n    ----------\n    fmax : float, float\n        The frequency limits along the Y-axis and X-axis of an\n        elliptically shaped band-limit (e.g. cycles per degree).\n    rot : float\n        The rotation angle of the Y axis of the ellipse westward of\n        north (degrees). This is defined such that if\n        image.wcs.get_rot() is passed to this function, the Y axis of\n        the ellipse will be aligned with the Y axis of the image.\n\n    \"\"\"\n\n    def __init__(self, fmax, rot):\n        # Store the Y and X axes of the band-limiting ellipse.\n        self.fmax = np.array(fmax, dtype=float, copy=True)\n\n        # Record the rotation angle in degrees of the ellipse, after\n        # wrapping the angle into the range -180 to 180, to make it\n        # easy to compare with angles returned by wcs.get_rot().\n        self.rot = float(rot - 360.0 * np.floor(rot / 360.0 + 0.5))\n\n    def deepcopy(self):\n        return SpatialFrequencyLimits(self.fmax, self.rot)\n\n    def get_fmax(self, rot):\n        \"\"\"Return the spatial-frequency band-limits along a Y axis that is\n        'rot' degrees west of north, and an X axis that is 90 degrees\n        away from this Y axis in the sense of a rotation from north to east.\n\n        Parameters\n        ----------\n        rot : float\n            The angle of the target Y axis west of north (degrees).\n\n        Returns\n        -------\n        out : numpy.ndarray\n            The maximum spatial frequencies along the Y and X axes at\n            rotation angle rot, in the same units as were used to\n            initialize the object.\n\n        \"\"\"\n\n        # Extract the Y and X axis radii of the ellipse.\n        ys, xs = self.fmax\n\n        # Compute the rotation angle of the ellipse in radians.\n        psi = np.deg2rad(rot - self.rot)\n\n        # Precalculate sin and cos of the ellipse rotation angle.\n        cos_psi = np.cos(psi)\n        sin_psi = np.sin(psi)\n\n        # Calculate the ellipse phases where the X and Y coordinates\n        # of the ellipse locus are maximized. 
These equations come from\n        # calculating d[x(t)]/dt=0 and d[y(t)]/dt=0 using the definitions\n        # of x(t) and y(t) that are shown in the class documentation.\n        t_xmax = np.arctan2(-ys * sin_psi, xs * cos_psi)\n        t_ymax = np.arctan2(ys * cos_psi, xs * sin_psi)\n\n        # Get the maximum X and Y coordinates of the rotated ellipse.\n        xmax = xs * np.cos(t_xmax) * cos_psi - ys * np.sin(t_xmax) * sin_psi\n        ymax = xs * np.cos(t_ymax) * sin_psi + ys * np.sin(t_ymax) * cos_psi\n\n        return np.array([ymax, xmax], dtype=float)\n\n    def ellipse_locus(self, t, rot):\n        \"\"\"Return the Y,X coordinates of the band-limiting ellipse\n        at ellipse phase t.\n\n        Parameters\n        ----------\n        t : float\n            The elliptical phase at which to calculate the\n            coordinates (radians).\n        rot : float\n            The rotation angle of the Y axis of the ellipse west\n            of north (degrees).\n\n        Returns\n        -------\n        out : numpy.ndarray\n            The Y and X coordinates of the band-limiting ellipse.\n        \"\"\"\n\n        # Extract the Y and X axis radii of the ellipse.\n        ys, xs = self.fmax\n\n        # Compute the rotation angle of the ellipse in radians.\n        psi = np.deg2rad(rot - self.rot)\n\n        # Precalculate sin and cos of the ellipse rotation angle.\n        cos_psi = np.cos(psi)\n        sin_psi = np.sin(psi)\n\n        # Precalculate sin and cos of the phase of the ellipse.\n        cos_t = np.cos(t)\n        sin_t = np.sin(t)\n\n        # Calculate the locus of the ellipse at phase t, using\n        # the equations shown in the class documentation.\n        x = xs * cos_t * cos_psi - ys * sin_t * sin_psi\n        y = xs * cos_t * sin_psi + ys * sin_t * cos_psi\n\n        return np.array([y, x], dtype=float)\n"
] |
[
[
"numpy.arange",
"numpy.max",
"numpy.unique"
],
[
"numpy.dot",
"scipy.ndimage.binary_erosion",
"numpy.minimum",
"numpy.radians",
"numpy.sqrt",
"numpy.asarray",
"numpy.rad2deg",
"numpy.arctan2",
"numpy.max",
"scipy.signal.correlate2d",
"numpy.zeros_like",
"numpy.any",
"scipy.interpolate.griddata",
"numpy.ma.array",
"scipy.ndimage.binary_fill_holes",
"numpy.exp",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.reshape",
"numpy.arange",
"scipy.ndimage.generate_binary_structure",
"numpy.fft.rfftfreq",
"scipy.ndimage.median_filter",
"numpy.ma.mask_or",
"numpy.sin",
"scipy.optimize.leastsq",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"scipy.special.erf",
"numpy.zeros",
"numpy.isclose",
"numpy.logical_not",
"numpy.log",
"scipy.ndimage.measurements.maximum_position",
"scipy.signal.fftconvolve",
"numpy.min",
"numpy.linalg.inv",
"matplotlib.path.Path",
"scipy.ndimage.measurements.label",
"numpy.ma.median",
"numpy.logical_or",
"numpy.deg2rad",
"numpy.fft.rfft2",
"numpy.floor",
"scipy.ndimage.binary_dilation",
"numpy.fft.irfft2",
"numpy.fft.fftfreq",
"numpy.array",
"scipy.ndimage.grey_dilation",
"numpy.ma.filled",
"scipy.ndimage.gaussian_filter",
"numpy.abs",
"scipy.interpolate.bisplrep",
"scipy.interpolate.bisplev",
"numpy.cos",
"numpy.indices",
"numpy.ones",
"numpy.sign",
"matplotlib.pyplot.colorbar",
"numpy.random.normal",
"numpy.shape",
"numpy.isscalar",
"scipy.ndimage.measurements.find_objects",
"numpy.empty"
]
] |
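The _antialias_filter_image helper in the source above does its low-pass filtering by multiplying the image's FFT with a window whose cutoff is the folding frequency of the new pixel grid. The sketch below isolates that windowed-FFT step for the default Blackman window; it is a minimal illustration rather than mpdaf code, it assumes a single scalar pixel step shared by both axes, and the name lowpass_blackman is invented for this example.

import numpy as np

def lowpass_blackman(data, oldstep, newstep):
    """Suppress spatial frequencies above 0.5/newstep with a Blackman window."""
    ny, nx = data.shape
    fcut = 0.5 / newstep                      # folding frequency of the new grid
    fx = np.fft.rfftfreq(nx, oldstep) / fcut  # normalized frequencies along X
    fy = np.fft.fftfreq(ny, oldstep) / fcut   # normalized frequencies along Y
    r = np.sqrt(fx[np.newaxis, :] ** 2 + fy[:, np.newaxis] ** 2)
    win = np.where(r <= 1.0,
                   0.42 + 0.5 * np.cos(np.pi * r) + 0.08 * np.cos(2 * np.pi * r),
                   0.0)
    return np.fft.irfft2(np.fft.rfft2(data) * win, s=(ny, nx))

filtered = lowpass_blackman(np.random.standard_normal((128, 128)),
                            oldstep=0.2, newstep=0.6)

The window evaluates to 0.42 + 0.5 + 0.08 = 1 at zero frequency and falls smoothly to zero at the cutoff, which is why the docstring above describes it as trading image resolution for suppressed ringing.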
CVC-TDA-ADRE/MonoDEVSNet
|
[
"25e2f7cd28909f933eb2f8dd7db9046dc7237635"
] |
[
"datasets_EXT/vk_dataset.py"
] |
[
"# Author: Akhil Gurram\n# Build on top of the monodepth2\n# (Automatically pulled from git repo, monodepth2 source code is not included in this repository)\n# This is the training script of the MonoDEVSNet framework.\n# MonoDEVSNet: Monocular Depth Estimation through Virtual-world Supervision and Real-world SfM Self-Supervision\n# https://arxiv.org/abs/2103.12209\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport random\nfrom abc import ABC\n\nimport kornia\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torch.nn import functional as F\nfrom torchvision import transforms\n\nfrom monodepth2.datasets.mono_dataset import pil_loader\n\n\nclass DepthDataset(data.Dataset):\n \"\"\"Superclass for monocular dataloaders\n\n Args:\n opt\n csv_file_path\n num_scales\n is_train\n \"\"\"\n\n def __init__(self,\n opt,\n csv_file_path,\n frame_ids,\n num_scales,\n subset='a0',\n is_train=False):\n super(DepthDataset, self).__init__()\n\n def shortlist_filenames():\n\n angles = []\n if self.ax == 'aall':\n angles = [0, 45, 90, 135, 180, 225, 270, 315, 360]\n elif self.ax == 'a0':\n angles = [0, 180]\n elif self.ax == 'a1':\n angles = [45, 135]\n elif self.ax == 'a2':\n angles = [90, 270]\n elif self.ax == 'a3':\n angles = [225, 315]\n else:\n print('choose subset among: aall, a0, a1, a2, a3')\n\n self.mono_or_stereo = 'mono' # opt.training_cam_type\n try:\n training_cam_type = 'mono' # opt.training_cam_type\n testing_cam_type = 'mono' # opt.testing_cam_type\n\n list_of_angles_indexes = self.filenames['angle'].to_numpy()\n list_of_cam_type_indexes = self.filenames['mono_or_stereo'].to_numpy()\n list_of_train_or_val_indexes = self.filenames['train_or_val'].to_numpy()\n list_of_valid_or_not_indexes = self.filenames['valid_or_not'].to_numpy()\n list_of_depth_init_indexes = self.filenames['depth_starting'].to_numpy()\n list_subset_ax = self.filenames['ax'].to_numpy()\n\n if is_train:\n train_or_val = 'train'\n cam_type = training_cam_type\n self.mono_or_stereo = training_cam_type\n else:\n train_or_val = 'val'\n cam_type = testing_cam_type\n self.mono_or_stereo = testing_cam_type\n\n # based on angles\n set_indexes_angle = []\n for local_angle in angles:\n set_indexes_angle = set_indexes_angle + np.where(list_of_angles_indexes == local_angle)[0].tolist()\n\n # based on cam type\n set_indexes_mono_or_stereo = np.where(list_of_cam_type_indexes == cam_type)[0].tolist()\n if 'mono' in training_cam_type: # if its mono use stereo images also\n set_indexes_mono_or_stereo = set_indexes_mono_or_stereo + \\\n np.where(list_of_cam_type_indexes == 'stereo')[0].tolist()\n\n # based on training or val set\n set_indexes_train_or_val = np.where(list_of_train_or_val_indexes == train_or_val)[0].tolist()\n\n # based on valid or not columns info :: valid = 1 else 0\n set_indexes_valid_or_not = np.where(list_of_valid_or_not_indexes == 1)[0].tolist()\n\n # based on depth values should start from 3 meters as minimum\n set_indexes_depth_init = np.where(list_of_depth_init_indexes > self.depth_init)[0].tolist()\n\n # based on depth values should start from 3 meters as minimum\n set_indexes_ax = []\n if self.ax == 'aall':\n for ax_ in ['a0', 'a1', 'a2', 'a3', 'aall']:\n set_indexes_ax += np.where(list_subset_ax == ax_)[0].tolist()\n else:\n set_indexes_ax = np.where(list_subset_ax == self.ax)[0].tolist()\n\n # find common intersection of conditions nodes\n set_indexes_shortlisted = list(set(set_indexes_angle) &\n 
set(set_indexes_mono_or_stereo) &\n set(set_indexes_train_or_val) &\n set(set_indexes_valid_or_not) &\n set(set_indexes_depth_init) &\n set(set_indexes_ax))\n\n self.filenames = self.filenames.copy().reindex(set_indexes_shortlisted).reset_index()\n\n except Exception as e_not_:\n print(e_not_)\n self.filenames = self.filenames.copy()\n\n if opt.total_number_of_images_for_training == -1:\n self.filenames = self.filenames.copy()\n else:\n self.filenames = self.filenames[:opt.total_number_of_images_for_training]\n\n return self.filenames\n\n self.num_scales = num_scales\n self.is_train = is_train\n self.frame_ids = frame_ids\n\n self.opt = opt\n self.dataset_name = self.opt.syn_dataset\n if self.opt.syn_data_path is not None:\n self.data_path = self.opt.syn_data_path\n else:\n self.data_path = '/mnt/ssd1/Datasets'\n self.n_class = self.opt.n_class\n self.height = self.opt.height\n self.width = self.opt.width\n self.loader = pil_loader\n self.to_tensor = transforms.ToTensor()\n self.do_flip = self.opt.do_flip\n self.do_color_aug = self.opt.do_color_aug\n\n # We need to specify augmentations differently in newer versions of torchvision.\n # We first try the newer tuple version; if this fails we fall back to scalars\n try:\n self.brightness = (0.8, 1.2)\n self.contrast = (0.8, 1.2)\n self.saturation = (0.8, 1.2)\n self.hue = (-0.1, 0.1)\n transforms.ColorJitter(\n self.brightness, self.contrast, self.saturation, self.hue)\n except TypeError:\n self.brightness = 0.2\n self.contrast = 0.2\n self.saturation = 0.2\n self.hue = 0.1\n\n self.resize = {}\n for i in range(self.num_scales):\n s = 2 ** i\n self.resize[i] = transforms.Resize((self.height // s, self.width // s), interpolation=Image.ANTIALIAS)\n\n # from virtual KITTI dataset\n self.debug = False\n self.filenames = pd.read_csv(csv_file_path, delimiter=',')\n self.depth_init = self.opt.depth_init\n self.ax = subset\n\n self.filenames = shortlist_filenames()\n self.total_num_ims = self.filenames.shape[0]\n\n # iter, rgb, depth, Segmentation, street_address, weather, view, angle, dataset\n self.iter = 0\n self.street_address = ''\n self.weather = ''\n self.view = ''\n self.angle = ''\n self.shape = np.array(Image.open(os.path.join(self.data_path, self.filenames['rgb_l'][0]))).shape[:2]\n self.load_depth = True\n self.load_segm = True\n\n def preprocess(self, inputs, color_aug):\n \"\"\"Resize colour images to the required scales and augment if required\n\n We create the color_aug object in advance and apply the same augmentation to all\n images in this item. 
This ensures that all images input to the pose network receive the\n same augmentation.\n \"\"\"\n for k in list(inputs):\n if \"color\" in k:\n n, im, i = k\n for i in range(self.num_scales):\n inputs[(n, im, i)] = self.resize[i](inputs[(n, im, i - 1)])\n\n for k in list(inputs):\n f = inputs[k]\n if \"color\" in k:\n n, im, i = k\n inputs[(n, im, i)] = self.to_tensor(f)\n inputs[(n + \"_aug\", im, i)] = self.to_tensor(color_aug(f))\n\n def __len__(self):\n return len(self.filenames)\n\n def __getitem__(self, index):\n \"\"\"Returns a single training item from the dataset as a dictionary.\n\n Values correspond to torch tensors.\n Keys in the dictionary are either strings or tuples:\n\n (\"color\", <frame_id>, <scale>) for raw colour images,\n (\"color_aug\", <frame_id>, <scale>) for augmented colour images,\n (\"K\", scale) or (\"inv_K\", scale) for camera intrinsics,\n \"stereo_T\" for camera extrinsics, and\n \"depth_gt\" for ground truth depth maps.\n\n <frame_id> is either:\n an integer (e.g. 0, -1, or 1) representing the temporal step relative to 'index',\n or\n \"s\" for the opposite image in the stereo pair.\n\n <scale> is an integer representing the scale of the image relative to the fullsize image:\n -1 images at native resolution as loaded from disk\n 0 images resized to (self.width, self.height )\n 1 images resized to (self.width // 2, self.height // 2)\n 2 images resized to (self.width // 4, self.height // 4)\n 3 images resized to (self.width // 8, self.height // 8)\n \"\"\"\n inputs = {\"syn_or_real\": \"syn\"}\n\n do_color_aug = self.is_train and self.do_color_aug and random.random() > 0.5\n do_flip = self.is_train and self.do_flip and random.random() > 0.5\n\n file_path = self.filenames['rgb_l'][index]\n side = self.filenames['l_or_r'][index]\n for i in self.frame_ids:\n if i == \"s\":\n other_side = {\"r\": [\"/Right/\", \"/Left/\"], \"l\": [\"/Left/\", \"/Right/\"]}[side]\n file_path = file_path.replace(other_side[0], other_side[1])\n inputs[(\"color\", i, -1)] = self.get_color(file_path, do_flip)\n\n # adjusting intrinsics to match each scale in the pyramid\n for scale in range(self.num_scales):\n K = self.K.copy()\n\n K[0, :] *= self.width // (2 ** scale)\n K[1, :] *= self.height // (2 ** scale)\n\n inv_K = np.linalg.pinv(K)\n\n inputs[(\"K\", scale)] = torch.from_numpy(K)\n inputs[(\"inv_K\", scale)] = torch.from_numpy(inv_K)\n\n if do_color_aug:\n color_aug = transforms.ColorJitter(\n self.brightness, self.contrast, self.saturation, self.hue)\n else:\n color_aug = (lambda x: x)\n\n self.preprocess(inputs, color_aug)\n\n for i in self.frame_ids:\n del inputs[(\"color\", i, -1)]\n del inputs[(\"color_aug\", i, -1)]\n\n if self.load_depth:\n file_path = self.filenames['depth_l'][index]\n depth_gt = self.get_depth(file_path, do_flip)\n inputs[\"depth_gt\"] = np.expand_dims(depth_gt, 0)\n inputs[\"depth_gt\"] = F.interpolate(\n torch.from_numpy(inputs[\"depth_gt\"].astype(np.float32)).unsqueeze(0),\n (self.height, self.width)).squeeze(0)\n\n if self.load_segm:\n file_path = self.filenames['segm_l'][index]\n segm_gt = self.get_segm(file_path, do_flip)\n if self.n_class == 2:\n mask_segm = np.ones(segm_gt.shape, dtype=np.int32)\n mask_segm[segm_gt == 8] = 0\n mask_segm[segm_gt == 9] = 0\n mask_segm[segm_gt == 10] = 0\n mask_segm[segm_gt == 11] = 0\n inputs[\"segm_gt\", 0, 0] = np.expand_dims(mask_segm, 0)\n else:\n inputs[\"segm_gt\", 0, 0] = np.expand_dims(segm_gt, 0)\n\n inputs[\"segm_gt\", 0, 0] = F.interpolate(\n torch.from_numpy(inputs[\"segm_gt\", 0, 
0].astype(np.float32)).unsqueeze(0),\n (self.height, self.width)).long().squeeze(0)\n\n # semantic edges\n edges = kornia.laplacian(inputs[\"segm_gt\", 0, 0].unsqueeze(0).float(), kernel_size=5).squeeze(0)\n inputs[(\"segm_edges\", 0, 0)] = (edges[0] > 0.1).long()\n\n if \"s\" in self.frame_ids:\n stereo_T = np.eye(4, dtype=np.float32)\n baseline_sign = -1 if do_flip else 1\n side_sign = -1 if side == \"l\" else 1\n stereo_T[0, 3] = side_sign * baseline_sign * 0.1\n\n inputs[\"stereo_T\"] = torch.from_numpy(stereo_T)\n\n edges = kornia.laplacian(inputs[\"color_aug\", 0, 0].unsqueeze(0).float(), kernel_size=5)\n edges = edges / edges.max()\n inputs[(\"edges\", 0, 0)] = (edges[0, 0] > 0.1).long()\n\n return inputs\n\n def get_color(self, file_path, do_flip):\n raise NotImplementedError\n\n def get_depth(self, file_path, do_flip):\n raise NotImplementedError\n\n def get_segm(self, file_path, do_flip):\n raise NotImplementedError\n\n def check_depth(self):\n raise NotImplementedError\n\n\nclass VK1Dataset(DepthDataset, ABC):\n \"\"\"Superclass for different types of Virtual KITTI dataset loaders\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(VK1Dataset, self).__init__(*args, **kwargs)\n\n self.full_res_shape = (1242, 375)\n self.K = np.array([[0.58, 0, 0.5, 0],\n [0, 1.92, 0.5, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], dtype=np.float32)\n self.K_orig = self.K.copy()\n\n self.side_map = {\"l\": '/Camera_0/', \"r\": '/Camera_1/'}\n self.VK_id_to_trainid = {-1: [0, 0, 0], 0: [90, 200, 255], 1: [140, 140, 140], 2: [255, 255, 0],\n 3: [200, 200, 0], 4: [100, 60, 100], 6: [210, 0, 200], 7: [255, 127, 80],\n 8: [160, 60, 60], 9: [0, 139, 139], 11: [0, 199, 0], 11.1: [90, 240, 0],\n 12: [80, 80, 80], 13: [255, 130, 0], 15: [250, 100, 255]}\n\n def get_color(self, file_path, do_flip):\n color = Image.open(os.path.join(self.data_path, file_path)).convert('RGB')\n if do_flip:\n color = color.transpose(Image.FLIP_LEFT_RIGHT)\n color = np.array(color, dtype=np.float32)\n\n return Image.fromarray(color.astype(np.uint8))\n\n def get_depth(self, file_path, do_flip, h_matrix=None, wanted_crop=None):\n depth = Image.open(os.path.join(self.data_path, file_path)).convert('I')\n if do_flip:\n depth = depth.transpose(Image.FLIP_LEFT_RIGHT)\n\n # normalize depth values\n depth_np = np.array(depth, dtype=np.float32)\n depth_np = depth_np / 100\n depth_np[depth_np > self.opt.max_depth] = self.opt.max_depth\n depth_np_orig = depth_np.copy()\n\n return depth_np\n\n def get_segm(self, file_path, do_flip, h_matrix=None, wanted_crop=None):\n # Virtual kitti 1.3 dataset\n segm = Image.open(os.path.join(self.data_path, file_path)).convert('RGB')\n if do_flip:\n segm = segm.transpose(Image.FLIP_LEFT_RIGHT)\n\n segm = np.array(segm, dtype=np.int32)\n segm_copy = np.zeros(segm.shape[0:2]) - 1\n for v, [k0, k1, k2] in self.VK_id_to_trainid.items():\n valid_mask = np.logical_and(np.logical_and(segm[:, :, 0] == k0, segm[:, :, 1] == k1),\n segm[:, :, 2] == k2)\n segm_copy[valid_mask] = int(v)\n segm = segm_copy.astype(np.int32) + 1\n segm[segm > 16] = 0\n\n return segm.astype(np.int64)\n\n\nclass VK2Dataset(DepthDataset, ABC):\n \"\"\"Superclass for different types of Virtual KITTI dataset loaders\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(VK2Dataset, self).__init__(*args, **kwargs)\n\n self.full_res_shape = (1242, 375)\n self.K = np.array([[0.58, 0, 0.5, 0],\n [0, 1.92, 0.5, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], dtype=np.float32)\n self.K_orig = self.K.copy()\n\n self.side_map = {\"l\": '/Camera_0/', \"r\": 
'/Camera_1/'}\n self.VK_id_to_trainid = {-1: [0, 0, 0], 0: [90, 200, 255], 1: [140, 140, 140], 2: [255, 255, 0],\n 3: [200, 200, 0], 4: [100, 60, 100], 6: [210, 0, 200], 7: [255, 127, 80],\n 8: [160, 60, 60], 9: [0, 139, 139], 11: [0, 199, 0], 11.1: [90, 240, 0],\n 12: [80, 80, 80], 13: [255, 130, 0], 15: [250, 100, 255]}\n\n def get_color(self, file_path, do_flip):\n color = Image.open(os.path.join(self.data_path, file_path)).convert('RGB')\n if do_flip:\n color = color.transpose(Image.FLIP_LEFT_RIGHT)\n color = np.array(color, dtype=np.float32)\n\n return Image.fromarray(color.astype(np.uint8))\n\n def get_depth(self, file_path, do_flip, h_matrix=None, wanted_crop=None):\n depth = Image.open(os.path.join(self.data_path, file_path)).convert('I')\n if do_flip:\n depth = depth.transpose(Image.FLIP_LEFT_RIGHT)\n\n # normalize depth values\n depth_np = np.array(depth, dtype=np.float32)\n depth_np = depth_np / 100\n depth_np[depth_np > self.opt.max_depth] = self.opt.max_depth\n depth_np_orig = depth_np.copy()\n\n return depth_np\n\n def get_segm(self, file_path, do_flip, h_matrix=None, wanted_crop=None):\n # Virtual kitti 1.3/2.0 dataset\n segm = Image.open(os.path.join(self.data_path, file_path)).convert('RGB')\n if do_flip:\n segm = segm.transpose(Image.FLIP_LEFT_RIGHT)\n\n segm = np.array(segm, dtype=np.int32)\n segm_copy = np.zeros(segm.shape[0:2]) - 1\n for v, [k0, k1, k2] in self.VK_id_to_trainid.items():\n valid_mask = np.logical_and(np.logical_and(segm[:, :, 0] == k0, segm[:, :, 1] == k1),\n segm[:, :, 2] == k2)\n segm_copy[valid_mask] = int(v)\n segm = segm_copy.astype(np.int32) + 1\n segm[segm > 16] = 0\n\n return segm.astype(np.int64)\n"
] |
[
[
"pandas.read_csv",
"numpy.expand_dims",
"numpy.logical_and",
"numpy.eye",
"torch.from_numpy",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
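In the __getitem__ method of DepthDataset above, the normalized 4x4 intrinsics matrix is rescaled to pixel units at every level of the image pyramid and then pseudo-inverted for back-projection. A minimal sketch of that per-scale pattern follows; the helper name intrinsics_pyramid and the 640x192 resolution are illustrative assumptions, not values taken from the repository.

import numpy as np
import torch

def intrinsics_pyramid(K_norm, width, height, num_scales):
    """Scale normalized intrinsics to pixel units at each pyramid level."""
    out = {}
    for scale in range(num_scales):
        K = K_norm.copy()
        K[0, :] *= width // (2 ** scale)   # fx and cx in pixels at this scale
        K[1, :] *= height // (2 ** scale)  # fy and cy in pixels at this scale
        out[("K", scale)] = torch.from_numpy(K)
        out[("inv_K", scale)] = torch.from_numpy(np.linalg.pinv(K))
    return out

K_norm = np.array([[0.58, 0, 0.5, 0],
                   [0, 1.92, 0.5, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]], dtype=np.float32)
pyramid = intrinsics_pyramid(K_norm, width=640, height=192, num_scales=4)

Storing the matrix in normalized form (focal lengths and principal point as fractions of the image size) is what lets the same K serve every scale: only the multiplication by the per-scale width and height changes.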
AirWalk-Digital/o365-documentation
|
[
"81be385b813c28d0029c9d0973ba0a347551d271"
] |
[
"run_flask.py"
] |
[
"\"\"\"Flask-OAuthlib sample for Microsoft Graph\"\"\"\n# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license.\n# See LICENSE in the project root for license information.\nimport uuid\n\nimport flask\nfrom flask_oauthlib.client import OAuth\nfrom flask import session, request, redirect\n\nfrom json2html import *\nimport config\nimport stringcase\nfrom requests import get\nfrom requests_oauthlib import OAuth2Session\nfrom urllib.request import pathname2url\nimport datetime\nfrom urllib.parse import urlparse\n\n\nimport yaml\nimport json\nimport os\nimport errno\nimport numpy as np\nimport pandas as pd\n\n\nAPP = flask.Flask(__name__, template_folder='static/templates')\nAPP.debug = True\nAPP.secret_key = 'development'\nOAUTH = OAuth(APP)\nMSGRAPH = OAUTH.remote_app(\n 'microsoft', consumer_key=config.CLIENT_ID, consumer_secret=config.CLIENT_SECRET,\n request_token_params={'scope': config.SCOPES},\n base_url=config.RESOURCE + config.API_VERSION + '/',\n request_token_url=None, access_token_method='POST',\n access_token_url=config.AUTHORITY_URL + config.TOKEN_ENDPOINT,\n authorize_url=config.AUTHORITY_URL + config.AUTH_ENDPOINT)\napply_prefix = '/apply'\n\[email protected]('/')\ndef homepage():\n \"\"\"Render the home page.\"\"\"\n return flask.render_template('homepage.html', sample='Flask-OAuthlib')\n\[email protected]('/login')\ndef login():\n \"\"\"Prompt user to authenticate.\"\"\"\n flask.session['state'] = str(uuid.uuid4())\n if urlparse(request.headers.get(\"Referer\")).netloc == urlparse(config.REDIRECT_URI).netloc:\n flask.session['referrer'] = request.headers.get(\"Referer\")\n else:\n flask.session['referrer'] = urlparse(config.REDIRECT_URI).netloc\n return MSGRAPH.authorize(callback=config.REDIRECT_URI, state=flask.session['state'])\n\[email protected]('/login/authorized')\ndef authorized():\n \"\"\"Handler for the application's Redirect Uri.\"\"\"\n if str(flask.session['state']) != str(flask.request.args['state']):\n raise Exception('state returned to redirect URL does not match!')\n response = MSGRAPH.authorized_response()\n flask.session['access_token'] = response['access_token']\n print('---token: ' + response['access_token'])\n return flask.redirect('/audit')\n\[email protected]('/document')\ndef document():\n \"\"\"Confirm user authentication by calling Graph and displaying some data.\"\"\"\n html = flask.render_template('report_head.html',\n sample='Flask-OAuthlib')\n footer = flask.render_template('report_foot.html') \n content = ''\n\n endpoint = 'deviceManagement/deviceConfigurations'\n\n\n for item in get_api(endpoint)['value']:\n table = json2html.convert(json = item, table_attributes=\"id=\\\"info-table\\\"\")\n head = flask.render_template('report_table_head.html',\n endpoint= '/' + endpoint + '[' + item['displayName'] + ']')\n foot = flask.render_template('report_table_foot.html') \n content = content + head + table + foot\n html = html + content + footer\n return html\n\ndef process():\n html = flask.render_template('report_head.html',\n sample='Flask-OAuthlib')\n footer = flask.render_template('report_foot.html') \n content = ''\n code = 0\n with open(\"o365.yml\", 'r') as yaml_in:\n # data = json.dumps(yaml.load(yaml_in))\n global data\n data = json.loads(json.dumps(yaml.load(yaml_in)))\n for section_title in data.keys():\n content_new, code = section(section_title)\n content = content + content_new\n html = html + content + footer\n if code == 403:\n return flask.redirect('/login')\n else:\n return html\n \ndef 
section(section_title):\n html_top = '<div class=\"container\">'\n html_bottom = '</div>'\n section_html = '<h2>' + stringcase.titlecase(section_title) + '</h2>'\n # configs = data[section_title]\n html_content = ''\n code = 200\n for content_data in data[section_title]:\n # html_content = html_content + content_title['name']\n content_html, code_out = content(section_title, content_data)\n if code_out != 200: code = code_out\n html_content = html_content + content_html\n return html_top + section_html + html_content + html_bottom, code\n\ndef content(section_title, content_data):\n api_name = content_data['name']\n if section_title == 'general':\n apiCall = api_name\n else:\n apiCall = section_title + '/' + api_name\n\n section_html = '<h3>' + stringcase.titlecase(api_name) + ' [' + apiCall + ']</h3>'\n\n # section_html = section_html + ' [' + apiCall + ']'\n # configs = data[section_title]\n # configuration(api_name, content_data)\n # return section_html\n content, code = configuration(apiCall, content_data)\n return section_html + content, code\n # return section_html + str(get_api(apiCall))\n\ndef configuration2(api, content_data):\n endpoint = api\n html = ''\n primary = content_data['primary']\n try:\n result = get_api(endpoint)\n if result.get('value'):\n for item in get_api(endpoint)['value']:\n item_processed = dict(sorted(item.items()))\n for remove_item in content_data['exclude']:\n # If key exist in dictionary then delete it using del.\n if remove_item in item_processed:\n del item_processed[remove_item]\n # remove None, NotConfigured and blank items\n trimmed = item_processed\n for key, value in item_processed.copy().items():\n if str(value) == 'None':\n del trimmed[key]\n elif str(value) == 'notConfigured':\n del trimmed[key]\n elif str(value) == '':\n del trimmed[key]\n elif str(value) is None:\n del trimmed[key]\n elif str(value) == '[]':\n del trimmed[key]\n\n table = json2html.convert(json = trimmed, table_attributes=\"class=\\\"leftheader-table\\\"\")\n cmd = ''\n if 'powershell' in content_data:\n cmd = generate_powershell(content_data['powershell'], trimmed )\n\n head = flask.render_template('report_table_head.html',\n powershell=cmd,\n item_name= item[primary] )\n foot = flask.render_template('report_table_foot.html') \n html = html + head + table + foot\n pass\n except TypeError as identifier:\n html = str(identifier) + str(get_api(endpoint))\n except KeyError as identifier:\n html = str(identifier) + str(get_api(endpoint))\n return html\n\n\n\ndef configuration(api, content_data):\n endpoint = api\n html = ''\n code = 200\n primary = content_data['primary']\n result = get_api(endpoint)\n all_results = []\n configuration_details = np.empty([1,5])\n\n all_configs, configuration_details = get_baseline(api, primary, configuration_details)\n \n if result.get('value'):\n for item in result['value']:\n item_processed = dict(item.items())\n trimmed = item_processed\n if item.get(primary):\n all_results.append(item[primary])\n link = '<a href=/download/msGraph/' + api + '?id=' + str(item['id']) + '&type=api&name=' + pathname2url(item[primary]) + '&primary=' + primary + '><i class=\"fas fa-cloud-download-alt\"></i>Download</a>'\n configuration_details = np.append(configuration_details, [[str(item[primary]), str(item['id']), 'api', 'tbd' ,link]], axis = 0)\n #remove any key from the 'exclude' section in the config\n if content_data.get('exclude'):\n for remove_item in content_data['exclude']:\n # If key exist in dictionary then delete it using del.\n if remove_item in 
item_processed:\n del item_processed[remove_item] \n # trim any null, None or empty values\n \n for key, value in item_processed.copy().items():\n if str(value) == 'None':\n del trimmed[key]\n elif str(value) == 'notConfigured':\n del trimmed[key]\n elif str(value) == '':\n del trimmed[key]\n elif str(value) is None:\n del trimmed[key]\n elif str(value) == '[]':\n del trimmed[key]\n\n \n\n compared_table, existing_policy, compliant = check_existing(trimmed.items(), endpoint, item[primary], primary)\n print('---------------------------' + item[primary] + '----------------------')\n print(trimmed.items())\n okstr = '<td>OK</td>'\n okstr_new = '<td bgcolor=\"#00FF00\">OK</td>'\n errorstr = '<td>Error</td>'\n errorstr_new = '<td bgcolor=\"#FF0000\">Error</td>'\n if existing_policy == True:\n hide_reapply = 'block'\n else:\n hide_reapply = 'none'\n # remove any key from the 'exclude' section in the config\n # if content_data.get('exclude'):\n # for remove_item in content_data['exclude']:\n # # If key exist in dictionary then delete it using del.\n # if remove_item in item_processed:\n # del item_processed[remove_item] \n # # trim any null, None or empty values\n # trimmed = trim_values(item_processed)\n # convert the json to a table\n # table = json2html.convert(json = trimmed, table_attributes=\"class=\\\"leftheader-table\\\"\")\n # table = json2html.convert(json = compared_table, table_attributes=\"class=\\\"leftheader-table\\\"\")\n table = compared_table.to_html(index=False)\n table = table.replace(okstr, okstr_new)\n table = table.replace(errorstr, errorstr_new)\n \n # process the @type bit\n item_name = item[primary]\n if item.get('@odata.type'):\n item_type = item['@odata.type'].replace('#microsoft.graph.','')\n item_name = '[' + stringcase.sentencecase(item_type) + '] ' + item_name\n\n # write the table header with the primary key (usually displayName) as the title\n table_head = flask.render_template('report_table_head.html',\n download_link=api + '?id=' + item['id'] + '&name=' +item[primary] + '&primary=' + primary, \n compliant=compliant,\n reapply_hidden=hide_reapply,\n item_name=item_name )\n table_foot = flask.render_template('report_table_foot.html')\n html = html + table_head + table + table_foot\n else:\n if result.get('error'):\n if result['error']['code']:\n result = result['error']['code']\n # table_head = flask.render_template('report_table_head.html',\n # powershell='',\n # item_name='No Data' )\n # table_foot = flask.render_template('report_table_foot.html') \n # html = html + table_head + table_foot\n if result == 'InvalidAuthenticationToken':\n code = 403\n\n \n missing_config = (set(all_results).difference(all_configs))\n missing_in_api = (set(all_configs).difference(all_results))\n header = ['Name', 'ID', 'Location', 'Missing', 'Action']\n \n configuration_details = np.delete(configuration_details, 0, axis=0)\n df = pd.DataFrame(configuration_details,index=configuration_details[:, 0], columns=header)\n for item in missing_config:\n df.loc[(df['Name'] == item) & (df['Location'] == 'api'),'Missing'] = 'True'\n for item in missing_in_api:\n df.loc[(df['Name'] == item) & (df['Location'] == 'baseline'),'Missing'] = 'True'\n\n # remove everything else\n df = df[df.Missing != 'tbd']\n df = df.drop('ID', axis=1)\n df = df.drop('Location', axis=1)\n df = df.drop('Missing', axis=1)\n # prints the missing and additional elements in list2 \n print(\"[|\" + str(len(missing_in_api)) + \"]Missing settings in API:\" + str(missing_in_api) ) \n print(\"[|\" + str(len(missing_config)) + 
\"]Additional settings in API (not in baseline):\" + str(missing_config))\n\n if len(missing_in_api) > 0:\n html = html + '<button type=\"button\" class=\"collapsible\"><i class=\"fa fa-exclamation-triangle\"></i>Missing Configuration<i class=\"fa fa-eye\"></i></button><div class=\"content\">' + df.to_html(index=False,escape=False) + '</div>'\n # html = html + '<h5>Missing Configuration</h5><p>Configuration that is in the saved baseline, but is not applied to the live environment</p>' + df.to_html(index=False,escape=False)\n\n return html, code\n\ndef get_baseline(api, primary, configuration_details):\n all_configs = []\n path = 'config/msGraph/' + api\n try:\n with os.scandir(path) as entries:\n for entry in entries:\n with open(entry, 'r') as f:\n parsed_json = json.load(f)\n all_configs.append(parsed_json[primary])\n link = '<a href=/post/msGraph/' + api + '?id=' + pathname2url(str(entry.name)) + '&type=baseline><i class=\"fas fa-angle-double-up\"></i>Apply</a>'\n configuration_details = np.append(configuration_details, [[str(parsed_json[primary]), str(entry.name), 'baseline', 'tbd' , link]], axis = 0)\n except FileNotFoundError:\n print('file not found' + path)\n return all_configs, configuration_details\ndef getfile(file):\n try:\n with open(file, 'r') as f:\n parsed_json = json.load(f)\n return parsed_json\n except FileNotFoundError:\n return \"{'error': 'file not found'}\"\n\ndef trim_policy(item_processed, content_data):\n trimmed = item_processed\n #remove any key from the 'exclude' section in the config\n if content_data.get('exclude'):\n for remove_item in content_data['exclude']:\n # If key exist in dictionary then delete it using del.\n if remove_item in item_processed:\n del item_processed[remove_item] \n # trim any null, None or empty values\n\n for key, value in item_processed.copy().items():\n if str(value) == 'None':\n del trimmed[key]\n elif str(value) == 'notConfigured':\n del trimmed[key]\n elif str(value) == '':\n del trimmed[key]\n elif str(value) is None:\n del trimmed[key]\n elif str(value) == '[]':\n del trimmed[key]\n return trimmed\n\ndef find_file_by_name(api, name, primary):\n path = 'config/msGraph/' + api\n rreturn = ''\n try:\n with os.scandir(path) as entries:\n for entry in entries:\n with open(entry, 'r') as f:\n parsed_json = json.load(f)\n if parsed_json[primary] == name:\n rreturn = f.name\n except FileNotFoundError:\n print('file not found' + path)\n return rreturn\n\n\n\n\ndef check_existing(table, api, name, primary):\n # load template\n path = find_file_by_name(api, name, primary)\n compliant = True\n try:\n # f=open(path,\"r\")\n with open(path, 'r') as f:\n parsed_json = json.load(f)\n a = np.empty([1,3])\n header = ['Setting', 'Value', 'Baseline']\n for key, value in table:\n if key in parsed_json:\n if str(value).lower() == str(parsed_json[key]).lower():\n good = 'OK'\n elif value == True and parsed_json[key] == True:\n good = 'OK'\n elif value == False and parsed_json[key] == False:\n good = 'OK'\n else:\n print(parsed_json[key])\n good = 'Error'\n compliant = False\n else:\n good = 'Error'\n compliant = False\n a = np.append(a, [[str(key), str(value), good ]], axis = 0)\n\n a = np.delete(a, 0, axis=0)\n df = pd.DataFrame(a,index=a[:, 0], columns=header)\n df.set_index(df.columns[0])\n existing = True\n except FileNotFoundError:\n a = np.empty([1,2])\n header = ['Setting', 'Value']\n for key, value in table:\n a = np.append(a, [[str(key), str(value)]], axis = 0)\n a = np.delete(a, 0, axis=0) # delete the empty first row\n df = pd.DataFrame(a, 
columns=header)\n existing = False\n compliant = False\n return df, existing, compliant\n\ndef missing_in_api_table(api, missing_in_api):\n a = np.empty([1,2])\n header = ['Setting', 'Action']\n for item in missing_in_api:\n url = '<a href=/apply_missing__api/' + stringcase.alphanumcase(item) + '> Apply </a>'\n a = np.append(a, [[str(item), str(url) ]], axis = 0)\n a = np.delete(a, 0, axis=0)\n df = pd.DataFrame(a,index=a[:, 0], columns=header)\n df.set_index(df.columns[0]) \n return df\n\n\n\n\ndef trim_values(item_processed):\n trimmed = item_processed\n for key, value in item_processed.copy().items():\n if str(value) == 'None':\n del trimmed[key]\n elif str(value) == 'notConfigured':\n del trimmed[key]\n elif str(value) == '':\n del trimmed[key]\n elif str(value) is None:\n del trimmed[key]\n elif str(value) == '[]':\n del trimmed[key]\n return trimmed\n\ndef get_api(api):\n if api.startswith(\"general/\"):\n api = api.replace(\"general/\", \"\")\n return proxy(api)\n\n\ndef generate_powershell_old(powershell, item_processed ):\n # cmd = powershell\n cmd = \"$hashtable = @{\"\n for key, value in item_processed.items():\n if str(value) != 'None':\n if str(value) == 'True':\n cmd = cmd + key + \" = '$True'\\n\" \n elif str(value) == 'False':\n cmd = cmd + key + \" = '$False'\\n\" \n else:\n cmd = cmd + key + \" = '\" + str(value) + \"'\\n\"\n\n cmd = cmd + \"}\\n\" + powershell + \" $hashtable\"\n return cmd\ndef generate_powershell(powershell, item_processed ):\n # cmd = powershell\n cmd = powershell + \" \"\n for key, value in item_processed.items():\n if str(key) == '@odata.type':\n cmd = cmd + '-' + str(value).replace('#microsoft.graph.', '') + ' '\n elif str(value) != 'None' and str(value) != '' :\n if str(value) == 'True':\n cmd = cmd + \" -\" + key + \" $True\" \n elif str(value) == 'False':\n cmd = cmd + \" -\" + key + \" $False\" \n else:\n cmd = cmd + \" -\" + key + \" '\" + str(value) + \"'\"\n return cmd\n\n\[email protected]('/', defaults={'path': ''})\[email protected]('/msGraph/<path:path>')\ndef proxy(path):\n endpoint = path\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n return MSGRAPH.get(endpoint, headers=headers).data\n\[email protected]('/download/msGraph/<path:path>')\ndef download(path):\n endpoint = path\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n graph = MSGRAPH.get(endpoint, headers=headers).data\n error = \"{'error': {'code': 'invalidParams','message': 'Invalid Parameters passed to download API','innerError': {'request-id': 'TBD','date': '\" + str(datetime.datetime.now()) +\"'} } }\"\n if flask.request.args.get('name') and flask.request.args.get('id'): \n filename = 'config/msGraph/' + path + '/' + flask.request.args.get('name').replace(' ','') + '.json'\n itemtosave = graph['value']\n itemtosave = [itemtosave for itemtosave in itemtosave if itemtosave['id'] == flask.request.args.get('id')][0]\n savefile(filename, itemtosave)\n return flask.render_template('redirect.html',\n message='Your file has been saved as ' + filename + '.',\n location=flask.request.referrer,\n data=str(itemtosave) )\n else:\n return error\n\ndef savefile(path, data):\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc: # Guard against race condition\n if exc.errno != 
errno.EEXIST:\n raise\n with open(path, 'w') as f:\n json.dump(data, f)\n \[email protected]('/reapply/msGraph/<path:path>') # reapply from the template\ndef reapply(path):\n endpoint = path\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 'Content-type': 'application/json',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n error = \"{'error': {'code': 'invalidParams','message': 'Invalid Parameters passed to download API','innerError': {'request-id': 'TBD','date': '\" + str(datetime.datetime.now()) +\"'} } }\"\n if flask.request.args.get('name') and flask.request.args.get('id') and flask.request.args.get('primary'):\n data, existing = generate_replacement_json(flask.request.args.get('id'), path, flask.request.args.get('name'),flask.request.args.get('primary'))\n if existing == True:\n endpoint = endpoint + '/' + flask.request.args.get('id')\n print(json.dumps(data))\n resp = MSGRAPH.patch(endpoint, headers=headers, data=data, format='json')\n print('-----')\n print(str(resp.status))\n if resp.status == 200 or resp.status == 201 or resp.status == 204:\n msg = 'Successfully updated policy.'\n error = ''\n else:\n msg = 'Error updatging policy via ' + endpoint\n error = str(resp.data)\n return flask.render_template('redirect.html',\n message=msg,\n location=flask.request.referrer,\n error=error,\n data=str(data))\n else:\n return error \n\n\n \[email protected]('/post/msGraph/<path:path>') # reapply from the template\ndef post_create(path):\n endpoint = path\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 'Content-type': 'application/json',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n error = \"{'error': {'code': 'invalidParams','message': 'Invalid Parameters passed to download API','innerError': {'request-id': 'TBD','date': '\" + str(datetime.datetime.now()) +\"'} } }\"\n if flask.request.args.get('type') and flask.request.args.get('id'):\n if flask.request.args.get('type') == 'baseline':\n data = getfile('config/msGraph/' + path + '/' + flask.request.args.get('id'))\n resp = MSGRAPH.post(endpoint, headers=headers, data=data, format='json')\n \n print(str(resp.status))\n if resp.status == 200 or resp.status == 201 or resp.status == 204:\n msg = 'Successfully updated policy.'\n error = ''\n noerror = True\n else:\n msg = 'Error updatging policy via ' + endpoint\n error = str(resp.data)\n noerror = False\n return flask.render_template('redirect.html',\n message=msg,\n noError=noerror,\n location=flask.request.referrer,\n error=error,\n data=str(data))\n else:\n return error\n else:\n return error \n\n\ndef generate_replacement_json(existing_id, api, name, primary):\n\n path = find_file_by_name(api, name, primary)\n # load template\n # path = 'config/msGraph/' + api + '/' + name\n jsonpolicy = ''\n try:\n with open(path, 'r') as f:\n parsed_json = json.load(f)\n existing = True\n parsed_json['id'] = existing_id\n # del parsed_json['@odata.type']\n # del parsed_json['id']\n jsonpolicy = parsed_json\n except FileNotFoundError:\n existing = False\n return jsonpolicy, existing\n\n\[email protected]('/deviceManagement/deviceConfigurations')\ndef deviceManagement_deviceConfigurations():\n \"\"\"Confirm user authentication by calling Graph and displaying some data.\"\"\"\n endpoint = 'me'\n endpoint = 'deviceManagement/deviceConfigurations'\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 
'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n return MSGRAPH.get(endpoint, headers=headers).data\n \[email protected]('/me')\ndef me():\n \"\"\"Confirm user authentication by calling Graph and displaying some data.\"\"\"\n endpoint = 'me'\n headers = {'SdkVersion': 'ms365-documentation',\n 'x-client-SKU': 'ms365-documentation',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n return MSGRAPH.get(endpoint, headers=headers).data\n \n\n\[email protected]('/audit')\ndef process_audit():\n return process()\n \n\[email protected]\ndef get_token():\n \"\"\"Called by flask_oauthlib.client to retrieve current access token.\"\"\"\n return (flask.session.get('access_token'), '')\n\nif __name__ == '__main__':\n APP.run(host=\"0.0.0.0\",port=5002)\n"
] |
[
[
"numpy.delete",
"numpy.empty",
"pandas.DataFrame"
]
] |
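The check_existing function above is the core of the compliance report: each live setting retrieved from Microsoft Graph is compared, case-insensitively as a string, against the saved baseline JSON, and every row is marked OK or Error. The following minimal sketch reproduces just that comparison logic; compare_to_baseline and the two sample settings are illustrative, and the real function additionally loads the baseline file from disk and colours the rendered HTML cells.

import pandas as pd

def compare_to_baseline(live, baseline):
    """Return a Setting/Value/Baseline table plus an overall compliance flag."""
    rows, compliant = [], True
    for key, value in live.items():
        # A setting complies if it exists in the baseline with the same value.
        ok = key in baseline and str(value).lower() == str(baseline[key]).lower()
        rows.append([str(key), str(value), 'OK' if ok else 'Error'])
        compliant = compliant and ok
    return pd.DataFrame(rows, columns=['Setting', 'Value', 'Baseline']), compliant

df, compliant = compare_to_baseline(
    {'passwordRequired': True, 'storageRequireEncryption': False},
    {'passwordRequired': True, 'storageRequireEncryption': True})
# df.to_html(index=False) is how the report embeds such a table in the page.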
JieZheng-ShanghaiTech/HiCoEx
|
[
"1b3d4b80d3af9751cdd7a0cabda7af377d1c1253"
] |
[
"src/link_prediction/utils_link_prediction.py"
] |
[
"import itertools\nimport os\nimport pickle\nfrom collections import defaultdict\nfrom multiprocessing import Pool\nfrom time import time\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sps\nfrom bionev.utils import load_embedding\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, roc_auc_score, f1_score, precision_score, recall_score, confusion_matrix\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom utils import intra_mask\n\n\ndef select_classifier(classifier_name, clf_params, seed=42):\n if classifier_name == 'mlp':\n classifier = MLPClassifier(max_iter=500, hidden_layer_sizes=(100,))\n elif classifier_name == 'svm':\n classifier = SVC(gamma='scale')\n elif classifier_name == 'rf':\n classifier = RandomForestClassifier(n_jobs=10, **clf_params)\n elif classifier_name == 'random':\n classifier = classifier_name\n else:\n classifier = LogisticRegression(solver='lbfgs')\n return classifier\n\n\ndef confusion_matrix_distinct(y_true, y_pred, ids, mask):\n is_intra = mask[ids[:, 0], ids[:, 1]].astype(bool)\n y_true_intra, y_pred_intra = y_true[is_intra], y_pred[is_intra]\n print('Intra accuracy: ', accuracy_score(y_true_intra, y_pred_intra))\n print(confusion_matrix(y_true_intra, y_pred_intra))\n\n if (is_intra == 0).any():\n y_true_inter, y_pred_inter = y_true[~is_intra], y_pred[~is_intra]\n print('Inter accuracy: ', accuracy_score(y_true_inter, y_pred_inter))\n print(confusion_matrix(y_true_inter, y_pred_inter))\n\n\ndef evaluate(X_train, y_train, X_test, y_test, classifier, mask):\n ids = X_test[:, :2].astype(int)\n if classifier == 'random':\n y_pred = np.random.randint(0, 2, size=y_test.shape)\n else:\n scaler = StandardScaler()\n X_train_scaled = scaler.fit_transform(X_train[:, 2:], y_train)\n X_test_scaled = scaler.transform(X_test[:, 2:])\n\n start = time()\n classifier.fit(X_train_scaled, y_train)\n y_pred = classifier.predict(X_test_scaled)\n end = time()\n\n if mask is not None:\n confusion_matrix_distinct(y_test, y_pred, ids, mask)\n\n results = {}\n results['roc'] = roc_auc_score(y_test, y_pred)\n results['acc'] = accuracy_score(y_test, y_pred)\n results['f1'] = f1_score(y_test, y_pred)\n results['precision'] = precision_score(y_test, y_pred)\n results['recall'] = recall_score(y_test, y_pred)\n results['predictions'] = list(y_pred)\n return results\n\n\ndef evaluate_embedding(X_train, y_train, classifier_name, verbose=True, clf_params={}, cv_splits=5, mask=None, X_test=None,\n y_test=None):\n results = defaultdict(list)\n if X_test is None and y_test is None:\n skf = StratifiedKFold(n_splits=cv_splits, shuffle=True)\n for train_index, val_index in skf.split(X_train, y_train):\n classifier = select_classifier(classifier_name, clf_params)\n results_iter = evaluate(X_train[train_index], y_train[train_index], X_train[val_index],\n y_train[val_index], classifier, mask)\n if verbose:\n print(\"Accuracy:\", results_iter['acc'], \"- ROC:\", results_iter['roc'], \"- F1:\", results_iter['f1'],\n \"- Precision:\", results_iter['precision'], \"- Recall\", results_iter['recall'])\n\n for key in results_iter.keys():\n results[key].append(results_iter[key])\n else:\n classifier = select_classifier(classifier_name, clf_params)\n results = evaluate(X_train, 
y_train, X_test, y_test, classifier, mask)\n if verbose:\n print(\"Accuracy:\", results['acc'], \"- ROC:\", results['roc'], \"- F1:\", results['f1'],\n \"- Precision:\", results['precision'], \"- Recall\", results['recall'])\n return results\n\n\ndef generate_embedding(args, emb_path, interactions_path, command):\n os.makedirs('{}/{}/embeddings/{}/'.format(args.data_root, args.dataset, args.method.lower()), exist_ok=True)\n if not os.path.exists(\n '{}/{}/embeddings/{}/{}.npy'.format(args.data_root, args.dataset, args.method.lower(), emb_path)) or args.force:\n adj = np.load(interactions_path)\n graph = from_numpy_matrix(adj)\n\n nx.write_weighted_edgelist(graph, '{}/{}/chromatin_networks/{}.edgelist'.format(args.data_root, args.dataset, args.name))\n\n print(command)\n os.system(command)\n emb_dict = load_embedding(\n '{}/{}/embeddings/{}/{}.txt'.format(\n args.data_root, args.dataset, args.method.lower(), emb_path))\n\n emb = np.zeros((adj.shape[0], args.emb_size))\n\n disconnected_nodes = []\n\n print('N. genes', adj.shape[0])\n for gene in range(adj.shape[0]):\n try:\n emb[gene, :] = emb_dict[str(gene)]\n except KeyError:\n print('Node', gene, 'disconnected.')\n # np.delete(emb, i, axis=0)\n emb[gene, :] = np.nan\n disconnected_nodes.append(gene)\n\n os.makedirs('{}/{}/disconnected_nodes/'.format(args.data_root, args.dataset), exist_ok=True)\n np.save(\n '{}/{}/disconnected_nodes/{}.npy'.format(\n args.data_root, args.dataset, args.name), np.array(disconnected_nodes))\n\n if args.save_emb:\n np.save('{}/{}/embeddings/{}/{}.npy'.format(args.data_root, args.dataset, args.method.lower(), emb_path), emb)\n os.remove('{}/{}/embeddings/{}/{}.txt'.format(args.data_root, args.dataset, args.method.lower(), emb_path))\n os.remove('{}/{}/chromatin_networks/{}.edgelist'.format(args.data_root, args.dataset, args.name))\n return emb\n\n\ndef from_numpy_matrix(A):\n # IMPORTANT: do not use for the co-expression matrix, otherwise the nans will be ignored and considered as non_edges\n A[np.isnan(A)] = 0\n\n if A.shape[0] != A.shape[1]:\n graph = nx.algorithms.bipartite.from_biadjacency_matrix(sps.csr_matrix(A))\n else:\n graph = nx.from_numpy_array(A)\n return graph\n\n\ndef distance_embedding(path, dataset, edges, non_edges, chr_src=None):\n if chr_src is None:\n gene_info = pd.read_csv(\n '{}/{}/rna/expression_info_chr_all.csv'.format(\n path, dataset))\n else:\n gene_info = pd.read_csv(\n '{}/{}/rna/expression_info_chr_{}_rna.csv'.format(\n path, dataset, chr_src))\n\n pos_distances = np.abs(gene_info.iloc[edges[:, 0]]['Transcription start site (TSS)'].to_numpy() -\n gene_info.iloc[edges[:, 1]]['Transcription start site (TSS)'].to_numpy())\n\n neg_distances = np.abs(gene_info.iloc[non_edges[:, 0]]['Transcription start site (TSS)'].to_numpy() -\n gene_info.iloc[non_edges[:, 1]]['Transcription start site (TSS)'].to_numpy())\n\n pos_features = np.hstack((edges, pos_distances[:, None]))\n neg_features = np.hstack((non_edges, neg_distances[:, None]))\n X = np.vstack((pos_features, neg_features))\n return X\n\n\ndef add_topological_edge_embeddings(graph_hic, edges, non_edges, features_pos, features_neg):\n shortest_path_lengths_pos = np.array(list(\n map(lambda e: nx.shortest_path_length(graph_hic, e[0], e[1]) if nx.has_path(graph_hic, e[0],\n e[1]) else np.nan,\n edges)))\n shortest_path_lengths_neg = np.array(list(\n map(lambda e: nx.shortest_path_length(graph_hic, e[0], e[1]) if nx.has_path(graph_hic, e[0],\n e[1]) else np.nan,\n non_edges)))\n\n jaccard_index_pos = np.array(list(map(lambda e: e[2], 
nx.jaccard_coefficient(graph_hic, edges))))\n jaccard_index_neg = np.array(list(map(lambda e: e[2], nx.jaccard_coefficient(graph_hic, non_edges))))\n\n features_pos = np.hstack((features_pos, shortest_path_lengths_pos[:, None], jaccard_index_pos[:, None]))\n features_neg = np.hstack((features_neg, shortest_path_lengths_neg[:, None], jaccard_index_neg[:, None]))\n return features_pos, features_neg\n\n\ndef topological_features(args, edges, non_edges):\n adj_hic = np.load('{}/{}/chromatin_networks/{}.npy'.format(args.data_root, args.dataset, args.chromatin_network_name))\n graph_hic = from_numpy_matrix(adj_hic)\n graph_hic = nx.convert_node_labels_to_integers(graph_hic)\n\n if os.path.exists('{}/{}/embeddings/topological/{}.npy'.format(args.data_root, args.dataset, args.chromatin_network_name)):\n embeddings = np.load('{}/{}/embeddings/topological/{}.npy'.format(args.data_root, args.dataset, args.chromatin_network_name))\n else:\n degrees = np.array(list(dict(graph_hic.degree()).values()))\n betweenness = np.array(list(betweenness_centrality_parallel(graph_hic, 20).values()))\n clustering = np.array(list(nx.clustering(graph_hic).values()))\n\n embeddings = np.hstack((degrees[:, None], betweenness[:, None], clustering[:, None]))\n\n os.makedirs('{}/{}/embeddings/topological/'.format(args.data_root, args.dataset), exist_ok=True)\n np.save('{}/{}/embeddings/topological/{}.npy'.format(args.data_root, args.dataset, args.chromatin_network_name), embeddings)\n\n features_pos, features_neg = combine_embeddings(embeddings, args.aggregators, edges, non_edges)\n features_pos, features_neg = add_topological_edge_embeddings(graph_hic, edges, non_edges, features_pos,\n features_neg)\n X = np.vstack((features_pos, features_neg))\n\n imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=9999)\n X = imp.fit_transform(X)\n return X\n\n\ndef combine_embeddings(embeddings, aggregators, edges, non_edges):\n # Add edges and non_edges ids in dataset to identify the type of interaction for the confusion matrix\n # They will be removed from the dataset before training\n pos_features = edges\n neg_features = non_edges\n if 'hadamard' in aggregators:\n pos_features, neg_features = hadamard_embedding(pos_features, neg_features, embeddings, edges, non_edges)\n if 'avg' in aggregators:\n pos_features, neg_features = average_embedding(pos_features, neg_features, embeddings, edges, non_edges)\n if 'l1' in aggregators:\n pos_features, neg_features = l1_embedding(pos_features, neg_features, embeddings, edges, non_edges)\n \n if pos_features.shape[1] == 2 or neg_features.shape[1] == 2:\n raise ValueError('No aggregator defined.')\n\n return pos_features, neg_features\n\n\ndef method_embedding(args, n_nodes, edges, non_edges):\n if args.method == 'random':\n embeddings = np.random.rand(n_nodes, args.emb_size)\n else:\n embeddings = np.load(\n '{}/{}/embeddings/{}/{}.npy'.format(args.data_root, args.dataset, args.method, args.embedding))\n\n features_pos, features_neg = combine_embeddings(embeddings, args.aggregators, edges, non_edges)\n X = np.vstack((features_pos, features_neg))\n return X\n\n\ndef append_features(pos_features, neg_features, pos_features_partial, neg_features_partial):\n if pos_features is None or neg_features is None:\n pos_features = pos_features_partial\n neg_features = neg_features_partial\n else:\n pos_features = np.hstack((pos_features, pos_features_partial))\n neg_features = np.hstack((neg_features, neg_features_partial))\n return pos_features, neg_features\n\n\ndef 
hadamard_embedding(pos_features, neg_features, embeddings, edges, non_edges):\n pos_features_partial = embeddings[edges[:, 0]] * embeddings[edges[:, 1]]\n neg_features_partial = embeddings[non_edges[:, 0]] * embeddings[non_edges[:, 1]]\n return append_features(pos_features, neg_features, pos_features_partial, neg_features_partial)\n\n\ndef average_embedding(pos_features, neg_features, embeddings, edges, non_edges):\n pos_features_partial = np.array(\n list(map(lambda edge: np.mean((embeddings[edge[0]], embeddings[edge[1]]), axis=0), edges)))\n neg_features_partial = np.array(\n list(map(lambda edge: np.mean((embeddings[edge[0]], embeddings[edge[1]]), axis=0), non_edges)))\n return append_features(pos_features, neg_features, pos_features_partial, neg_features_partial)\n\n\ndef l1_embedding(pos_features, neg_features, embeddings, edges, non_edges):\n pos_features_partial = np.abs(embeddings[edges[:, 0]] - embeddings[edges[:, 1]])\n neg_features_partial = np.abs(embeddings[non_edges[:, 0]] - embeddings[non_edges[:, 1]])\n return append_features(pos_features, neg_features, pos_features_partial, neg_features_partial)\n\n\ndef setup_filenames_and_folders(args, chromosome_folder):\n hyperparameters = ''\n if args.method != 'distance':\n hyperparameters = 'es{}'.format(args.emb_size)\n\n if args.method == 'node2vec':\n hyperparameters += '_nw{}_wl{}_p{}_q{}'.format(args.num_walks, args.walk_len, args.p, args.q)\n\n args.aggregators = '_'.join(args.aggregators)\n\n args.embedding = args.chromatin_network_name + '_' + hyperparameters\n\n os.makedirs('{}/results/{}/chr_{}'.format(args.data_root, args.dataset, chromosome_folder), exist_ok=True)\n os.makedirs('{}/results/{}/predictions/chr_{}'.format(args.data_root, args.dataset, chromosome_folder), exist_ok=True)\n if args.test:\n os.makedirs('{}/results/{}/test/chr_{}'.format(args.data_root, args.dataset, chromosome_folder), exist_ok=True)\n os.makedirs('{}/results/{}/test/predictions/chr_{}'.format(args.data_root, args.dataset, chromosome_folder), exist_ok=True)\n if args.method == 'topological':\n filename = '{}chr_{}/{}_{}_{}_{}_{}.pkl'.format('test/' if args.test else '', chromosome_folder, args.classifier,\n args.method, args.chromatin_network_name, args.aggregators, args.times)\n else:\n filename = '{}chr_{}/{}_{}_{}_{}_{}_{}.pkl'.format('test/' if args.test else '', chromosome_folder, args.classifier,\n args.method, args.embedding, args.aggregators, args.coexp_thr, args.times)\n return args, filename\n\ndef load_coexpression(args, chromatin_network_name, chrs):\n coexpression = np.load(\n '{}/{}/coexpression_networks/coexpression_chr_{}_{}.npy'.format(args.data_root, args.dataset, chrs, args.coexp_thr))\n\n chromatin_network = np.load('{}/{}/chromatin_networks/{}.npy'.format(args.data_root, args.dataset, chromatin_network_name))\n degrees = np.nansum(chromatin_network, axis=0)\n disconnected_nodes = np.ravel(np.argwhere(degrees == 0))\n\n print(\"N. disconnected nodes:\", len(disconnected_nodes))\n if len(disconnected_nodes) > 0:\n coexpression[disconnected_nodes] = np.nan\n coexpression[:, disconnected_nodes] = np.nan\n return coexpression, disconnected_nodes\n\ndef get_edges(coexpression, n_eges_intra=None, inter_ratio=1.0):\n n_nodes = coexpression.shape[0]\n\n edges = np.array(np.argwhere(coexpression == 1))\n\n import random\n np.random.seed(42)\n random.seed(42)\n \n if n_eges_intra:\n if n_eges_intra > edges.shape[0]:\n n_edges_inter = edges.shape[0]\n else:\n n_edges_inter = int(n_eges_intra * inter_ratio)\n print('N. 
intra edges', n_eges_intra, '- N. inter edges ', edges.shape[0], '->',\n n_edges_inter)\n edges = edges[\n np.random.choice(edges.shape[0], n_edges_inter, replace=False)]\n\n # when make genome-wide intra-chrom prediction, could sample some edges as positive labels.\n # edges = edges[\n # np.random.choice(edges.shape[0], int(edges.shape[0]*0.8), replace=False)]\n edges_nodes = np.unique(edges)\n\n non_nodes = np.setdiff1d(np.arange(n_nodes), edges_nodes)\n\n coexpression_neg = coexpression.copy()\n coexpression_neg[non_nodes, :] = np.nan\n coexpression_neg[:, non_nodes] = np.nan\n\n non_edges = np.array(np.argwhere(coexpression_neg == 0))\n non_edges = non_edges[\n np.random.choice(non_edges.shape[0], edges.shape[0], replace=False)]\n\n return edges, non_edges\n\ndef build_dataset(args, edges, non_edges, n_nodes):\n if args.method == 'topological':\n X = topological_features(args, edges, non_edges)\n elif args.method == 'ids':\n X = np.vstack((edges, non_edges))\n elif args.method == 'distance':\n X = distance_embedding(args.data_root, args.dataset, edges, non_edges)\n elif args.method.split('_')[0] == 'GNN':\n X = np.vstack((edges, non_edges))\n else:\n X = method_embedding(args, n_nodes, edges, non_edges)\n y = np.hstack((np.ones(edges.shape[0]), np.zeros(non_edges.shape[0])))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n return X_train, X_test, y_train, y_test\n\ndef link_prediction(args, X_train, y_train, X_test, y_test, filename, verbose=False):\n results = defaultdict(list)\n if args.test:\n results = evaluate_embedding(X_train, y_train, args.classifier, verbose=verbose, clf_params={'n_estimators': 100},\n X_test=X_test, y_test=y_test)\n else:\n for i in range(args.n_iter):\n results_iter = evaluate_embedding(X_train, y_train, args.classifier, verbose=verbose,\n clf_params={'n_estimators': 100}, cv_splits=args.cv_splits)\n for key in results_iter.keys():\n results[key].extend(results_iter[key])\n\n with open('{}/results/{}/{}'.format(args.data_root, args.dataset, filename), 'wb') as file_save:\n pickle.dump(results, file_save)\n\n print(\"Mean Accuracy: {:.3f} - Mean F1: {:.3f} - Mean Precision: {:.3f} - Mean Recall: {:.3f}\".format(np.mean(results['acc']), np.mean(results['f1']), np.mean(results['precision']), np.mean(results['recall'])))\n print('')\n\ndef get_mask_intra(path, dataset):\n shapes = [np.load(\n '{}/{}/coexpression/coexpression_chr_{}_{}.npy'.format(path, dataset, i, i)).shape for i\n in\n range(1, 23)]\n\n mask = intra_mask(shapes, nans=True, values=np.ones)\n return mask\n\ndef chunks(l, n):\n \"\"\"Divide a list of nodes `l` in `n` chunks\"\"\"\n l_c = iter(l)\n while 1:\n x = tuple(itertools.islice(l_c, n))\n if not x:\n return\n yield x\n\n\ndef betweenness_centrality_parallel(G, processes=None):\n \"\"\"Parallel betweenness centrality function\"\"\"\n p = Pool(processes=processes)\n node_divisor = len(p._pool) * 4\n node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))\n num_chunks = len(node_chunks)\n bt_sc = p.starmap(\n nx.betweenness_centrality_source,\n zip([G] * num_chunks, [True] * num_chunks, [None] * num_chunks, node_chunks),\n )\n\n # Reduce the partial solutions\n bt_c = bt_sc[0]\n for bt in bt_sc[1:]:\n for n in bt:\n bt_c[n] += bt[n]\n return bt_c\n"
] |
[
[
"sklearn.neural_network.MLPClassifier",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.confusion_matrix",
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.random.randint",
"numpy.hstack",
"sklearn.ensemble.RandomForestClassifier",
"numpy.unique",
"numpy.arange",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.StratifiedKFold",
"numpy.nansum",
"numpy.load",
"numpy.zeros",
"numpy.random.choice",
"numpy.isnan",
"sklearn.metrics.precision_score",
"sklearn.model_selection.train_test_split",
"scipy.sparse.csr_matrix",
"numpy.random.rand",
"sklearn.svm.SVC",
"numpy.array",
"sklearn.metrics.recall_score",
"numpy.abs",
"numpy.random.seed",
"sklearn.linear_model.LogisticRegression",
"numpy.argwhere",
"numpy.ones",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack",
"sklearn.metrics.accuracy_score"
]
] |
python-recsys/mrec
|
[
"50c28a0384f2499bdc85afa3210eefed6b94011f"
] |
[
"mrec/__init__.py"
] |
[
"import numpy as np\nfrom scipy.sparse import coo_matrix, csr_matrix\nfrom scipy.io import mmread\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom sparse import fast_sparse_matrix, loadtxt, loadz\nfrom base_recommender import BaseRecommender\n\ndef load_fast_sparse_matrix(input_format,filepath):\n \"\"\"\n Load a fast_sparse_matrix from an input file of the specified format,\n by delegating to the appropriate static method.\n\n Parameters\n ----------\n input_format : str\n Specifies the file format:\n - tsv\n - csv\n - mm (MatrixMarket)\n - fsm (mrec.sparse.fast_sparse_matrix)\n filepath : str\n The file to load.\n \"\"\"\n if input_format == 'tsv':\n return fast_sparse_matrix.loadtxt(filepath)\n elif input_format == 'csv':\n return fast_sparse_matrix.loadtxt(filepath,delimiter=',')\n elif input_format == 'mm':\n return fast_sparse_matrix.loadmm(filepath)\n elif input_format == 'fsm':\n return fast_sparse_matrix.load(filepath)\n raise ValueError('unknown input format: {0}'.format(input_format))\n\ndef load_sparse_matrix(input_format,filepath):\n \"\"\"\n Load a scipy.sparse.csr_matrix from an input file of the specified format.\n\n Parameters\n ----------\n input_format : str\n Specifies the file format:\n - tsv\n - csv\n - mm (MatrixMarket)\n - npz (scipy.sparse.csr_matrix serialized with mrec.sparse.savez())\n - fsm (mrec.sparse.fast_sparse_matrix)\n filepath : str\n The file to load.\n \"\"\"\n if input_format == 'tsv':\n return loadtxt(filepath).tocsr()\n elif input_format == 'csv':\n return loadtxt(filepath,delimiter=',').tocsr()\n elif input_format == 'mm':\n return mmread(filepath).tocsr()\n elif input_format == 'npz':\n return loadz(filepath)\n elif input_format == 'fsm':\n return fast_sparse_matrix.load(filepath).X\n raise ValueError('unknown input format: {0}'.format(input_format))\n\ndef save_recommender(model,filepath):\n \"\"\"\n Save a recommender model to file. 
If the model holds similarity matrix\n then numpy.savez is used to save it to disk efficiently, otherwise the\n model is simply pickled.\n\n Parameters\n ----------\n filepath : str\n The filepath to write to.\n \"\"\"\n if hasattr(model,'similarity_matrix'):\n # pickle the model without its similarity matrix\n tmp = model.similarity_matrix\n model.similarity_matrix = None\n m = pickle.dumps(model)\n # use numpy to save the similarity matrix efficiently\n model.similarity_matrix = tmp\n if isinstance(model.similarity_matrix,np.ndarray):\n np.savez(filepath,mat=model.similarity_matrix,model=m)\n elif isinstance(model.similarity_matrix,csr_matrix):\n d = model.similarity_matrix.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,model=m)\n else:\n pickle.dump(model,open(filepath,'w'))\n else:\n pickle.dump(model,open(filepath,'w'))\n\ndef load_recommender(filepath):\n \"\"\"\n Load a recommender model from file after it has been saved by\n save_recommender().\n\n Parameters\n ----------\n filepath : str\n The filepath to read from.\n \"\"\"\n r = np.load(filepath)\n if isinstance(r,BaseRecommender):\n model = r\n else:\n model = np.loads(str(r['model']))\n if 'mat' in r.files:\n model.similarity_matrix = r['mat']\n elif 'row' in r.files:\n model.similarity_matrix = coo_matrix((r['data'],(r['row'],r['col'])),shape=r['shape']).tocsr()\n else:\n raise IOError('ERROR: unexpected serialization format.'\n 'Was this file created with save_recommender()?')\n return model\n\ndef read_recommender_description(filepath):\n \"\"\"\n Read a recommender model description from file after it has\n been saved by save_recommender(), without loading all the\n associated data into memory.\n\n Parameters\n ----------\n filepath : str\n The filepath to read from.\n \"\"\"\n r = np.load(filepath,mmap_mode='r')\n if isinstance(r,BaseRecommender):\n model = r\n else:\n model = np.loads(str(r['model']))\n return str(model)\n"
] |
[
[
"scipy.io.mmread",
"numpy.load",
"scipy.sparse.coo_matrix",
"numpy.savez"
]
] |
jsbyysheng/captcha-recognition
|
[
"69340a5a83451c4780b7a34729b572582ed3544b"
] |
[
"src/data.py"
] |
[
"import os\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_data(path):\n \"\"\"\n Get images name in path\n :param path: the path to save images\n :return: image list filled with names and their labels.\n \"\"\"\n image_names = os.listdir(path)\n image_names = [name for name in image_names if name.endswith(\".jpg\")]\n label2id, id2label = get_dict()\n results = [[label2id[name[:1]], label2id[name[1:2]], label2id[name[2:3]], label2id[name[3:4]]] for name in image_names]\n image_names = [os.path.join(path, name) for name in image_names]\n\n return image_names, np.array(results, dtype=np.int32) - 1\n\n\ndef get_dict():\n \"\"\"\n Get dictionary of id2label and label2id, id2label is a dictionary which indicates the label of an id and the label2id is a reversed from `label2id`\n :return: two dictionaries: label->id, id->label\n \"\"\"\n label2id = {}\n id2label = {}\n # upper case\n for i in range(26):\n label2id[chr(ord('A') + i)] = 1 + i\n id2label[1 + i] = chr(ord('A') + i)\n # lower case\n for i in range(26):\n label2id[chr(ord('a') + i)] = 1 + i + 26\n id2label[1 + i + 26] = chr(ord('a') + i)\n # numbers\n for i in range(10):\n label2id[chr(ord('0') + i)] = 53 + i\n id2label[53 + i] = chr(ord('0') + i)\n\n return label2id, id2label\n\n\ndef get_data_split(path, split=[6, 1, 1], save=True, out_dir='./data', modes=['train', 'dev', 'test']):\n \"\"\"\n Get data after split.\n :param path: the path to save images\n :param split: the ratio of train set, dev set and test set\n :param out_dir: the output directory to save data files\n :param modes: the modes at different timestamp, support modes like: (train, dev, test), (train, dev) and (test)\n :return: six data with ratio specified by `split`.\n \"\"\"\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n train_path, dev_path, test_path = os.path.join(out_dir, 'train.npy'), os.path.join(out_dir, 'dev.npy'), os.path.join(out_dir, 'test.npy')\n if os.path.exists(train_path) and os.path.exists(dev_path) and os.path.exists(test_path):\n\n if 'train' in modes:\n x_train, y_train = np.load(train_path, allow_pickle=True), np.load(os.path.join(out_dir, 'train.y.npy'), allow_pickle=True)\n if 'dev' in modes:\n x_dev, y_dev = np.load(dev_path, allow_pickle=True), np.load(os.path.join(out_dir, 'dev.y.npy'))\n if 'test' in modes:\n x_test, y_test = np.load(test_path, allow_pickle=True), np.load(os.path.join(out_dir, 'test.y.npy'))\n\n else:\n names, labels = get_data(path)\n\n ratios = np.array(split) / np.sum(split)\n\n x_train, x_dev_test, y_train, y_dev_test = train_test_split(names, labels, train_size=ratios[0])\n ratios = np.array(split[1:]) / np.sum(split[1:])\n x_dev, x_test, y_dev, y_test = train_test_split(x_dev_test, y_dev_test, train_size=ratios[0])\n\n if save:\n np.save(train_path, x_train, allow_pickle=True)\n np.save(os.path.join(out_dir, 'train.y.npy'), y_train, allow_pickle=True)\n np.save(dev_path, x_dev, allow_pickle=True)\n np.save(os.path.join(out_dir, 'dev.y.npy'), y_dev, allow_pickle=True)\n np.save(test_path, x_test, allow_pickle=True)\n np.save(os.path.join(out_dir, 'test.y.npy'), y_test, allow_pickle=True)\n\n if 'train' in modes and 'dev' in modes and 'test' in modes:\n return x_train, y_train, x_dev, y_dev, x_test, y_test\n elif 'train' in modes and 'dev' in modes:\n return x_train, y_train, x_dev, y_dev\n elif 'test' in modes:\n return x_test, y_test\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"numpy.save",
"numpy.load",
"numpy.array",
"numpy.sum"
]
] |
fred3m/astro_pypelines
|
[
"fbf62f2c4b8015fdb86192c7aed04e189cacf5a3"
] |
[
"astro_pypelines/pypelines/interactive_plots/plots.py"
] |
[
"from __future__ import division, print_function\nimport os\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.modeling import models, fitting\n\nfit_types = {\n 'linearLSQ': fitting.LinearLSQFitter(),\n 'levMarLSQ': fitting.LevMarLSQFitter(),\n 'SLSQPLSQ': fitting.SLSQPLSQFitter(),\n 'simplexLSQ': fitting.SimplexLSQFitter()\n}\n\ndef load_table(id, params):\n if params['format'] == 'npy':\n catalog = np.load(params['filename'])\n else:\n catalog = Table.read(params['filename'], format= params['format']);\n \n # Check each row for nan values and discard them\n for col in catalog.dtype.names:\n try:\n catalog= catalog[~np.isnan(catalog[col])]\n except TypeError:\n print(\"'{0}' column is not a float\".format(col))\n response = {\n 'id': 'plot table',\n 'columns': catalog.dtype.names,\n 'data': [np.array(record).tolist() for record in catalog],\n 'title': os.path.basename(params['filename'])\n }\n return response\n\ndef fit_data1d(id, params):\n fit_type = fit_types[params['fit_type']]\n if params['model'] == 'polynomial':\n fit = models.Polynomial1D(params['order'])\n coefficients = params['coefficients'].split(',')\n coefficients = [float(coeff.strip()) for coeff in coefficients]\n for i in range(params['order']+1):\n coeff = 'c'+str(i)\n setattr(fit, coeff, coefficients[i])\n elif params['model'] == 'gaussian':\n fit = models.Gaussian1D(amplitude=params['amplitude'], mean=params['mean'],\n stddev=params['std_dev'])\n best_fit = fit_type(fit, params['x'], params['y'])\n \n diff = best_fit(params['x'])-params['y']\n mean_sq_err = 1/len(params['x'])*np.sum(diff**2)\n rms_dev = np.sqrt(mean_sq_err)\n parameters = {}\n for n in range(len(best_fit._parameters)):\n parameters[best_fit.param_names[n]] = best_fit._parameters[n]\n array_x = np.array(params['x'])\n model_x = np.linspace(np.amin(array_x), np.amax(array_x),30)\n model_y = best_fit(model_x)\n model_data = zip(model_x.tolist(),model_y.tolist())\n response = {\n 'id': 'best fit',\n 'model': params['model'],\n 'parameters': parameters,\n 'columns': ['model_x', 'model_y'],\n 'data': model_data,\n 'title': 'fit',\n 'rms_dev': rms_dev\n }\n return response\n "
] |
[
[
"numpy.amax",
"numpy.sqrt",
"numpy.amin",
"numpy.isnan",
"numpy.load",
"numpy.array",
"numpy.sum"
]
] |
ahmhekal/Final_Project
|
[
"7d2137841c99239fe389754634c2185d9767a81f"
] |
[
"ros/src/tl_detector/tl_detector.py"
] |
[
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nfrom scipy.spatial import KDTree\n\nSTATE_COUNT_THRESHOLD = 3\nctr=0 #counter to read an image out of 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n \n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n self.waypoints_2d = None\n self.waypoints_tree = None\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoints_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n Args:\n msg (Image): image from car-mounted camera\n \"\"\"\n global ctr\n if ctr%15==0:\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n \n ctr=0\n ctr+=1\n\n \n \n def get_closest_waypoint(self, x, y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n x (float): position to match a waypoint to\n y (float): position to match a waypoint to\n Returns:\n int: index of the closest waypoint in self.waypoints\n \"\"\"\n closest_idx = self.waypoints_tree.query([x, y], 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the 
traffic light\n Args:\n light (TrafficLight): light to classify\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n\n return light.state\n\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n return self.light_classifier.get_classification(cv_image)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n closest_light = None\n line_wp_idx = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n\n # TODO find the closest visible traffic light (if one exists)\n diff = len(self.waypoints.waypoints)\n for i, light in enumerate(self.lights):\n # Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n # Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n if 0 <= d < diff:\n diff = d\n closest_light = light\n line_wp_idx = temp_wp_idx\n\n if closest_light:\n state = self.get_light_state(closest_light)\n # rospy.logwarn(\"Nearest traffic light state: {0}\".format(state))\n return line_wp_idx, state\n\n self.waypoints = None\n rospy.logwarn(\"Nearest traffic light state: UNKNOWN\")\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n"
] |
[
[
"scipy.spatial.KDTree"
]
] |
fossabot/feast
|
[
"0c0b50927ce023315e25edba16b0f573431ef2d8"
] |
[
"tests/e2e/redis/basic-ingest-redis-serving.py"
] |
[
"import math\nimport os\nimport random\nimport tempfile\nimport time\nimport uuid\nfrom copy import copy\nfrom datetime import datetime, timedelta\n\nimport grpc\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport pytz\nfrom google.protobuf.duration_pb2 import Duration\n\nfrom feast.client import Client\nfrom feast.config import Config\nfrom feast.constants import CONFIG_AUTH_PROVIDER\nfrom feast.contrib.job_controller.client import Client as JCClient\nfrom feast.core import CoreService_pb2\nfrom feast.core.CoreService_pb2 import ApplyFeatureSetResponse, GetFeatureSetResponse\nfrom feast.core.CoreService_pb2_grpc import CoreServiceStub\nfrom feast.core.IngestionJob_pb2 import IngestionJobStatus\nfrom feast.entity import Entity\nfrom feast.feature import Feature\nfrom feast.feature_set import FeatureSet, FeatureSetRef\nfrom feast.grpc.auth import get_auth_metadata_plugin\nfrom feast.serving.ServingService_pb2 import (\n GetOnlineFeaturesRequest,\n GetOnlineFeaturesResponse,\n)\nfrom feast.source import KafkaSource\nfrom feast.type_map import ValueType\nfrom feast.types.Value_pb2 import Int64List\nfrom feast.types.Value_pb2 import Value as Value\nfrom feast.wait import wait_retry_backoff\n\nFLOAT_TOLERANCE = 0.00001\nPROJECT_NAME = \"basic_\" + uuid.uuid4().hex.upper()[0:6]\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nAUTH_PROVIDER = \"google\"\n\n\ndef basic_dataframe(entities, features, ingest_time, n_size, null_features=[]):\n \"\"\"\n Generate a basic feast-ingestable dataframe for testing.\n Entity value incrementally increase from 1 to n_size\n Features values are randomlly generated floats.\n entities - names of entities\n features - names of the features\n ingest_time - ingestion timestamp\n n_size - no. of rows in the generated dataframe.\n null_features - names of features that contain null values\n Returns the generated dataframe\n \"\"\"\n df_dict = {\n \"datetime\": [ingest_time.replace(tzinfo=pytz.utc) for _ in range(n_size)],\n }\n for entity_name in entities:\n df_dict[entity_name] = list(range(1, n_size + 1))\n for feature_name in features:\n df_dict[feature_name] = [np.random.rand() for _ in range(n_size)]\n for null_feature_name in null_features:\n df_dict[null_feature_name] = [None for _ in range(n_size)]\n return pd.DataFrame(df_dict)\n\n\ndef check_online_response(feature_ref, ingest_df, response):\n \"\"\"\n Check the feature value and status in the given online serving response.\n feature_refs - string feature ref used to access feature in response\n ingest_df - dataframe of ingested values\n response - response to extract retrieved feature value and metadata\n Returns True if given response has expected feature value and metadata, otherwise False.\n \"\"\"\n feature_ref_splits = feature_ref.split(\":\")\n if len(feature_ref_splits) == 1:\n feature_name = feature_ref\n else:\n _, feature_name = feature_ref_splits\n\n returned_status = response.field_values[0].statuses[feature_ref]\n if ingest_df.loc[0, feature_name] is None:\n return returned_status == GetOnlineFeaturesResponse.FieldStatus.NULL_VALUE\n else:\n sent_value = float(ingest_df.iloc[0][feature_name])\n returned_value = float(response.field_values[0].fields[feature_ref].float_val)\n return (\n math.isclose(sent_value, returned_value, abs_tol=FLOAT_TOLERANCE)\n and returned_status == GetOnlineFeaturesResponse.FieldStatus.PRESENT\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef core_url(pytestconfig):\n return pytestconfig.getoption(\"core_url\")\n\n\n@pytest.fixture
(scope=\"module\")\ndef serving_url(pytestconfig):\n return pytestconfig.getoption(\"serving_url\")\n\n\n@pytest.fixture(scope=\"module\")\ndef jobcontroller_url(pytestconfig):\n return pytestconfig.getoption(\"jobcontroller_url\")\n\n\n@pytest.fixture(scope=\"module\")\ndef allow_dirty(pytestconfig):\n return True if pytestconfig.getoption(\"allow_dirty\").lower() == \"true\" else False\n\n\n@pytest.fixture(scope=\"module\")\ndef enable_auth(pytestconfig):\n return True if pytestconfig.getoption(\"enable_auth\").lower() == \"true\" else False\n\n\n@pytest.fixture(scope=\"module\")\ndef kafka_brokers(pytestconfig):\n return pytestconfig.getoption(\"kafka_brokers\")\n\n\n@pytest.fixture(scope=\"module\")\ndef client(core_url, serving_url, allow_dirty, enable_auth):\n # Get client for core and serving\n # if enable_auth is True, Google Id token will be\n # passed in the metadata for authentication.\n client = Client(\n core_url=core_url,\n serving_url=serving_url,\n enable_auth=enable_auth,\n auth_provider=AUTH_PROVIDER,\n )\n client.create_project(PROJECT_NAME)\n\n # Ensure Feast core is active, but empty\n if not allow_dirty:\n feature_sets = client.list_feature_sets()\n if len(feature_sets) > 0:\n raise Exception(\n \"Feast cannot have existing feature sets registered. Exiting tests.\"\n )\n\n return client\n\n\n@pytest.fixture(scope=\"module\")\ndef jobcontroller_client(jobcontroller_url):\n client = JCClient(jobcontroller_url=jobcontroller_url)\n return client\n\n\n@pytest.fixture(scope=\"module\")\ndef ingest_time():\n return datetime.utcnow()\n\n\n@pytest.fixture(scope=\"module\")\ndef cust_trans_df(ingest_time):\n return basic_dataframe(\n entities=[\"customer_id\"],\n features=[\"daily_transactions\", \"total_transactions\"],\n null_features=[\"null_values\"],\n ingest_time=ingest_time,\n n_size=5,\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef driver_df(ingest_time):\n return basic_dataframe(\n entities=[\"driver_id\"],\n features=[\"rating\", \"cost\"],\n ingest_time=ingest_time,\n n_size=5,\n )\n\n\ndef test_version_returns_results(client):\n version_info = client.version()\n assert not version_info[\"core\"] == \"not configured\"\n assert not version_info[\"serving\"] == \"not configured\"\n\n\ndef test_list_feature_sets_when_auth_enabled_should_raise(enable_auth):\n if enable_auth:\n client = Client(core_url=core_url, serving_url=serving_url, enable_auth=False)\n with pytest.raises(ConnectionError):\n client.list_feature_sets()\n\n\n@pytest.mark.timeout(45)\n@pytest.mark.run(order=10)\ndef test_basic_register_feature_set_success(client):\n # Register feature set without project\n cust_trans_fs_expected = FeatureSet.from_yaml(\n f\"{DIR_PATH}/basic/cust_trans_fs.yaml\"\n )\n driver_fs_expected = FeatureSet.from_yaml(f\"{DIR_PATH}/basic/driver_fs.yaml\")\n client.apply(cust_trans_fs_expected)\n client.apply(driver_fs_expected)\n cust_trans_fs_actual = client.get_feature_set(\"customer_transactions\")\n assert cust_trans_fs_actual == cust_trans_fs_expected\n driver_fs_actual = client.get_feature_set(\"driver\")\n assert driver_fs_actual == driver_fs_expected\n\n # Register feature set with project\n cust_trans_fs_expected = FeatureSet.from_yaml(\n f\"{DIR_PATH}/basic/cust_trans_fs.yaml\"\n )\n client.set_project(PROJECT_NAME)\n client.apply(cust_trans_fs_expected)\n cust_trans_fs_actual = client.get_feature_set(\n \"customer_transactions\", project=PROJECT_NAME\n )\n assert cust_trans_fs_actual == cust_trans_fs_expected\n\n # Register feature set with labels\n 
driver_unlabelled_fs = FeatureSet(\n \"driver_unlabelled\",\n features=[Feature(\"rating\", ValueType.FLOAT), Feature(\"cost\", ValueType.FLOAT)],\n entities=[Entity(\"entity_id\", ValueType.INT64)],\n max_age=Duration(seconds=100),\n )\n driver_labeled_fs_expected = FeatureSet(\n \"driver_labeled\",\n features=[Feature(\"rating\", ValueType.FLOAT), Feature(\"cost\", ValueType.FLOAT)],\n entities=[Entity(\"entity_id\", ValueType.INT64)],\n max_age=Duration(seconds=100),\n labels={\"key1\": \"val1\"},\n )\n client.set_project(PROJECT_NAME)\n client.apply(driver_unlabelled_fs)\n client.apply(driver_labeled_fs_expected)\n driver_fs_actual = client.list_feature_sets(\n project=PROJECT_NAME, labels={\"key1\": \"val1\"}\n )[0]\n assert driver_fs_actual == driver_labeled_fs_expected\n\n # reset client's project for other tests\n client.set_project()\n\n\n@pytest.mark.timeout(300)\n@pytest.mark.run(order=11)\ndef test_basic_ingest_success(client, cust_trans_df, driver_df):\n cust_trans_fs = client.get_feature_set(name=\"customer_transactions\")\n driver_fs = client.get_feature_set(name=\"driver\")\n\n # Ingest customer transaction data\n client.ingest(cust_trans_fs, cust_trans_df)\n client.ingest(driver_fs, driver_df)\n time.sleep(5)\n\n\n@pytest.mark.timeout(90)\n@pytest.mark.run(order=12)\ndef test_basic_retrieve_online_success(client, cust_trans_df):\n feature_refs = [\"daily_transactions\", \"total_transactions\", \"null_values\"]\n\n # Poll serving for feature values until the correct values are returned\n def try_get_features():\n response = client.get_online_features(\n entity_rows=[\n GetOnlineFeaturesRequest.EntityRow(\n fields={\n \"customer_id\": Value(\n int64_val=cust_trans_df.iloc[0][\"customer_id\"]\n )\n }\n )\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n is_ok = all(\n [\n check_online_response(ref, cust_trans_df, response)\n for ref in feature_refs\n ]\n )\n return response, is_ok\n\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n\n@pytest.mark.timeout(90)\n@pytest.mark.run(order=13)\ndef test_basic_retrieve_online_multiple_featureset(client, cust_trans_df, driver_df):\n # Test retrieve with different variations of the string feature refs\n # ie feature set inference for feature refs without specified feature set\n feature_ref_df_mapping = [\n (\"customer_transactions:daily_transactions\", cust_trans_df),\n (\"driver:rating\", driver_df),\n (\"total_transactions\", cust_trans_df),\n ]\n\n # Poll serving for feature values until the correct values are returned\n def try_get_features():\n feature_refs = [mapping[0] for mapping in feature_ref_df_mapping]\n response = client.get_online_features(\n entity_rows=[\n GetOnlineFeaturesRequest.EntityRow(\n fields={\n \"customer_id\": Value(\n int64_val=cust_trans_df.iloc[0][\"customer_id\"]\n ),\n \"driver_id\": Value(int64_val=driver_df.iloc[0][\"driver_id\"]),\n }\n )\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n is_ok = all(\n [\n check_online_response(ref, df, response)\n for ref, df in feature_ref_df_mapping\n ]\n )\n return response, is_ok\n\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef nonlist_entity_dataframe():\n # Dataframe setup for feature retrieval with entity provided not in list format\n N_ROWS = 2\n time_offset = 
datetime.utcnow().replace(tzinfo=pytz.utc)\n customer_df = pd.DataFrame(\n {\n \"datetime\": [time_offset] * N_ROWS,\n \"customer_id2\": [i for i in range(N_ROWS)],\n \"customer2_rating\": [i for i in range(N_ROWS)],\n \"customer2_cost\": [float(i) + 0.5 for i in range(N_ROWS)],\n \"customer2_past_transactions_int\": [[i, i + 2] for i in range(N_ROWS)],\n \"customer2_past_transactions_double\": [\n [float(i) + 0.5, float(i) + 2] for i in range(N_ROWS)\n ],\n \"customer2_past_transactions_float\": [\n [float(i) + 0.5, float(i) + 2] for i in range(N_ROWS)\n ],\n \"customer2_past_transactions_string\": [\n [\"first_\" + str(i), \"second_\" + str(i)] for i in range(N_ROWS)\n ],\n \"customer2_past_transactions_bool\": [[True, False] for _ in range(N_ROWS)],\n }\n )\n return customer_df\n\n\n@pytest.fixture(scope=\"module\")\ndef list_entity_dataframe():\n # Dataframe setup for feature retrieval with entity provided in list format\n N_ROWS = 2\n time_offset = datetime.utcnow().replace(tzinfo=pytz.utc)\n customer_df = pd.DataFrame(\n {\n \"datetime\": [time_offset] * N_ROWS,\n \"district_ids\": [\n [np.int64(i), np.int64(i + 1), np.int64(i + 2)] for i in range(N_ROWS)\n ],\n \"district_rating\": [i for i in range(N_ROWS)],\n \"district_cost\": [float(i) + 0.5 for i in range(N_ROWS)],\n \"district_past_transactions_int\": [[i, i + 2] for i in range(N_ROWS)],\n \"district_past_transactions_double\": [\n [float(i) + 0.5, float(i) + 2] for i in range(N_ROWS)\n ],\n \"district_past_transactions_float\": [\n [float(i) + 0.5, float(i) + 2] for i in range(N_ROWS)\n ],\n \"district_past_transactions_string\": [\n [\"first_\" + str(i), \"second_\" + str(i)] for i in range(N_ROWS)\n ],\n \"district_past_transactions_bool\": [[True, False] for _ in range(N_ROWS)],\n }\n )\n return customer_df\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=14)\ndef test_basic_retrieve_online_entity_nonlistform(\n client, nonlist_entity_dataframe, list_entity_dataframe\n):\n # Case 1: Feature retrieval with multiple entities retrieval check\n customer_fs = FeatureSet(\n name=\"customer2\",\n features=[\n Feature(name=\"customer2_rating\", dtype=ValueType.INT64),\n Feature(name=\"customer2_cost\", dtype=ValueType.FLOAT),\n Feature(name=\"customer2_past_transactions_int\", dtype=ValueType.INT64_LIST),\n Feature(\n name=\"customer2_past_transactions_double\", dtype=ValueType.DOUBLE_LIST\n ),\n Feature(\n name=\"customer2_past_transactions_float\", dtype=ValueType.FLOAT_LIST\n ),\n Feature(\n name=\"customer2_past_transactions_string\", dtype=ValueType.STRING_LIST\n ),\n Feature(name=\"customer2_past_transactions_bool\", dtype=ValueType.BOOL_LIST),\n ],\n entities=[Entity(\"customer_id2\", ValueType.INT64)],\n max_age=Duration(seconds=3600),\n )\n\n client.set_project(PROJECT_NAME)\n client.apply(customer_fs)\n\n customer_fs = client.get_feature_set(name=\"customer2\")\n client.ingest(customer_fs, nonlist_entity_dataframe, timeout=600)\n time.sleep(15)\n\n online_request_entity = [{\"customer_id2\": 0}, {\"customer_id2\": 1}]\n online_request_features = [\n \"customer2_rating\",\n \"customer2_cost\",\n \"customer2_past_transactions_int\",\n \"customer2_past_transactions_double\",\n \"customer2_past_transactions_float\",\n \"customer2_past_transactions_string\",\n \"customer2_past_transactions_bool\",\n ]\n online_request_entity2 = [\n {\"customer_id2\": Value(int64_val=0)},\n {\"customer_id2\": Value(int64_val=1)},\n ]\n\n def try_get_features1():\n response = client.get_online_features(\n entity_rows=online_request_entity, 
feature_refs=online_request_features\n )\n is_ok = check_online_response(\n \"customer2_rating\", nonlist_entity_dataframe, response\n )\n return response, is_ok\n\n def try_get_features2():\n response = client.get_online_features(\n entity_rows=online_request_entity2, feature_refs=online_request_features\n )\n is_ok = check_online_response(\n \"customer2_rating\", nonlist_entity_dataframe, response\n )\n return response, is_ok\n\n online_features_actual1 = wait_retry_backoff(\n retry_fn=try_get_features1,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_actual2 = wait_retry_backoff(\n retry_fn=try_get_features2,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_expected = {\n \"customer_id2\": [0, 1],\n \"customer2_rating\": [0, 1],\n \"customer2_cost\": [0.5, 1.5],\n \"customer2_past_transactions_int\": [[0, 2], [1, 3]],\n \"customer2_past_transactions_double\": [[0.5, 2.0], [1.5, 3.0]],\n \"customer2_past_transactions_float\": [[0.5, 2.0], [1.5, 3.0]],\n \"customer2_past_transactions_string\": [\n [\"first_0\", \"second_0\"],\n [\"first_1\", \"second_1\"],\n ],\n \"customer2_past_transactions_bool\": [[True, False], [True, False]],\n }\n\n assert online_features_actual1.to_dict() == online_features_expected\n assert online_features_actual2.to_dict() == online_features_expected\n\n # Case 2: Feature retrieval with multiple entities retrieval check with mixed types\n with pytest.raises(TypeError) as excinfo:\n online_request_entity2 = [{\"customer_id\": 0}, {\"customer_id\": \"error_pls\"}]\n online_features_actual2 = client.get_online_features(\n entity_rows=online_request_entity2, feature_refs=online_request_features\n )\n\n assert (\n \"Input entity customer_id has mixed types, ValueType.STRING and ValueType.INT64. 
That is not allowed.\"\n in str(excinfo.value)\n )\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=15)\ndef test_basic_retrieve_online_entity_listform(client, list_entity_dataframe):\n # Case 1: Features retrieval with entity in list format check\n district_fs = FeatureSet(\n name=\"district\",\n features=[\n Feature(name=\"district_rating\", dtype=ValueType.INT64),\n Feature(name=\"district_cost\", dtype=ValueType.FLOAT),\n Feature(name=\"district_past_transactions_int\", dtype=ValueType.INT64_LIST),\n Feature(\n name=\"district_past_transactions_double\", dtype=ValueType.DOUBLE_LIST\n ),\n Feature(\n name=\"district_past_transactions_float\", dtype=ValueType.FLOAT_LIST\n ),\n Feature(\n name=\"district_past_transactions_string\", dtype=ValueType.STRING_LIST\n ),\n Feature(name=\"district_past_transactions_bool\", dtype=ValueType.BOOL_LIST),\n ],\n entities=[Entity(\"district_ids\", dtype=ValueType.INT64_LIST)],\n max_age=Duration(seconds=3600),\n )\n\n client.set_project(PROJECT_NAME)\n client.apply(district_fs)\n\n district_fs = client.get_feature_set(name=\"district\")\n client.ingest(district_fs, list_entity_dataframe, timeout=600)\n time.sleep(15)\n\n online_request_entity = [{\"district_ids\": [np.int64(1), np.int64(2), np.int64(3)]}]\n online_request_features = [\n \"district_rating\",\n \"district_cost\",\n \"district_past_transactions_int\",\n \"district_past_transactions_double\",\n \"district_past_transactions_float\",\n \"district_past_transactions_string\",\n \"district_past_transactions_bool\",\n ]\n online_request_entity2 = [\n {\"district_ids\": Value(int64_list_val=Int64List(val=[1, 2, 3]))}\n ]\n\n def try_get_features1():\n response = client.get_online_features(\n entity_rows=online_request_entity, feature_refs=online_request_features\n )\n is_ok = check_online_response(\n \"district_rating\", list_entity_dataframe, response\n )\n return response, is_ok\n\n def try_get_features2():\n response = client.get_online_features(\n entity_rows=online_request_entity2, feature_refs=online_request_features\n )\n is_ok = check_online_response(\n \"district_rating\", list_entity_dataframe, response\n )\n return response, is_ok\n\n online_features_actual = wait_retry_backoff(\n retry_fn=try_get_features1,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_actual2 = wait_retry_backoff(\n retry_fn=try_get_features2,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_expected = {\n \"district_ids\": [[np.int64(1), np.int64(2), np.int64(3)]],\n \"district_rating\": [1],\n \"district_cost\": [1.5],\n \"district_past_transactions_int\": [[1, 3]],\n \"district_past_transactions_double\": [[1.5, 3.0]],\n \"district_past_transactions_float\": [[1.5, 3.0]],\n \"district_past_transactions_string\": [[\"first_1\", \"second_1\"]],\n \"district_past_transactions_bool\": [[True, False]],\n }\n\n assert online_features_actual.to_dict() == online_features_expected\n assert online_features_actual2.to_dict() == online_features_expected\n\n # Case 2: Features retrieval with entity in list format check with mixed types\n with pytest.raises(ValueError) as excinfo:\n online_request_entity2 = [{\"district_ids\": [np.int64(1), np.int64(2), True]}]\n online_features_actual2 = client.get_online_features(\n entity_rows=online_request_entity2, feature_refs=online_request_features\n )\n\n assert (\n \"List value type for field district_ids is inconsistent. 
ValueType.INT64 different from ValueType.BOOL.\"\n in str(excinfo.value)\n )\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=16)\ndef test_basic_ingest_retrieval_fs(client):\n # Set to another project to test ingestion based on current project context\n client.set_project(PROJECT_NAME + \"_NS1\")\n driver_fs = FeatureSet(\n name=\"driver_fs\",\n features=[\n Feature(name=\"driver_fs_rating\", dtype=ValueType.FLOAT),\n Feature(name=\"driver_fs_cost\", dtype=ValueType.FLOAT),\n ],\n entities=[Entity(\"driver_fs_id\", ValueType.INT64)],\n max_age=Duration(seconds=3600),\n )\n client.apply(driver_fs)\n\n N_ROWS = 2\n time_offset = datetime.utcnow().replace(tzinfo=pytz.utc)\n driver_df = pd.DataFrame(\n {\n \"datetime\": [time_offset] * N_ROWS,\n \"driver_fs_id\": [i for i in range(N_ROWS)],\n \"driver_fs_rating\": [float(i) for i in range(N_ROWS)],\n \"driver_fs_cost\": [float(i) + 0.5 for i in range(N_ROWS)],\n }\n )\n client.ingest(driver_fs, driver_df, timeout=600)\n time.sleep(15)\n\n online_request_entity = [{\"driver_fs_id\": 0}, {\"driver_fs_id\": 1}]\n online_request_features = [\"driver_fs_rating\", \"driver_fs_cost\"]\n\n def try_get_features():\n response = client.get_online_features(\n entity_rows=online_request_entity, feature_refs=online_request_features\n )\n is_ok = check_online_response(\"driver_fs_rating\", driver_df, response)\n return response, is_ok\n\n online_features_actual = wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_expected = {\n \"driver_fs_id\": [0, 1],\n \"driver_fs_rating\": [0.0, 1.0],\n \"driver_fs_cost\": [0.5, 1.5],\n }\n\n assert online_features_actual.to_dict() == online_features_expected\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=17)\ndef test_basic_ingest_retrieval_str(client):\n # Set to another project to test ingestion based on current project context\n client.set_project(PROJECT_NAME + \"_NS1\")\n customer_fs = FeatureSet(\n name=\"cust_fs\",\n features=[\n Feature(name=\"cust_rating\", dtype=ValueType.INT64),\n Feature(name=\"cust_cost\", dtype=ValueType.FLOAT),\n ],\n entities=[Entity(\"cust_id\", ValueType.INT64)],\n max_age=Duration(seconds=3600),\n )\n client.apply(customer_fs)\n\n N_ROWS = 2\n time_offset = datetime.utcnow().replace(tzinfo=pytz.utc)\n cust_df = pd.DataFrame(\n {\n \"datetime\": [time_offset] * N_ROWS,\n \"cust_id\": [i for i in range(N_ROWS)],\n \"cust_rating\": [i for i in range(N_ROWS)],\n \"cust_cost\": [float(i) + 0.5 for i in range(N_ROWS)],\n }\n )\n client.ingest(\"cust_fs\", cust_df, timeout=600)\n time.sleep(15)\n\n online_request_entity = [{\"cust_id\": 0}, {\"cust_id\": 1}]\n online_request_features = [\"cust_rating\", \"cust_cost\"]\n\n def try_get_features():\n response = client.get_online_features(\n entity_rows=online_request_entity, feature_refs=online_request_features\n )\n is_ok = check_online_response(\"cust_rating\", cust_df, response)\n return response, is_ok\n\n online_features_actual = wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_expected = {\n \"cust_id\": [0, 1],\n \"cust_rating\": [0, 1],\n \"cust_cost\": [0.5, 1.5],\n }\n\n assert online_features_actual.to_dict() == online_features_expected\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=18)\ndef test_basic_ingest_retrieval_multi_entities(client):\n # Set to another project to test ingestion based on current project 
context\n client.set_project(PROJECT_NAME + \"_NS1\")\n merchant_fs = FeatureSet(\n name=\"merchant_fs\",\n features=[Feature(name=\"merchant_sales\", dtype=ValueType.FLOAT)],\n entities=[\n Entity(\"driver_id\", ValueType.INT64),\n Entity(\"merchant_id\", ValueType.INT64),\n ],\n max_age=Duration(seconds=3600),\n )\n client.apply(merchant_fs)\n\n N_ROWS = 2\n time_offset = datetime.utcnow().replace(tzinfo=pytz.utc)\n merchant_df = pd.DataFrame(\n {\n \"datetime\": [time_offset] * N_ROWS,\n \"driver_id\": [i for i in range(N_ROWS)],\n \"merchant_id\": [i for i in range(N_ROWS)],\n \"merchant_sales\": [float(i) + 0.5 for i in range(N_ROWS)],\n }\n )\n client.ingest(\"merchant_fs\", merchant_df, timeout=600)\n time.sleep(15)\n\n online_request_entity = [\n {\"driver_id\": 0, \"merchant_id\": 0},\n {\"driver_id\": 1, \"merchant_id\": 1},\n ]\n online_request_features = [\"merchant_sales\"]\n\n def try_get_features():\n response = client.get_online_features(\n entity_rows=online_request_entity, feature_refs=online_request_features\n )\n is_ok = check_online_response(\"merchant_sales\", merchant_df, response)\n return response, is_ok\n\n online_features_actual = wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n online_features_expected = {\n \"driver_id\": [0, 1],\n \"merchant_id\": [0, 1],\n \"merchant_sales\": [0.5, 1.5],\n }\n\n assert online_features_actual.to_dict() == online_features_expected\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=19)\ndef test_basic_retrieve_feature_row_missing_fields(client, cust_trans_df):\n feature_refs = [\"daily_transactions\", \"total_transactions\", \"null_values\"]\n\n # apply cust_trans_fs and ingest dataframe\n client.set_project(PROJECT_NAME + \"_basic_retrieve_missing_fields\")\n old_cust_trans_fs = FeatureSet.from_yaml(f\"{DIR_PATH}/basic/cust_trans_fs.yaml\")\n client.apply(old_cust_trans_fs)\n client.ingest(old_cust_trans_fs, cust_trans_df)\n\n # update cust_trans_fs with one additional feature.\n # feature rows ingested before the feature set update will be missing a field.\n new_cust_trans_fs = client.get_feature_set(name=\"customer_transactions\")\n new_cust_trans_fs.add(Feature(\"n_trips\", ValueType.INT64))\n client.apply(new_cust_trans_fs)\n # sleep to ensure feature set update is propagated\n time.sleep(15)\n\n # attempt to retrieve features from feature rows with missing fields\n def try_get_features():\n response = client.get_online_features(\n entity_rows=[\n {\"customer_id\": np.int64(cust_trans_df.iloc[0][\"customer_id\"])}\n ],\n feature_refs=feature_refs + [\"n_trips\"],\n ) # type: GetOnlineFeaturesResponse\n # check if the ingested fields can be correctly retrieved.\n is_ok = all(\n [\n check_online_response(ref, cust_trans_df, response)\n for ref in feature_refs\n ]\n )\n # should return null_value status for missing field n_trips\n is_missing_ok = (\n response.field_values[0].statuses[\"n_trips\"]\n == GetOnlineFeaturesResponse.FieldStatus.NULL_VALUE\n )\n return response, is_ok and is_missing_ok\n\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n\n@pytest.mark.timeout(600)\n@pytest.mark.run(order=20)\ndef test_basic_retrieve_feature_row_extra_fields(client, cust_trans_df):\n feature_refs = [\"daily_transactions\", \"total_transactions\"]\n # apply cust_trans_fs and ingest dataframe\n client.set_project(PROJECT_NAME + 
\"_basic_retrieve_missing_fields\")\n old_cust_trans_fs = FeatureSet.from_yaml(f\"{DIR_PATH}/basic/cust_trans_fs.yaml\")\n client.apply(old_cust_trans_fs)\n client.ingest(old_cust_trans_fs, cust_trans_df)\n\n # update cust_trans_fs with the null_values feature dropped.\n # feature rows ingested before the feature set update will have an extra field.\n new_cust_trans_fs = client.get_feature_set(name=\"customer_transactions\")\n new_cust_trans_fs.drop(\"null_values\")\n client.apply(new_cust_trans_fs)\n # sleep to ensure feature set update is propagated\n time.sleep(15)\n\n # attempt to retrieve features from feature rows with extra fields\n def try_get_features():\n response = client.get_online_features(\n entity_rows=[\n {\"customer_id\": np.int64(cust_trans_df.iloc[0][\"customer_id\"])}\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n # check if the non dropped fields can be correctly retrieved.\n is_ok = all(\n [\n check_online_response(ref, cust_trans_df, response)\n for ref in feature_refs\n ]\n )\n return response, is_ok\n\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef all_types_dataframe():\n return pd.DataFrame(\n {\n \"datetime\": [datetime.utcnow().replace(tzinfo=pytz.utc) for _ in range(3)],\n \"user_id\": [1001, 1002, 1003],\n \"int32_feature\": [np.int32(1), np.int32(2), np.int32(3)],\n \"int64_feature\": [np.int64(1), np.int64(2), np.int64(3)],\n \"float_feature\": [np.float(0.1), np.float(0.2), np.float(0.3)],\n \"double_feature\": [np.float64(0.1), np.float64(0.2), np.float64(0.3)],\n \"string_feature\": [\"one\", \"two\", \"three\"],\n \"bytes_feature\": [b\"one\", b\"two\", b\"three\"],\n \"bool_feature\": [True, False, False],\n \"int32_list_feature\": [\n np.array([1, 2, 3, 4], dtype=np.int32),\n np.array([1, 2, 3, 4], dtype=np.int32),\n np.array([1, 2, 3, 4], dtype=np.int32),\n ],\n \"int64_list_feature\": [\n np.array([1, 2, 3, 4], dtype=np.int64),\n np.array([1, 2, 3, 4], dtype=np.int64),\n np.array([1, 2, 3, 4], dtype=np.int64),\n ],\n \"float_list_feature\": [\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float32),\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float32),\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float32),\n ],\n \"double_list_feature\": [\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float64),\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float64),\n np.array([1.1, 1.2, 1.3, 1.4], dtype=np.float64),\n ],\n \"string_list_feature\": [\n np.array([\"one\", \"two\", \"three\"]),\n np.array([\"one\", \"two\", \"three\"]),\n np.array([\"one\", \"two\", \"three\"]),\n ],\n \"bytes_list_feature\": [\n np.array([b\"one\", b\"two\", b\"three\"]),\n np.array([b\"one\", b\"two\", b\"three\"]),\n np.array([b\"one\", b\"two\", b\"three\"]),\n ],\n # \"bool_list_feature\": [\n # np.array([True, False, True]),\n # np.array([True, False, True]),\n # np.array([True, False, True]),\n # ],\n # TODO: https://github.com/feast-dev/feast/issues/341\n }\n )\n\n\n@pytest.mark.timeout(45)\n@pytest.mark.run(order=21)\ndef test_all_types_register_feature_set_success(client):\n client.set_project(PROJECT_NAME)\n\n all_types_fs_expected = FeatureSet(\n name=\"all_types\",\n entities=[Entity(name=\"user_id\", dtype=ValueType.INT64)],\n features=[\n Feature(name=\"float_feature\", dtype=ValueType.FLOAT),\n Feature(name=\"int64_feature\", dtype=ValueType.INT64),\n Feature(name=\"int32_feature\", dtype=ValueType.INT32),\n 
Feature(name=\"string_feature\", dtype=ValueType.STRING),\n Feature(name=\"bytes_feature\", dtype=ValueType.BYTES),\n Feature(name=\"bool_feature\", dtype=ValueType.BOOL),\n Feature(name=\"double_feature\", dtype=ValueType.DOUBLE),\n Feature(name=\"double_list_feature\", dtype=ValueType.DOUBLE_LIST),\n Feature(name=\"float_list_feature\", dtype=ValueType.FLOAT_LIST),\n Feature(name=\"int64_list_feature\", dtype=ValueType.INT64_LIST),\n Feature(name=\"int32_list_feature\", dtype=ValueType.INT32_LIST),\n Feature(name=\"string_list_feature\", dtype=ValueType.STRING_LIST),\n Feature(name=\"bytes_list_feature\", dtype=ValueType.BYTES_LIST),\n ],\n max_age=Duration(seconds=3600),\n )\n\n # Register feature set\n client.apply(all_types_fs_expected)\n\n # Feast Core needs some time to fully commit the FeatureSet applied\n # when there is no existing job yet for the Featureset\n time.sleep(15)\n\n all_types_fs_actual = client.get_feature_set(name=\"all_types\")\n\n assert all_types_fs_actual == all_types_fs_expected\n\n if all_types_fs_actual is None:\n raise Exception(\n \"Client cannot retrieve 'all_types_fs' FeatureSet \"\n \"after registration. Either Feast Core does not save the \"\n \"FeatureSet correctly or the client needs to wait longer for FeatureSet \"\n \"to be committed.\"\n )\n\n\[email protected](300)\[email protected](order=22)\ndef test_all_types_ingest_success(client, all_types_dataframe):\n # Get all_types feature set\n all_types_fs = client.get_feature_set(name=\"all_types\")\n\n # Ingest user embedding data\n client.ingest(all_types_fs, all_types_dataframe)\n\n\[email protected](90)\[email protected](order=23)\ndef test_all_types_retrieve_online_success(client, all_types_dataframe):\n # Poll serving for feature values until the correct values are returned_float_list\n feature_refs = [\n \"float_feature\",\n \"int64_feature\",\n \"int32_feature\",\n \"double_feature\",\n \"string_feature\",\n \"bool_feature\",\n \"bytes_feature\",\n \"float_list_feature\",\n \"int64_list_feature\",\n \"int32_list_feature\",\n \"string_list_feature\",\n \"bytes_list_feature\",\n \"double_list_feature\",\n ]\n\n def try_get_features():\n response = client.get_online_features(\n entity_rows=[\n GetOnlineFeaturesRequest.EntityRow(\n fields={\n \"user_id\": Value(\n int64_val=all_types_dataframe.iloc[0][\"user_id\"]\n )\n }\n )\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n is_ok = check_online_response(\"float_feature\", all_types_dataframe, response)\n return response, is_ok\n\n response = wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n # check returned values\n returned_float_list = (\n response.field_values[0].fields[\"float_list_feature\"].float_list_val.val\n )\n sent_float_list = all_types_dataframe.iloc[0][\"float_list_feature\"]\n assert math.isclose(\n returned_float_list[0], sent_float_list[0], abs_tol=FLOAT_TOLERANCE\n )\n # check returned metadata\n assert (\n response.field_values[0].statuses[\"float_list_feature\"]\n == GetOnlineFeaturesResponse.FieldStatus.PRESENT\n )\n\n\[email protected](300)\[email protected](order=35)\ndef test_all_types_ingest_jobs(jobcontroller_client, client, all_types_dataframe):\n # list ingestion jobs given featureset\n client.set_project(PROJECT_NAME)\n\n all_types_fs = client.get_feature_set(name=\"all_types\")\n ingest_jobs = jobcontroller_client.list_ingest_jobs(\n feature_set_ref=FeatureSetRef.from_feature_set(all_types_fs)\n )\n # 
filter ingestion jobs to only those that are running\n ingest_jobs = [\n job for job in ingest_jobs if job.status == IngestionJobStatus.RUNNING\n ]\n assert len(ingest_jobs) >= 1\n\n ingest_job = ingest_jobs[0]\n # restart ingestion ingest_job\n # restart means stop current job\n # (replacement will be automatically spawned)\n jobcontroller_client.restart_ingest_job(ingest_job)\n # wait for replacement to be created\n time.sleep(15) # should be more than polling_interval\n\n # id without timestamp part\n # that remains the same between jobs\n shared_id = \"-\".join(ingest_job.id.split(\"-\")[:-1])\n ingest_jobs = jobcontroller_client.list_ingest_jobs(\n feature_set_ref=FeatureSetRef.from_feature_set(all_types_fs)\n )\n replacement_jobs = [\n job\n for job in ingest_jobs\n if job.status == IngestionJobStatus.RUNNING\n and job.id.startswith(shared_id)\n and job.id != ingest_job.id\n ]\n\n assert len(replacement_jobs) >= 1\n replacement_job = replacement_jobs[0]\n\n replacement_job.wait(IngestionJobStatus.RUNNING)\n assert replacement_job.status == IngestionJobStatus.RUNNING\n\n # stop ingestion ingest_job\n jobcontroller_client.stop_ingest_job(replacement_job)\n replacement_job.wait(IngestionJobStatus.ABORTED)\n assert replacement_job.status == IngestionJobStatus.ABORTED\n\n\[email protected](scope=\"module\")\ndef large_volume_dataframe():\n ROW_COUNT = 100000\n offset = random.randint(1000000, 10000000) # ensure a unique key space\n customer_data = pd.DataFrame(\n {\n \"datetime\": [\n datetime.utcnow().replace(tzinfo=pytz.utc) for _ in range(ROW_COUNT)\n ],\n \"customer_id\": [offset + inc for inc in range(ROW_COUNT)],\n \"daily_transactions_large\": [np.random.rand() for _ in range(ROW_COUNT)],\n \"total_transactions_large\": [256 for _ in range(ROW_COUNT)],\n }\n )\n return customer_data\n\n\[email protected](45)\[email protected](order=40)\ndef test_large_volume_register_feature_set_success(client):\n cust_trans_fs_expected = FeatureSet.from_yaml(\n f\"{DIR_PATH}/large_volume/cust_trans_large_fs.yaml\"\n )\n\n # Register feature set\n client.apply(cust_trans_fs_expected)\n\n # Feast Core needs some time to fully commit the FeatureSet applied\n # when there is no existing job yet for the Featureset\n time.sleep(10)\n cust_trans_fs_actual = client.get_feature_set(name=\"customer_transactions_large\")\n\n assert cust_trans_fs_actual == cust_trans_fs_expected\n\n if cust_trans_fs_actual is None:\n raise Exception(\n \"Client cannot retrieve 'customer_transactions' FeatureSet \"\n \"after registration. 
Either Feast Core does not save the \"\n \"FeatureSet correctly or the client needs to wait longer for FeatureSet \"\n \"to be committed.\"\n )\n\n\[email protected](300)\[email protected](order=41)\ndef test_large_volume_ingest_success(client, large_volume_dataframe):\n # Get large volume feature set\n cust_trans_fs = client.get_feature_set(name=\"customer_transactions_large\")\n\n # Ingest customer transaction data\n client.ingest(cust_trans_fs, large_volume_dataframe)\n\n\[email protected](90)\[email protected](order=42)\ndef test_large_volume_retrieve_online_success(client, large_volume_dataframe):\n # Poll serving for feature values until the correct values are returned\n feature_refs = [\n \"daily_transactions_large\",\n \"total_transactions_large\",\n ]\n while True:\n response = client.get_online_features(\n entity_rows=[\n GetOnlineFeaturesRequest.EntityRow(\n fields={\n \"customer_id\": Value(\n int64_val=large_volume_dataframe.iloc[0][\"customer_id\"]\n )\n }\n )\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n is_ok = all(\n [\n check_online_response(ref, large_volume_dataframe, response)\n for ref in feature_refs\n ]\n )\n return None, is_ok\n\n\[email protected](scope=\"module\")\ndef all_types_parquet_file():\n COUNT = 20000\n\n df = pd.DataFrame(\n {\n \"datetime\": [datetime.utcnow() for _ in range(COUNT)],\n \"customer_id\": [np.int32(random.randint(0, 10000)) for _ in range(COUNT)],\n \"int32_feature_parquet\": [\n np.int32(random.randint(0, 10000)) for _ in range(COUNT)\n ],\n \"int64_feature_parquet\": [\n np.int64(random.randint(0, 10000)) for _ in range(COUNT)\n ],\n \"float_feature_parquet\": [np.float(random.random()) for _ in range(COUNT)],\n \"double_feature_parquet\": [\n np.float64(random.random()) for _ in range(COUNT)\n ],\n \"string_feature_parquet\": [\n \"one\" + str(random.random()) for _ in range(COUNT)\n ],\n \"bytes_feature_parquet\": [b\"one\" for _ in range(COUNT)],\n \"int32_list_feature_parquet\": [\n np.array([1, 2, 3, random.randint(0, 10000)], dtype=np.int32)\n for _ in range(COUNT)\n ],\n \"int64_list_feature_parquet\": [\n np.array([1, random.randint(0, 10000), 3, 4], dtype=np.int64)\n for _ in range(COUNT)\n ],\n \"float_list_feature_parquet\": [\n np.array([1.1, 1.2, 1.3, random.random()], dtype=np.float32)\n for _ in range(COUNT)\n ],\n \"double_list_feature_parquet\": [\n np.array([1.1, 1.2, 1.3, random.random()], dtype=np.float64)\n for _ in range(COUNT)\n ],\n \"string_list_feature_parquet\": [\n np.array([\"one\", \"two\" + str(random.random()), \"three\"])\n for _ in range(COUNT)\n ],\n \"bytes_list_feature_parquet\": [\n np.array([b\"one\", b\"two\", b\"three\"]) for _ in range(COUNT)\n ],\n }\n )\n\n # TODO: Boolean list is not being tested.\n # https://github.com/feast-dev/feast/issues/341\n\n file_path = os.path.join(tempfile.mkdtemp(), \"all_types.parquet\")\n df.to_parquet(file_path, allow_truncated_timestamps=True)\n return file_path\n\n\[email protected](300)\[email protected](order=50)\ndef test_all_types_parquet_register_feature_set_success(client):\n # Load feature set from file\n all_types_parquet_expected = FeatureSet.from_yaml(\n f\"{DIR_PATH}/all_types_parquet/all_types_parquet.yaml\"\n )\n\n # Register feature set\n client.apply(all_types_parquet_expected)\n\n # Feast Core needs some time to fully commit the FeatureSet applied\n # when there is no existing job yet for the Featureset\n time.sleep(30)\n\n all_types_parquet_actual = client.get_feature_set(name=\"all_types_parquet\")\n\n assert 
all_types_parquet_actual == all_types_parquet_expected\n\n if all_types_parquet_actual is None:\n raise Exception(\n \"Client cannot retrieve 'customer_transactions' FeatureSet \"\n \"after registration. Either Feast Core does not save the \"\n \"FeatureSet correctly or the client needs to wait longer for FeatureSet \"\n \"to be committed.\"\n )\n\n\[email protected](600)\[email protected](order=51)\ndef test_all_types_infer_register_ingest_file_success(client, all_types_parquet_file):\n # Get feature set\n all_types_fs = client.get_feature_set(name=\"all_types_parquet\")\n\n # Ingest user embedding data\n client.ingest(feature_set=all_types_fs, source=all_types_parquet_file)\n\n\[email protected](200)\[email protected](order=60)\ndef test_list_entities_and_features(client):\n customer_entity = Entity(\"customer_id\", ValueType.INT64)\n driver_entity = Entity(\"driver_id\", ValueType.INT64)\n\n customer_feature_rating = Feature(\n name=\"rating\", dtype=ValueType.FLOAT, labels={\"key1\": \"val1\"}\n )\n customer_feature_cost = Feature(name=\"cost\", dtype=ValueType.FLOAT)\n driver_feature_rating = Feature(name=\"rating\", dtype=ValueType.FLOAT)\n driver_feature_cost = Feature(\n name=\"cost\", dtype=ValueType.FLOAT, labels={\"key1\": \"val1\"}\n )\n\n filter_by_project_entity_labels_expected = dict(\n [(\"customer:rating\", customer_feature_rating)]\n )\n\n filter_by_project_entity_expected = dict(\n [(\"driver:cost\", driver_feature_cost), (\"driver:rating\", driver_feature_rating)]\n )\n\n filter_by_project_labels_expected = dict(\n [\n (\"customer:rating\", customer_feature_rating),\n (\"driver:cost\", driver_feature_cost),\n ]\n )\n\n customer_fs = FeatureSet(\n \"customer\",\n features=[customer_feature_rating, customer_feature_cost],\n entities=[customer_entity],\n max_age=Duration(seconds=100),\n )\n\n driver_fs = FeatureSet(\n \"driver\",\n features=[driver_feature_rating, driver_feature_cost],\n entities=[driver_entity],\n max_age=Duration(seconds=100),\n )\n\n client.set_project(PROJECT_NAME)\n client.apply(customer_fs)\n client.apply(driver_fs)\n\n # Test for listing of features\n # Case 1: Filter by: project, entities and labels\n filter_by_project_entity_labels_actual = client.list_features_by_ref(\n project=PROJECT_NAME, entities=[\"customer_id\"], labels={\"key1\": \"val1\"}\n )\n\n # Case 2: Filter by: project, entities\n filter_by_project_entity_actual = client.list_features_by_ref(\n project=PROJECT_NAME, entities=[\"driver_id\"]\n )\n\n # Case 3: Filter by: project, labels\n filter_by_project_labels_actual = client.list_features_by_ref(\n project=PROJECT_NAME, labels={\"key1\": \"val1\"}\n )\n\n assert set(filter_by_project_entity_labels_expected) == set(\n filter_by_project_entity_labels_actual\n )\n assert set(filter_by_project_entity_expected) == set(\n filter_by_project_entity_actual\n )\n assert set(filter_by_project_labels_expected) == set(\n filter_by_project_labels_actual\n )\n\n\[email protected](500)\[email protected](order=70)\ndef test_sources_deduplicate_ingest_jobs(client, jobcontroller_client, kafka_brokers):\n shared_source = KafkaSource(kafka_brokers, \"dup_shared\")\n dup_source_fs_1 = FeatureSet(\n name=\"duplicate_source_fs_1\",\n features=[Feature(\"fs1\", ValueType.FLOAT), Feature(\"fs2\", ValueType.FLOAT)],\n entities=[Entity(\"e2\", ValueType.INT64)],\n source=shared_source,\n )\n dup_source_fs_2 = copy(dup_source_fs_1)\n dup_source_fs_2.name = \"duplicate_source_fs_2\"\n\n def is_same_jobs():\n fs_1_jobs = jobcontroller_client.list_ingest_jobs(\n 
feature_set_ref=FeatureSetRef(\n name=dup_source_fs_1.name, project=dup_source_fs_1.project\n )\n )\n fs_2_jobs = jobcontroller_client.list_ingest_jobs(\n feature_set_ref=FeatureSetRef(\n name=dup_source_fs_2.name, project=dup_source_fs_2.project\n )\n )\n same = True\n if not (len(fs_1_jobs) > 0 and len(fs_1_jobs) == len(fs_2_jobs)):\n same = False\n for fs_1_job in fs_1_jobs:\n for fs_2_job in fs_2_jobs:\n if (\n not fs_1_job.source.to_proto() == fs_2_job.source.to_proto()\n and fs_1_job.source.to_proto() == shared_source.to_proto()\n ):\n same = False\n if fs_1_job.id != fs_2_job.id:\n same = False\n return same\n\n def is_different_jobs():\n fs_1_jobs = jobcontroller_client.list_ingest_jobs(\n feature_set_ref=FeatureSetRef(\n name=dup_source_fs_1.name, project=dup_source_fs_1.project\n )\n )\n fs_2_jobs = jobcontroller_client.list_ingest_jobs(\n feature_set_ref=FeatureSetRef(\n name=dup_source_fs_2.name, project=dup_source_fs_2.project\n )\n )\n different = True\n if not (len(fs_1_jobs) > 0 and len(fs_2_jobs) > 0):\n different = False\n for fs_1_job in fs_1_jobs:\n if fs_1_job.source.to_proto() == alt_source.to_proto():\n different = False\n for fs_2_job in fs_2_jobs:\n if fs_2_job.source.to_proto() == shared_source.to_proto():\n different = False\n for fs_1_job in fs_1_jobs:\n for fs_2_job in fs_2_jobs:\n if fs_1_job.id == fs_2_job.id:\n different = False\n return different\n\n # register multiple feature sets with the same source\n # only one ingest job should spawned due to test ingest job deduplication\n client.apply(dup_source_fs_1)\n client.apply(dup_source_fs_2)\n\n while not is_same_jobs():\n time.sleep(1)\n\n # update feature sets with different sources, should have different jobs\n alt_source = KafkaSource(kafka_brokers, \"alt_source\")\n dup_source_fs_2.source = alt_source\n client.apply(dup_source_fs_2)\n\n while not is_different_jobs():\n time.sleep(1)\n\n # update feature sets with same source again, should have the same job\n dup_source_fs_2.source = shared_source\n client.apply(dup_source_fs_2)\n\n while not is_same_jobs():\n time.sleep(1)\n\n\[email protected](order=30)\ndef test_sink_writes_only_recent_rows(client):\n client.set_project(\"default\")\n\n feature_refs = [\"driver:rating\", \"driver:cost\"]\n\n later_df = basic_dataframe(\n entities=[\"driver_id\"],\n features=[\"rating\", \"cost\"],\n ingest_time=datetime.utcnow(),\n n_size=5,\n )\n\n earlier_df = basic_dataframe(\n entities=[\"driver_id\"],\n features=[\"rating\", \"cost\"],\n ingest_time=datetime.utcnow() - timedelta(minutes=5),\n n_size=5,\n )\n\n def try_get_features():\n response = client.get_online_features(\n entity_rows=[\n GetOnlineFeaturesRequest.EntityRow(\n fields={\"driver_id\": Value(int64_val=later_df.iloc[0][\"driver_id\"])}\n )\n ],\n feature_refs=feature_refs,\n ) # type: GetOnlineFeaturesResponse\n is_ok = all(\n [check_online_response(ref, later_df, response) for ref in feature_refs]\n )\n return response, is_ok\n\n # test compaction within batch\n client.ingest(\"driver\", pd.concat([earlier_df, later_df]))\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n # test read before write\n client.ingest(\"driver\", earlier_df)\n time.sleep(10)\n wait_retry_backoff(\n retry_fn=try_get_features,\n timeout_secs=90,\n timeout_msg=\"Timed out trying to get online feature values\",\n )\n\n\n# TODO: rewrite these using python SDK once the labels are implemented there\nclass TestsBasedOnGrpc:\n 
GRPC_CONNECTION_TIMEOUT = 3\n LABEL_KEY = \"my\"\n LABEL_VALUE = \"label\"\n\n @pytest.fixture(scope=\"module\")\n def core_service_stub(self, core_url):\n if core_url.endswith(\":443\"):\n core_channel = grpc.secure_channel(core_url, grpc.ssl_channel_credentials())\n else:\n core_channel = grpc.insecure_channel(core_url)\n\n try:\n grpc.channel_ready_future(core_channel).result(\n timeout=self.GRPC_CONNECTION_TIMEOUT\n )\n except grpc.FutureTimeoutError:\n raise ConnectionError(\n f\"Connection timed out while attempting to connect to Feast \"\n f\"Core gRPC server {core_url} \"\n )\n core_service_stub = CoreServiceStub(core_channel)\n return core_service_stub\n\n @pytest.fixture(scope=\"module\")\n def auth_meta_data(self, enable_auth):\n if not enable_auth:\n return None\n else:\n metadata = {CONFIG_AUTH_PROVIDER: AUTH_PROVIDER}\n metadata_plugin = get_auth_metadata_plugin(config=Config(metadata))\n return metadata_plugin.get_signed_meta()\n\n def apply_feature_set(self, core_service_stub, feature_set_proto, auth_meta_data):\n try:\n apply_fs_response = core_service_stub.ApplyFeatureSet(\n CoreService_pb2.ApplyFeatureSetRequest(feature_set=feature_set_proto),\n timeout=self.GRPC_CONNECTION_TIMEOUT,\n metadata=auth_meta_data,\n ) # type: ApplyFeatureSetResponse\n except grpc.RpcError as e:\n raise grpc.RpcError(e.details())\n return apply_fs_response.feature_set\n\n def get_feature_set(self, core_service_stub, name, project, auth_meta_data):\n try:\n get_feature_set_response = core_service_stub.GetFeatureSet(\n CoreService_pb2.GetFeatureSetRequest(\n project=project, name=name.strip(),\n ),\n metadata=auth_meta_data,\n ) # type: GetFeatureSetResponse\n except grpc.RpcError as e:\n raise grpc.RpcError(e.details())\n return get_feature_set_response.feature_set\n\n @pytest.mark.timeout(45)\n @pytest.mark.run(order=51)\n def test_register_feature_set_with_labels(self, core_service_stub, auth_meta_data):\n feature_set_name = \"test_feature_set_labels\"\n feature_set_proto = FeatureSet(\n name=feature_set_name,\n project=PROJECT_NAME,\n labels={self.LABEL_KEY: self.LABEL_VALUE},\n ).to_proto()\n self.apply_feature_set(core_service_stub, feature_set_proto, auth_meta_data)\n\n retrieved_feature_set = self.get_feature_set(\n core_service_stub, feature_set_name, PROJECT_NAME, auth_meta_data\n )\n\n assert self.LABEL_KEY in retrieved_feature_set.spec.labels\n assert retrieved_feature_set.spec.labels[self.LABEL_KEY] == self.LABEL_VALUE\n\n @pytest.mark.timeout(45)\n @pytest.mark.run(order=52)\n def test_register_feature_with_labels(self, core_service_stub, auth_meta_data):\n feature_set_name = \"test_feature_labels\"\n feature_set_proto = FeatureSet(\n name=feature_set_name,\n project=PROJECT_NAME,\n features=[\n Feature(\n name=\"rating\",\n dtype=ValueType.INT64,\n labels={self.LABEL_KEY: self.LABEL_VALUE},\n )\n ],\n ).to_proto()\n self.apply_feature_set(core_service_stub, feature_set_proto, auth_meta_data)\n\n retrieved_feature_set = self.get_feature_set(\n core_service_stub, feature_set_name, PROJECT_NAME, auth_meta_data\n )\n retrieved_feature = retrieved_feature_set.spec.features[0]\n\n assert self.LABEL_KEY in retrieved_feature.labels\n assert retrieved_feature.labels[self.LABEL_KEY] == self.LABEL_VALUE\n"
] |
[
[
"pandas.concat",
"numpy.int32",
"pandas.DataFrame",
"numpy.int64",
"numpy.random.rand",
"numpy.float64",
"numpy.array",
"numpy.float"
]
] |
QuVil/mon-bot-le-dj
|
[
"e9320fc19b1665dbb023c5eac015a208ba612750",
"e9320fc19b1665dbb023c5eac015a208ba612750"
] |
[
"src/ach.py",
"src/muzik.py"
] |
[
"import time\n\nimport pandas as pd\nimport numpy as np\nfrom googleapiclient.discovery import build\nfrom google.oauth2.service_account import Credentials\n\nfrom src.util import create_cache_dir, cache\n\nCREDENTIALS_PATH_GOOGLE = 'google-credentials.json'\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\nSPREADSHEET = '1b75J-QTGrujSgF9r0_JPOKkcXAwzFVwpETOAyVBw8ak'\nACH_SHEETS = \"achmusik.pkl\"\nACH_SHEET_NAME = \"Notations\"\nACH_SHEET_ID = 0\nAPI_PREFIX = \"api:\"\n\n\nclass Ach:\n\n def __init__(self):\n create_cache_dir()\n\n def __check_empty_row(self):\n \"\"\"Simple sanity check to see if there is rows with missing\n artist, album, song\n \"\"\"\n # Extract only the indexes and put them in a DataFrame\n df_index = self.ach.index.to_frame(index=False)\n # Check if there are rows with ONLY nan values\n # and get their indexes\n empty = df_index[\n df_index.replace(r\"^\\s$\", value=np.NaN, regex=True)\n .isnull()\n .all(axis=1)\n ].index\n if len(empty) > 0:\n print(\"WARNING some empty rows in the datasheet:\")\n for idx in empty:\n # Display the indexes, need to shift the result by\n # 2 because arrays start at 1 lol (not in Sheets)\n # and the header doesn't count\n print(f\"Empty row at index {idx + 2}\")\n\n def __check_for_duplicates(self):\n \"\"\"Simple sanity check to see if there are duplicates in the sheet\n \"\"\"\n duplicates = self.ach.index[self.ach.index.duplicated()].tolist()\n if len(duplicates) > 0:\n print(\"WARNING some duplicated in the datasheet:\")\n for duplicate in duplicates:\n print(duplicate)\n\n def __column_to_letter(self, idx):\n \"\"\"Helper function to _translate_ an integer column index to a\n letter index\n \"\"\"\n character = chr(ord('A') + idx % 26)\n remainder = idx // 26\n if idx >= 26:\n return self.__column_to_letter(remainder-1) + character\n else:\n return character\n\n def __get_api_columns(self, headers):\n \"\"\"Retrieve the column index for the APIs missing id list\n \"\"\"\n api_col = {}\n for idx, name in enumerate(headers):\n if API_PREFIX in name:\n api_col[name] = {\n \"letter\": self.__column_to_letter(idx),\n \"index\": idx\n }\n self.api_columns = api_col\n\n def __drop_api_columns(self, ach):\n \"\"\"Remove the api missing id column from the ach sheet\"\"\"\n return ach.drop(self.api_columns, axis=1)\n\n def __load_from_cache(self):\n \"\"\"Load the sheet from the cache\"\"\"\n print(\"Reading from cache\")\n return pd.read_pickle(cache(ACH_SHEETS))\n\n def __load_from_google(self):\n \"\"\"Fetches the sheet from the google API\"\"\"\n # Load service account credentials.\n credentials = Credentials.from_service_account_file(\n CREDENTIALS_PATH_GOOGLE, scopes=SCOPES)\n\n # Creates Google Sheets API (v4/latest) service.\n self.service = build('sheets', 'v4', credentials=credentials)\n # Gets values from Ach! 
Musik: Notations sheet.\n values = self.service.spreadsheets().values()\\\n .get(spreadsheetId=SPREADSHEET, range=ACH_SHEET_NAME)\\\n .execute()['values']\n headers = values.pop(0)\n # Get the api column index\n self.__get_api_columns(headers)\n # Format data as pd.DataFrame\n ach = pd.DataFrame.from_records(values)\n # Remove any _additional_ columns (usually the one with)\n # comments in them\n if ach.shape[1] > len(headers):\n ach.drop(ach.columns[len(headers):], axis=1, inplace=True)\n # Apply the columns and the index\n ach.columns = headers\n ach.set_index(['genre', 'sub_genre', 'artist', 'album', 'song'],\n inplace=True)\n # Remove the APIs missing id list column\n ach = self.__drop_api_columns(ach)\n return ach\n\n def get_sheets(self):\n \"\"\"Returns the sheet, checks if it can get it from Google\n directly, otherwise tries to get the one from the cache\"\"\"\n # Check if we get the sheet from Google (last updated version)\n self.updated = False\n try:\n self.ach = self.__load_from_google()\n self.ach.to_pickle(cache(ACH_SHEETS))\n self.updated = True\n except Exception:\n print(\"Error while reading from google\")\n self.ach = self.__load_from_cache()\n # Sanity checks\n self.__check_empty_row()\n self.__check_for_duplicates()\n return self.ach\n\n def update_missing(self, ids: pd.Series, api_name: str):\n \"\"\"Update the API missing id columns list\"\"\"\n if not self.updated:\n # exit the function since we may have a version not\n # up to date\n raise Exception(\"Cannot update tracks from a not \"\n \"updated version of the sheet\")\n # admit that we have the last updated version of the sheet\n print(\"Updating missing songs...\")\n # get the index order from the updated version of the sheet\n ordered_index = self.ach.index\n # get the column where to write the missing list\n column = self.api_columns[f\"{API_PREFIX}{api_name}\"]\n # reindex the ids list with the updated ordered index\n # and fill the empty values with none\n ids_strings = ids.reindex(ordered_index, fill_value=\"none\")\n payload = {\n \"majorDimension\": \"ROWS\",\n # needs to be a 2d array\n \"values\": ids_strings.values.reshape(-1, 1).tolist()\n }\n # calculate the range to write the list\n range_ = (f\"{ACH_SHEET_NAME}!{column['letter']}2:\"\n f\"{column['letter']}{len(ids_strings)+1}\")\n # push the list\n self.service.spreadsheets()\\\n .values()\\\n .update(spreadsheetId=SPREADSHEET,\n range=range_,\n valueInputOption=\"RAW\",\n body=payload)\\\n .execute()\n # update the note\n self.__update_cell_note(column)\n\n def __update_cell_note(self, column):\n \"\"\"Update the note in the header of the missing ids columns list\n to know when the last ids were updated\"\"\"\n print(\"Updating update note...\")\n # Get the note string\n note = f\"Last updated : {time.ctime()}\"\n # create the payload (cmon google...)\n notes = {\n \"updateCells\": {\n \"fields\": \"note\",\n \"range\": {\n \"sheetId\": ACH_SHEET_ID,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": column['index'],\n \"endColumnIndex\": column['index'] + 1\n },\n \"rows\": [\n {\n \"values\": [\n {\n \"note\": note\n }\n ]\n }\n ],\n }\n }\n body = {\"requests\": [notes]}\n # push the note\n self.service.spreadsheets()\\\n .batchUpdate(spreadsheetId=SPREADSHEET,\n body=body)\\\n .execute()\n",
"\nimport math\nimport json\nimport hashlib\nimport os\nimport base64\n\nimport pandas as pd\nimport numpy as np\nfrom spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth\nimport spotipy\nfrom spotipy import SpotifyException\n\nfrom src.color import Color\nfrom src.util import create_cache_dir, CACHE_DIR\n\nACH_IDS = \"ids.pkl\"\nMISSING_IDS = \"missing.csv\"\nCRED_PATH_SPOTIFY = \"credentials-spotify.json\"\nAPI_NAME = \"Spotify\"\nUNAUTHORIZED_ST_CODE = 401\nMAX_TRACK_PER_REQUESTS = 100\nMARKETS = [\"FR\", \"US\"]\nPLAYLIST_NAME = \"Mon Bot le DJ\"\nPLAYLIST_COVER = \"data/playlist_cover.jpg\"\nPLAYLIST_DESC = \"Auto generated playlist for the\"\\\n \" project mon-bot-le-dj, visit\"\\\n \" https://github.com/QuVil/mon-bot-le-dj\"\\\n \" for more information\"\n\n\nclass Muzik:\n\n def __init__(self, public_api=False):\n create_cache_dir()\n self.ids = self.__read_cached_ids()\n if public_api:\n self.__sp = self.__connect_spotify()\n self.__sp_user = self.__connect_spotify_user()\n self.__user_id = self.__sp_user.me()[\"id\"]\n self.name = API_NAME\n\n def __read_cached_ids(self) -> pd.Series:\n \"\"\"\n Read the cached already fetched ids from the cache folder\n either returns the cached pd.Series, or empty series if\n no file there\n \"\"\"\n path = CACHE_DIR + ACH_IDS\n if os.path.exists(path):\n print(f\"Reading data from cache file {path}\")\n df = pd.read_pickle(path)\n else:\n df = pd.Series()\n print(f\"Local library contains {len(df)} songs\")\n return df\n\n def __read_credentials(self):\n \"\"\"\n Opens and return the content of `CRED_PATH_SPOTIFY` as\n a python dict\n \"\"\"\n with open(CRED_PATH_SPOTIFY, 'r') as handle:\n data = json.load(handle)\n return data\n\n def __connect_spotify_user(self):\n \"\"\"\n Connect to the API using the spotify user credentials\n needs more informations than the other method, but can\n access to personnal informations (including playlists :o)\n of the user\n \"\"\"\n data = self.__read_credentials()\n # generate a unique random number to prevent csrf\n state = hashlib.sha256(os.urandom(1024)).hexdigest()\n self.__user_credentials = SpotifyOAuth(\n **data,\n state=state,\n )\n return self.__get_spotify_user(\n self.__user_credentials.get_access_token(as_dict=(False))\n )\n\n def __get_spotify_user(self, token):\n \"\"\"\n Returns a Spotify client authentified with the provided\n token\n \"\"\"\n return spotipy.Spotify(\n auth=token\n )\n\n def __connect_spotify(self):\n \"\"\"\n Connect to the public API of Spotify, useful to fetch songs ids\n since the API limite rate is higher here, however not really\n useful to create playlists and stuff\n \"\"\"\n data = self.__read_credentials()\n auth = {}\n auth[\"client_id\"] = data[\"client_id\"]\n auth[\"client_secret\"] = data[\"client_secret\"]\n return spotipy.Spotify(auth_manager=SpotifyClientCredentials(\n **auth\n ))\n\n def __refresh_token(self):\n \"\"\"\n Refreshes the tokenn if it has expired or not\n and updates the sp_user Spotify Interface with\n the new token\n \"\"\"\n cached = self.__user_credentials.get_cached_token()\n refreshed = self.__user_credentials.refresh_access_token(\n cached[\"refresh_token\"]\n )\n self.__sp_user = self.__get_spotify_user(\n refreshed[\"access_token\"]\n )\n\n def __update_token(self):\n \"\"\"\n Updates the token if it has expired\n !! 
may not work flawlessely (probably not in fact),\n hard to test since the token lasts for 1 hour haha\n \"\"\"\n # built-in function, does not work always good\n cached = self.__user_credentials.get_cached_token()\n if self.__user_credentials.is_token_expired(cached):\n print(\"Token expired, refreshing now\")\n self.__refresh_token()\n # handmade function just in case the one above fails\n try:\n _ = self.__sp_user.me()\n except SpotifyException as e:\n if e.http_status == UNAUTHORIZED_ST_CODE:\n print(\"Token expired, refreshing now\")\n self.__refresh_token()\n\n def __search_strings(self, row):\n \"\"\"\n Creates the search string for the Spotify API based on\n the information of the row\n Returns a list of multiple strings, to take into account\n if there is special characters (like \"'\")\n or multiple markets\n input:\n - row : pd.Series with genre, artists, songs,...\n output:\n - searches : list of tuples (search string, market)\n \"\"\"\n search = \"\"\n # artists\n artists = list(map(str.strip, row.artist.split(\",\")))\n if len(artists) > 1:\n sep = '\" AND \"'\n search += f\"artist:\\\"{sep.join(artists)}\\\"\"\n else:\n search += f\"artist:\\\"{artists[0]}\\\"\"\n # album\n if row.album != \"N/A\":\n search += f\" album:\\\"{row.album}\\\"\"\n # track name\n search += f\" track:\\\"{row.song}\\\"\"\n # dealing with \"'\"\"\n # sometimes it will work with the \"'\" and sometimes not\n if \"'\" in search:\n searches_s = [search, search.replace(\"'\", \"\")]\n else:\n searches_s = [search]\n searches = []\n for market in MARKETS:\n for search in searches_s:\n searches.append((search, market))\n return searches\n\n def __fetch_id(self, df):\n \"\"\"\n Fetches the Spotify songs id for each provided songs\n If it cannot find ids for a song, it will be set to None\n input:\n - df : a pd.DataFrame with a random index and the\n song specific columns (genre, artist, ...)\n \"\"\"\n # small hack to access the data from the index & the columns\n indexs = pd.MultiIndex.from_frame(df)\n songs = pd.DataFrame(data=df.values, index=indexs,\n columns=df.columns).dropna(how=\"all\")\n ids = pd.Series(index=indexs,\n dtype=str, name=\"ids\")\n bad_formats = []\n # chosing the endpoint\n # by default try to take the public one, if doesn't exists\n # (public_api = False), use the private one\n try:\n endpoint = self.__sp\n except AttributeError:\n endpoint = self.__sp_user\n # format string padding used for the debug output\n str_format = int(math.log(len(songs), 10)) + 1\n for idx, (_, content) in enumerate(songs.iterrows()):\n searches = self.__search_strings(content)\n bad_format = []\n for search, market in searches:\n try:\n res = endpoint.search(search, market=market)\n track = res['tracks']['items'][0]\n except IndexError:\n bad_format.append((search, market))\n else:\n # succeed to fetch an id\n break\n else:\n # did not managed to find an id with all the search strings\n # provided, set the id of the song to None\n bad_formats.append(bad_format)\n ids.iloc[idx] = None\n print(f\"{Color.FAIL}\"\n f\"{idx + 1:<{str_format}}/{len(df)}\"\n f\"{Color.ENDC}\"\n f\" : {search} not in Spotify\")\n continue\n album = track['album']['name']\n name = track['name']\n artist = track['artists'][0]['name']\n id = track['id']\n ids.iloc[idx] = id\n print(f\"{Color.OKGREEN}\"\n f\"{idx + 1:<{str_format}}/{len(df)}\"\n f\"{Color.ENDC}\"\n f\" : {id} {name} {artist} {album}\")\n return ids\n\n def __update_missing_list(self):\n \"\"\"\n Create a csv file containing every tracks that were not\n available on 
spotify\n \"\"\"\n missing = self.ids[self.ids.isnull()]\n missing.index.to_frame(index=False).to_csv(CACHE_DIR + MISSING_IDS)\n\n def __create_user_playlist(self):\n \"\"\"\n Creates a new playlist using PLAYLIST_NAME, PLAYLIST_DESC\n also pushes playlist cover PLAYLIST_COVER\n return:\n - playlist_id : string containing the id of the playlist\n \"\"\"\n # create the playlist with name, description, visibility\n print(f\"Creating {PLAYLIST_NAME}...\")\n ret = self.__sp_user.user_playlist_create(user=self.__user_id,\n name=PLAYLIST_NAME,\n public=True,\n description=PLAYLIST_DESC)\n playlist_id = ret[\"id\"]\n # most important, upload the playlist image\n print(f\"Uploading playlist cover from {PLAYLIST_COVER}\")\n with open(PLAYLIST_COVER, \"rb\") as image_file:\n cover = base64.b64encode(image_file.read())\n ret = self.__sp_user.playlist_upload_cover_image(playlist_id, cover)\n return playlist_id\n\n def __get_playlist_id(self):\n \"\"\"\n Returns the playlist id of PLAYLIST_NAME\n if the playlist doesn't exists yet, it will create it\n and return the id of the newly created playlist\n \"\"\"\n # check if the playlist already exists\n user_playlists = self.__sp_user.user_playlists(self.__user_id)\n playlist_id = None\n if len(user_playlists[\"items\"]) > 0:\n for user_pl in user_playlists[\"items\"]:\n if user_pl[\"name\"] == PLAYLIST_NAME:\n playlist_id = user_pl[\"id\"]\n break\n # at this point, if the playlist exists, the id is stored in\n # playlist_id, otherwise we have still a None value\n if playlist_id is None:\n print(f\"Playlist {PLAYLIST_NAME} doesn't exists yet\")\n playlist_id = self.__create_user_playlist()\n print(f\"Using playlist {PLAYLIST_NAME} : {playlist_id}\")\n return playlist_id\n\n def update(self, ach):\n \"\"\"\n updates the known list of ids with the newer version of the\n ach musik sheet\n input:\n - ach : raw sheet from google with multiindex\n \"\"\"\n self.__update_token()\n # turn the index to DataFrame objects\n new_songs = ach.index.to_frame().reset_index(drop=True)\n if self.ids.empty:\n # in case the cached list was empty, simply fetch the whole\n # list\n self.ids = self.__fetch_id(new_songs)\n else:\n old_songs = self.ids.index.to_frame().reset_index(drop=True)\n # get the list of the common values\n common_songs = new_songs.merge(old_songs, how='inner')\n # remove the songs that are not anymore in the cached df\n depr = pd.concat([common_songs, old_songs]\n ).drop_duplicates(keep=False)\n to_remove = pd.MultiIndex.from_frame(depr)\n if len(to_remove) > 0:\n self.ids = self.ids.drop(to_remove)\n # adds the new songs from the ach sheet\n news = pd.concat([common_songs, new_songs]\n ).drop_duplicates(keep=False)\n if len(news) > 0:\n new_ids = self.__fetch_id(news)\n self.ids = pd.concat([self.ids, new_ids])\n else:\n print(\"Local list already updated\")\n # save updated list in cache\n self.ids.to_pickle(CACHE_DIR + ACH_IDS)\n # also updates missing ID list\n self.__update_missing_list()\n return self.ids[~self.ids.isnull()]\n\n def create_playlist(self, playlist):\n \"\"\"\n Create (or replace) a playlist containing all the songs provided\n in the playlists DataFrame\n input:\n - playlist : pd.DataFrame indexed by a MultiIndex with\n genre, artist, song, ...\n \"\"\"\n self.__update_token()\n # get the playlist id of PLAYLIST_NAME\n playlist_id = self.__get_playlist_id()\n # get the tracks\n print(self.ids[self.ids.index.duplicated()])\n tracks_all = self.ids[playlist.index]\n tracks_results = tracks_all.isnull().value_counts()\n print(f\"Adding 
{tracks_results[False]} tracks\")\n if True in tracks_results:\n # some tracks are missing\n print(f\" Missing {tracks_results[True]} tracks\")\n tracks_id = tracks_all.dropna().values\n print(f\"Inserting {len(tracks_id)} songs in the playlist...\")\n # spotify api \"only\" handles 100 tracks by requests\n # so here we split the data\n batch_size = int(len(tracks_id)/MAX_TRACK_PER_REQUESTS) + 1\n batches = np.array_split(tracks_id, batch_size)\n str_format = int(math.log(len(batches), 10)) + 1\n print(f\"{0:<{str_format}}/{len(batches)} batch inserting...\")\n # the first call `replace_tracks` clear the playlist AND\n # adds the supplied tracks\n self.__sp_user.user_playlist_replace_tracks(\n self.__user_id,\n playlist_id=playlist_id,\n tracks=batches[0]\n )\n if len(batches) > 1:\n for idx, batch in enumerate(batches[1:]):\n print(f\"{idx+2:<{str_format}}/{len(batches)}\"\n \" batch inserting...\")\n # add the rest of the tracks\n self.__sp_user.user_playlist_add_tracks(\n self.__user_id,\n playlist_id=playlist_id,\n tracks=batch\n )\n\n print(\"Playlist done\")\n"
] |
[
[
"pandas.DataFrame.from_records"
],
[
"pandas.concat",
"pandas.MultiIndex.from_frame",
"pandas.Series",
"pandas.DataFrame",
"numpy.array_split",
"pandas.read_pickle"
]
] |
SimonTheVillain/ActiveStereoNet
|
[
"708bddce844998b366be1a1ec8a72a31ccd26f8c",
"708bddce844998b366be1a1ec8a72a31ccd26f8c"
] |
[
"Data/pfm_helper.py",
"utils/cost_volume.py"
] |
[
"import re\nimport numpy as np\n \nimport pdb\n\ndef read_pfm(file):\n \n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n header = str(header, 'utf-8')\n \n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', str(file.readline(), 'utf-8'))\n \n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n data = data[np.newaxis,:,:].copy()\n return data, scale\n\n",
"import torch\nimport numpy as np\n\n#This seems like a valid implementation of StereoNet\n# https://github.com/zhixuanli/StereoNet/blob/master/utils/cost_volume.py\n\ndef CostVolume(input_feature, candidate_feature, position=\"left\", method=\"subtract\", k=4, batch_size=4, channel=32, D=192, H=256, W=512):\n \"\"\"\n Some parameters:\n position\n means whether the input feature img is left or right\n k\n the conv counts of the first stage, the feature extraction stage\n \"\"\"\n origin = input_feature # img shape : [batch_size, channel, H // 2**k, W // 2**k]\n candidate = candidate_feature\n \"\"\" if the input image is the left image, and needs to compare with the right candidate.\n Then it should move to left and pad in right\"\"\"\n if position == \"left\":\n leftMinusRightMove_List = []\n for disparity in range(D // 2**k):\n if disparity == 0:\n if method == \"subtract\":\n \"\"\" subtract method\"\"\"\n leftMinusRightMove = origin - candidate\n else:\n \"\"\" concat mathod \"\"\"\n leftMinusRightMove = torch.cat((origin, candidate), 1)\n leftMinusRightMove_List.append(leftMinusRightMove)\n else:\n zero_padding = np.zeros((origin.shape[0], channel, origin.shape[2], disparity))\n zero_padding = torch.from_numpy(zero_padding).float()\n zero_padding = zero_padding.cuda()\n\n left_move = torch.cat((origin, zero_padding), 3)\n\n if method == \"subtract\":\n \"\"\" subtract method\"\"\"\n leftMinusRightMove = left_move[:, :, :, :origin.shape[3]] - candidate\n else:\n \"\"\" concat mathod \"\"\"\n leftMinusRightMove = torch.cat((left_move[:, :, :, :origin.shape[3]], candidate), 1) # concat the channels\n\n leftMinusRightMove_List.append(leftMinusRightMove)\n cost_volume = torch.stack(leftMinusRightMove_List, dim=1) # [batch_size, count(disparitys), channel, H, W]\n\n return cost_volume"
] |
[
[
"numpy.reshape",
"numpy.fromfile",
"numpy.flipud"
],
[
"torch.stack",
"numpy.zeros",
"torch.from_numpy",
"torch.cat"
]
] |
CJWorkbench/arrow-tools
|
[
"1944e40853d82d7dad3d47a72958326cefff367a"
] |
[
"tests/util.py"
] |
[
"from contextlib import contextmanager, suppress\nimport pathlib\nimport os\nimport tempfile\nfrom typing import ContextManager\nimport unittest\nfrom pandas.testing import assert_series_equal\nimport pyarrow\n\n\ndef assert_table_equals(actual: pyarrow.Table, expected: pyarrow.Table) -> None:\n assertEqual = unittest.TestCase().assertEqual\n assertEqual(actual.num_rows, expected.num_rows)\n assertEqual(actual.num_columns, expected.num_columns)\n\n for (\n column_number,\n actual_name,\n actual_column,\n expected_name,\n expected_column,\n ) in zip(\n range(actual.num_columns),\n actual.column_names,\n actual.columns,\n expected.column_names,\n expected.columns,\n ):\n assertEqual(\n actual_name, expected_name, f\"column {column_number} has wrong name\"\n )\n assertEqual(\n actual_column.type,\n expected_column.type,\n f\"column {actual_name} has wrong type\",\n )\n actual_data = actual_column.to_pandas()\n expected_data = expected_column.to_pandas()\n assert_series_equal(\n actual_data, expected_data, f\"column {actual_name} has wrong data\"\n )\n\n\n@contextmanager\ndef empty_file(suffix: str = \"\") -> ContextManager[pathlib.Path]:\n \"\"\"Yield a path that will be deleted when exiting the context.\"\"\"\n fd, filename = tempfile.mkstemp(suffix=suffix)\n try:\n os.close(fd)\n yield pathlib.Path(filename)\n finally:\n with suppress(FileNotFoundError):\n os.unlink(filename)\n\n\n@contextmanager\ndef arrow_file(table: pyarrow.Table) -> ContextManager[pathlib.Path]:\n with empty_file(suffix=\".arrow\") as path:\n with path.open(\"wb\") as f:\n writer = pyarrow.RecordBatchFileWriter(f, table.schema)\n writer.write(table)\n writer.close()\n yield path\n"
] |
[
[
"pandas.testing.assert_series_equal"
]
] |
ZAKAUDD/-GEU-Net
|
[
"5251d329afb80c74328e72fd2fc21ff691ef3353"
] |
[
"model/sparse_switchable_norm.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nfrom utils.ssn_utils import sparsestmax\r\n\r\n\r\nclass SSN2d(nn.Module):\r\n def __init__(self, num_features, eps=1e-5, momentum=0.997, using_moving_average=True, last_gamma=False):\r\n super(SSN2d, self).__init__()\r\n self.eps = eps\r\n self.momentum = momentum\r\n self.using_moving_average = using_moving_average\r\n self.last_gamma = last_gamma\r\n self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))\r\n self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))\r\n\r\n self.mean_weight = nn.Parameter(torch.ones(3))\r\n self.var_weight = nn.Parameter(torch.ones(3))\r\n self.register_buffer('running_mean', torch.zeros(1, num_features, 1))\r\n self.register_buffer('running_var', torch.zeros(1, num_features, 1))\r\n\r\n self.rad = 0.\r\n self.register_buffer('mean_fixed', torch.LongTensor([0]))\r\n self.register_buffer('var_fixed', torch.LongTensor([0]))\r\n self.register_buffer('radius', torch.zeros(1))\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n self.running_mean.zero_()\r\n self.running_var.zero_()\r\n if self.last_gamma:\r\n self.weight.data.fill_(0)\r\n else:\r\n self.weight.data.fill_(1)\r\n self.mean_fixed.data.fill_(0)\r\n self.var_fixed.data.fill_(0)\r\n self.bias.data.zero_()\r\n\r\n def _check_input_dim(self, input):\r\n if input.dim() != 4:\r\n raise ValueError('expected 4D input (got {}D input)'\r\n .format(input.dim()))\r\n\r\n def forward(self, x):\r\n self._check_input_dim(x)\r\n N, C, H, W = x.size()\r\n x = x.view(N, C, -1)\r\n mean_in = x.mean(-1, keepdim=True)\r\n var_in = x.var(-1, keepdim=True)\r\n\r\n mean_ln = mean_in.mean(1, keepdim=True)\r\n temp = var_in + mean_in ** 2\r\n var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2\r\n\r\n if self.training:\r\n mean_bn = mean_in.mean(0, keepdim=True)\r\n var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2\r\n if self.using_moving_average:\r\n self.running_mean.mul_(self.momentum)\r\n self.running_mean.add_((1 - self.momentum) * mean_bn.data)\r\n self.running_var.mul_(self.momentum)\r\n self.running_var.add_((1 - self.momentum) * var_bn.data)\r\n else:\r\n self.running_mean.add_(mean_bn.data)\r\n self.running_var.add_(mean_bn.data ** 2 + var_bn.data)\r\n else:\r\n mean_bn = torch.autograd.Variable(self.running_mean)\r\n var_bn = torch.autograd.Variable(self.running_var)\r\n\r\n if not self.mean_fixed:\r\n self.mean_weight_ = sparsestmax(self.mean_weight, self.rad)\r\n if max(self.mean_weight_) - min(self.mean_weight_) >= 1:\r\n self.mean_fixed.data.fill_(1)\r\n self.mean_weight.data = self.mean_weight_.data\r\n self.mean_weight_ = self.mean_weight.detach()\r\n else:\r\n self.mean_weight_ = self.mean_weight.detach()\r\n\r\n if not self.var_fixed:\r\n self.var_weight_ = sparsestmax(self.var_weight, self.rad)\r\n if max(self.var_weight_) - min(self.var_weight_) >= 1:\r\n self.var_fixed.data.fill_(1)\r\n self.var_weight.data = self.var_weight_.data\r\n self.var_weight_ = self.var_weight.detach()\r\n else:\r\n self.var_weight_ = self.var_weight.detach()\r\n\r\n mean = self.mean_weight_[0] * mean_in + self.mean_weight_[1] * mean_ln + self.mean_weight_[2] * mean_bn\r\n var = self.var_weight_[0] * var_in + self.var_weight_[1] * var_ln + self.var_weight_[2] * var_bn\r\n\r\n x = (x - mean) / (var + self.eps).sqrt()\r\n x = x.view(N, C, H, W)\r\n return x * self.weight + self.bias\r\n\r\n def get_mean(self):\r\n return self.mean_weight_\r\n\r\n def get_var(self):\r\n return self.var_weight_\r\n\r\n def set_rad(self, rad):\r\n 
self.radius[0].fill_(rad)\r\n self.rad = torch.squeeze(self.radius)\r\n\r\n def get_rad(self):\r\n return torch.squeeze(self.radius)"
] |
[
[
"torch.LongTensor",
"torch.ones",
"torch.zeros",
"torch.squeeze",
"torch.autograd.Variable"
]
] |
Byung-June/oil_future_forecasting
|
[
"c94368e93cb6df52d53bdb3feb32265b922a0cd7"
] |
[
"data_preprocessing/ml_data_preprocessing/utils.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\n\n\ndef add_name(list_, name):\n list_ = list(list_)\n for i, elt in enumerate(list_):\n list_[i] = elt + name\n return list_\n\n\ndef make_lag(df, num_lags=10, fillna=True):\n concat = []\n if fillna:\n df = df.fillna(method='ffill')\n for lag in range(num_lags):\n lagged_df = df.shift(lag)\n name = \"_lag\" + str(lag)\n columns = add_name(lagged_df.columns, name)\n lagged_df.columns = columns\n concat.append(lagged_df)\n concat = pd.concat(concat, axis=1)\n return concat\n\n\ndef make_diff_lag(df, num_diffs=1, num_lags=10, fillna=True):\n concat = []\n if fillna:\n df = df.fillna(method='ffill')\n for lag in range(num_lags):\n lagged_df = df.diff(num_diffs).shift(lag)\n name = '_lag' + str(lag) + '_diff' + str(num_diffs)\n columns = add_name(lagged_df.columns, name)\n lagged_df.columns = columns\n concat.append(lagged_df)\n concat = pd.concat(concat, axis=1)\n return concat\n\n\ndef make_pct_change_lag(df, num_diffs=1, num_lags=10, fillna=True):\n concat = []\n if fillna:\n df = df.fillna(method='ffill')\n for lag in range(num_lags):\n lagged_df = df.pct_change(num_diffs).shift(lag)\n name = '_lag' + str(lag) + '_pct_change' + str(num_diffs)\n columns = add_name(lagged_df.columns, name)\n lagged_df.columns = columns\n concat.append(lagged_df)\n concat = pd.concat(concat, axis=1)\n return concat\n\n\ndef make_moving_average(df, num_rolls=10, fillna=True):\n if fillna:\n df = df.fillna(method='ffill')\n name = '_ma' + str(num_rolls)\n columns = add_name(df.columns, name)\n df = df.rolling(window=num_rolls).mean()\n df.columns = columns\n return df\n\n\ndef make_momentum(df, num_rolls=10, fillna=True):\n if fillna:\n df = df.fillna(method='ffill')\n name = '_momentum' + str(num_rolls)\n columns = add_name(df.columns, name)\n df = df.pct_change() + 1.0\n df = df.rolling(window=num_rolls).apply(np.prod, raw=True) - 1.0\n df.columns = columns\n return df\n\n\ndef make_std(df, num_rolls=10, fillna=True):\n if fillna:\n df = df.fillna(method='ffill')\n name = '_std' + str(num_rolls)\n columns = add_name(df.columns, name)\n df = df.rolling(window=num_rolls).std()\n df.columns = columns\n return df\n\n\ndef make_skew(df, num_rolls=10, fillna=True):\n if fillna:\n df = df.fillna(method='ffill')\n name = '_skew' + str(num_rolls)\n columns = add_name(df.columns, name)\n df = df.rolling(window=num_rolls).skew()\n df.columns = columns\n return df\n\n\ndef make_kurt(df, num_rolls=10, fillna=True):\n if fillna:\n df = df.fillna(method='ffill')\n name = '_kurtosis' + str(num_rolls)\n columns = add_name(df.columns, name)\n df = df.rolling(window=num_rolls).kurt()\n df.columns = columns\n return df\n\n\ndef lagging(\n df,\n num_diff_list=[1, 2, 3], num_lags=10,\n num_rolls_list=[10, 20, 30], scrambled=False\n):\n concat = []\n concat.append(make_lag(df, num_lags))\n\n for num_diff in num_diff_list:\n concat.append(\n make_diff_lag(\n df, num_diffs=num_diff, num_lags=num_lags\n )\n )\n concat.append(\n make_pct_change_lag(\n df, num_diffs=num_diff, num_lags=num_lags\n )\n )\n for num_rolls in num_rolls_list:\n concat.append(make_moving_average(df, num_rolls=num_rolls))\n concat.append(make_momentum(df, num_rolls=num_rolls))\n concat.append(make_std(df, num_rolls=num_rolls))\n concat.append(make_skew(df, num_rolls=num_rolls))\n concat.append(make_kurt(df, num_rolls=num_rolls))\n concat = pd.concat(concat, axis=1)\n if scrambled:\n concat = shuffle(concat, random_state=1)\n return concat\n\n\ndef get_r2_score(y_true, y_pred):\n assert 
y_pred.shape == y_true.shape\n numerator = (y_pred - y_true) ** 2\n denominator = y_true ** 2\n return 1 - (numerator.sum() / denominator.sum())\n\n\nif __name__ == \"__main__\":\n xls = pd.ExcelFile('petro_spot_price.xls')\n df1 = pd.read_excel(xls, 'Data 1')\n df1 = df1[2:]\n df1.columns = [\"date\", \"wti\", \"brent\"]\n df1[\"date\"] = pd.to_datetime(df1[\"date\"])\n df1.set_index(df1.columns[0], inplace=True)\n df = lagging(df1)\n print(df)\n"
] |
[
[
"pandas.concat",
"pandas.read_excel",
"pandas.to_datetime",
"sklearn.utils.shuffle",
"pandas.ExcelFile"
]
] |
tarpas/sktime
|
[
"a46596f6e7756d3ca5c0e617c0b61f561eacf280"
] |
[
"sktime/classification/compose/_pipeline.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Pipeline with a classifier.\"\"\"\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\nimport numpy as np\nfrom sklearn.base import clone\n\nfrom sktime.base import _HeterogenousMetaEstimator\nfrom sktime.classification.base import BaseClassifier\nfrom sktime.transformations.base import BaseTransformer\nfrom sktime.transformations.compose import TransformerPipeline\n\n__author__ = [\"fkiraly\"]\n__all__ = [\"ClassifierPipeline\"]\n\n\nclass ClassifierPipeline(BaseClassifier, _HeterogenousMetaEstimator):\n \"\"\"Pipeline of transformers and a classifier.\n\n The `ClassifierPipeline` compositor chains transformers and a single classifier.\n The pipeline is constructed with a list of sktime transformers, plus a classifier,\n i.e., estimators following the BaseTransformer resp BaseClassifier interface.\n The transformer list can be unnamed - a simple list of transformers -\n or string named - a list of pairs of string, estimator.\n\n For a list of transformers `trafo1`, `trafo2`, ..., `trafoN` and a classifier `clf`,\n the pipeline behaves as follows:\n `fit(X, y)` - changes styte by running `trafo1.fit_transform` on `X`,\n them `trafo2.fit_transform` on the output of `trafo1.fit_transform`, etc\n sequentially, with `trafo[i]` receiving the output of `trafo[i-1]`,\n and then running `clf.fit` with `X` being the output of `trafo[N]`,\n and `y` identical with the input to `self.fit`\n `predict(X)` - result is of executing `trafo1.transform`, `trafo2.transform`, etc\n with `trafo[i].transform` input = output of `trafo[i-1].transform`,\n then running `clf.predict` on the output of `trafoN.transform`,\n and returning the output of `clf.predict`\n `predict_proba(X)` - result is of executing `trafo1.transform`, `trafo2.transform`,\n etc, with `trafo[i].transform` input = output of `trafo[i-1].transform`,\n then running `clf.predict_proba` on the output of `trafoN.transform`,\n and returning the output of `clf.predict_proba`\n\n `get_params`, `set_params` uses `sklearn` compatible nesting interface\n if list is unnamed, names are generated as names of classes\n if names are non-unique, `f\"_{str(i)}\"` is appended to each name string\n where `i` is the total count of occurrence of a non-unique string\n inside the list of names leading up to it (inclusive)\n\n `ClassifierPipeline` can also be created by using the magic multiplication\n on any classifier, i.e., if `my_clf` inherits from `BaseClassifier`,\n and `my_trafo1`, `my_trafo2` inherit from `BaseTransformer`, then,\n for instance, `my_trafo1 * my_trafo2 * my_clf`\n will result in the same object as obtained from the constructor\n `ClassifierPipeline(classifier=my_clf, transformers=[my_trafo1, my_trafo2])`\n magic multiplication can also be used with (str, transformer) pairs,\n as long as one element in the chain is a transformer\n\n Parameters\n ----------\n classifier : sktime classifier, i.e., estimator inheriting from BaseClassifier\n this is a \"blueprint\" classifier, state does not change when `fit` is called\n transformers : list of sktime transformers, or\n list of tuples (str, transformer) of sktime transformers\n these are \"blueprint\" transformers, states do not change when `fit` is called\n\n Attributes\n ----------\n classifier_ : sktime classifier, clone of classifier in `classifier`\n this clone is fitted in the pipeline when `fit` is called\n transformers_ : list of tuples (str, transformer) of sktime transformers\n clones of transformers in `transformers` which are fitted in 
the pipeline\n is always in (str, transformer) format, even if transformers is just a list\n strings not passed in transformers are unique generated strings\n i-th transformer in `transformers_` is clone of i-th in `transformers`\n\n Examples\n --------\n >>> from sktime.transformations.panel.pca import PCATransformer\n >>> from sktime.classification.interval_based import TimeSeriesForestClassifier\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\")\n >>> X_test, y_test = load_unit_test(split=\"test\")\n >>> pipeline = ClassifierPipeline(\n ... TimeSeriesForestClassifier(n_estimators=5), [PCATransformer()]\n ... )\n >>> pipeline.fit(X_train, y_train)\n ClassifierPipeline(...)\n >>> y_pred = pipeline.predict(X_test)\n\n Alternative construction via dunder method:\n >>> pipeline = PCATransformer() * TimeSeriesForestClassifier(n_estimators=5)\n \"\"\"\n\n _tags = {\n \"X_inner_mtype\": \"pd-multiindex\", # which type do _fit/_predict accept\n \"capability:multivariate\": False,\n \"capability:unequal_length\": False,\n \"capability:missing_values\": False,\n \"capability:train_estimate\": False,\n \"capability:contractable\": False,\n \"capability:multithreading\": False,\n }\n\n _required_parameters = [\"classifier\"]\n\n # no default tag values - these are set dynamically below\n\n def __init__(self, classifier, transformers):\n\n self.classifier = classifier\n self.classifier_ = clone(classifier)\n self.transformers = transformers\n self.transformers_ = TransformerPipeline(transformers)\n\n super(ClassifierPipeline, self).__init__()\n\n # can handle multivariate iff: both classifier and all transformers can\n multivariate = classifier.get_tag(\"capability:multivariate\", False)\n multivariate = multivariate and not self.transformers_.get_tag(\n \"univariate-only\", True\n )\n # can handle missing values iff: both classifier and all transformers can,\n # *or* transformer chain removes missing data\n missing = classifier.get_tag(\"capability:missing_values\", False)\n missing = missing and self.transformers_.get_tag(\"handles-missing-data\", False)\n missing = missing or self.transformers_.get_tag(\n \"capability:missing_values:removes\", False\n )\n # can handle unequal length iff: classifier can and transformers can,\n # *or* transformer chain renders the series equal length\n unequal = classifier.get_tag(\"capability:unequal_length\")\n unequal = unequal and self.transformers_.get_tag(\n \"capability:unequal_length\", False\n )\n unequal = unequal or self.transformers_.get_tag(\n \"capability:unequal_length:removes\", False\n )\n # last three tags are always False, since not supported by transformers\n tags_to_set = {\n \"capability:multivariate\": multivariate,\n \"capability:missing_values\": missing,\n \"capability:unequal_length\": unequal,\n \"capability:contractable\": False,\n \"capability:train_estimate\": False,\n \"capability:multithreading\": False,\n }\n self.set_tags(**tags_to_set)\n\n @property\n def _transformers(self):\n return self.transformers_._steps\n\n @_transformers.setter\n def _transformers(self, value):\n self.transformers_._steps = value\n\n def __rmul__(self, other):\n \"\"\"Magic * method, return concatenated ClassifierPipeline, transformers on left.\n\n Implemented for `other` being a transformer, otherwise returns `NotImplemented`.\n\n Parameters\n ----------\n other: `sktime` transformer, must inherit from BaseTransformer\n otherwise, `NotImplemented` is returned\n\n Returns\n -------\n ClassifierPipeline 
object, concatenation of `other` (first) with `self` (last).\n \"\"\"\n if isinstance(other, BaseTransformer):\n # use the transformers dunder to get a TransformerPipeline\n trafo_pipeline = other * self.transformers_\n # then stick the expanded pipeline in a ClassifierPipeline\n new_pipeline = ClassifierPipeline(\n classifier=self.classifier,\n transformers=trafo_pipeline.steps,\n )\n return new_pipeline\n else:\n return NotImplemented\n\n @staticmethod\n def _is_name_and_trafo(obj):\n if not isinstance(obj, tuple) or len(obj) != 2:\n return False\n if not isinstance(obj[0], str) or not isinstance(obj[1], BaseTransformer):\n return False\n return True\n\n def _anytagis(self, tag_name, value):\n \"\"\"Return whether any estimator in list has tag `tag_name` of value `value`.\"\"\"\n tagis = [est.get_tag(tag_name, value) == value for _, est in self.transformers_]\n return any(tagis)\n\n def _anytagis_then_set(self, tag_name, value, value_if_not):\n \"\"\"Set self's `tag_name` tag to `value` if any estimator on the list has it.\"\"\"\n if self._anytagis(tag_name=tag_name, value=value):\n self.set_tags(**{tag_name: value})\n else:\n self.set_tags(**{tag_name: value_if_not})\n\n def _anytag_notnone_val(self, tag_name):\n \"\"\"Return first non-'None' value of tag `tag_name` in estimator list.\"\"\"\n for _, est in self.transformers_:\n tag_val = est.get_tag(tag_name)\n if tag_val != \"None\":\n return tag_val\n return tag_val\n\n def _anytag_notnone_set(self, tag_name):\n \"\"\"Set self's `tag_name` tag to first non-'None' value in estimator list.\"\"\"\n tag_val = self._anytag_notnone_val(tag_name=tag_name)\n if tag_val != \"None\":\n self.set_tags(**{tag_name: tag_val})\n\n def _fit(self, X, y):\n \"\"\"Fit time series classifier to training data.\n\n core logic\n\n Parameters\n ----------\n X : Training data of type self.get_tag(\"X_inner_mtype\")\n y : array-like, shape = [n_instances] - the class labels\n\n Returns\n -------\n self : reference to self.\n\n State change\n ------------\n creates fitted model (attributes ending in \"_\")\n \"\"\"\n Xt = self.transformers_.fit_transform(X)\n self.classifier_.fit(Xt, y)\n\n return self\n\n def _predict(self, X) -> np.ndarray:\n \"\"\"Predict labels for sequences in X.\n\n core logic\n\n Parameters\n ----------\n X : data not used in training, of type self.get_tag(\"X_inner_mtype\")\n\n Returns\n -------\n y : predictions of labels for X, np.ndarray\n \"\"\"\n Xt = self.transformers_.transform(X)\n return self.classifier_.predict(Xt)\n\n def _predict_proba(self, X) -> np.ndarray:\n \"\"\"Predicts labels probabilities for sequences in X.\n\n Default behaviour is to call _predict and set the predicted class probability\n to 1, other class probabilities to 0. 
Override if better estimates are\n obtainable. This pipeline overrides the default: `X` is passed through the\n fitted transformers and the output of the fitted classifier's `predict_proba`\n is returned.\n\n Parameters\n ----------\n X : data to predict y with, of type self.get_tag(\"X_inner_mtype\")\n\n Returns\n -------\n y : predictions of probabilities for class values of X, np.ndarray\n \"\"\"\n Xt = self.transformers_.transform(X)\n return self.classifier_.predict_proba(Xt)\n\n def get_params(self, deep=True):\n \"\"\"Get parameters of estimator in `transformers`.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained sub-objects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n params = dict()\n trafo_params = self._get_params(\"_transformers\", deep=deep)\n params.update(trafo_params)\n\n return params\n\n def set_params(self, **kwargs):\n \"\"\"Set the parameters of estimator in `transformers`.\n\n Valid parameter keys can be listed with ``get_params()``.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n if \"classifier\" in kwargs.keys():\n if not isinstance(kwargs[\"classifier\"], BaseClassifier):\n raise TypeError('\"classifier\" arg must be an sktime classifier')\n trafo_keys = self._get_params(\"_transformers\", deep=True).keys()\n classif_keys = self.classifier.get_params(deep=True).keys()\n trafo_args = self._subset_dict_keys(dict_to_subset=kwargs, keys=trafo_keys)\n classif_args = self._subset_dict_keys(dict_to_subset=kwargs, keys=classif_keys)\n if len(classif_args) > 0:\n self.classifier.set_params(**classif_args)\n if len(trafo_args) > 0:\n self._set_params(\"_transformers\", **trafo_args)\n return self\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict contains parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n # imports\n from sktime.classification.distance_based import KNeighborsTimeSeriesClassifier\n from sktime.transformations.series.exponent import ExponentTransformer\n\n t1 = ExponentTransformer(power=2)\n t2 = ExponentTransformer(power=0.5)\n c = KNeighborsTimeSeriesClassifier()\n\n # construct without names\n return {\"transformers\": [t1, t2], \"classifier\": c}\n"
] |
[
[
"sklearn.base.clone"
]
] |
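A minimal usage sketch for the ClassifierPipeline record above, assuming the module is exposed as sktime.classification.compose (the import path is not stated in the record itself); the estimators are the same ones its get_test_params uses. It shows that the `*` dunder described in the docstring builds the same composite as the explicit constructor.

from sktime.classification.compose import ClassifierPipeline  # assumed path
from sktime.classification.distance_based import KNeighborsTimeSeriesClassifier
from sktime.transformations.series.exponent import ExponentTransformer

t1, t2 = ExponentTransformer(power=2), ExponentTransformer(power=0.5)
clf = KNeighborsTimeSeriesClassifier()

pipe_a = ClassifierPipeline(classifier=clf, transformers=[t1, t2])
pipe_b = t1 * t2 * clf  # __rmul__ chaining; same composite as pipe_a

# sklearn-style nested parameter access; step names are generated from class
# names, with numeric suffixes appended when class names collide (both steps
# here are ExponentTransformer).
print(sorted(pipe_b.get_params(deep=True)))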
tnakaicode/SimpleQmap-Win
|
[
"b533bef650f8f1388f2cbb99aef3d5e8f8aa2860"
] |
[
"SimpleQmap/unittest/test_state.py"
] |
[
"import unittest\nimport numpy as np\nimport SimpleQmap as S\n\n\nclass TestState(unittest.TestCase):\n def setUp(self):\n self.dim = int(np.random.randint(1,100)*2)\n xmax = np.random.random()\n xmin = -xmax\n ymax = np.random.random()\n ymin = -ymax\n self.domain = [[xmin, xmax], [ymin, ymax]]\n k=np.random.random()\n self.scl = S.ScaleInfo(dim=self.dim, domain=self.domain)\n self.map = S.StandardMap(k=k)\n self.qmap = S.Qmap(map = self.map, dim=self.dim, domain=self.domain)\n\n\n def test_ScaleInfo(self):\n dim = np.random.randint(1,100)\n xmax = np.random.random()\n xmin = -xmax\n ymax = np.random.random()\n ymin = -ymax\n domain = [[xmin, xmax], [ymin, ymax]]\n scl = S.ScaleInfo(dim=dim, domain=domain)\n self.assertTrue(scl.dim == dim)\n self.assertTrue(scl.domain == domain)\n x = np.linspace(xmin, xmax, dim, endpoint=False)\n y = np.linspace(ymin, ymax, dim, endpoint=False)\n self.assertTrue(np.all(scl.x[0] == x))\n self.assertTrue(np.all(scl.x[1] == y))\n xmin, xmax = np.random.random(), -np.random.random()\n ymin, ymax = np.random.random(), -np.random.random()\n domain = [[xmin, xmax],[ymin, ymax]]\n\n with self.assertRaises(ValueError):\n S.ScaleInfo(dim, domain)\n \n def test_transform(self):\n state = S.State(self.scl)\n q_c = np.random.random()\n p_c = np.random.random()\n cs = state.cs(q_c,p_c)\n\n self.assertAlmostEqual(cs.norm(), 1.0)\n self.assertTrue(isinstance(cs.norm(),np.float64))\n cs_prep = cs.q2p()\n self.assertAlmostEqual(cs_prep.norm(), 1.0)\n self.assertTrue(isinstance(cs_prep,S.State))\n cs_qrep = cs_prep.p2q()\n self.assertAlmostEqual(cs_qrep.norm(), 1.0)\n self.assertTrue(isinstance(cs_prep,S.State))\n ovl = np.abs(cs.inner(cs_qrep)*cs_qrep.inner(cs))\n self.assertAlmostEqual(ovl, 1.0)\n inn = cs.qrep().inner(cs.prep())\n self.assertTrue(isinstance(inn, np.complex128))\n self.assertNotAlmostEqual(np.abs(inn*np.conj(inn)), 1.0)\n\n \n \nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"numpy.conj",
"numpy.linspace",
"numpy.all",
"numpy.random.randint"
]
] |
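A hedged round-trip sketch of what test_transform above exercises, using only the SimpleQmap calls that appear in the test (ScaleInfo, State.cs, q2p, p2q, norm, inner); the dimension and domain values are arbitrary:

import SimpleQmap as S

dim = 64                              # any even dimension, as in setUp
domain = [[-0.5, 0.5], [-0.5, 0.5]]   # [[qmin, qmax], [pmin, pmax]]
scl = S.ScaleInfo(dim=dim, domain=domain)

cs = S.State(scl).cs(0.1, -0.2)       # coherent state centred at (q, p)
roundtrip = cs.q2p().p2q()            # q-representation -> p -> back to q
print(abs(cs.norm() - 1.0) < 1e-8)    # norm preserved under the transform
print(abs(abs(cs.inner(roundtrip)) ** 2 - 1.0) < 1e-8)  # overlap stays 1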
AKI-maggie/adapted_deep_embeddings
|
[
"a93c5061c09fa1a42d54053cd82e71cef447e4b8"
] |
[
"models/baseline_model.py"
] |
[
"'''\nAdapted from https://danijar.com/structuring-your-tensorflow-models/\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom .model import Model\nfrom .utils import assign_to_device, _conv, define_scope, _fully_connected, get_available_gpus, _max_pooling, _relu, _softmax\n\nclass BaselineModel(Model):\n\n def __init__(self, config):\n self.config = self.get_config(config)\n self.saver = None\n self.learning_rate = tf.placeholder(tf.float32)\n self.is_train = tf.placeholder(tf.bool)\n self.is_task1 = tf.placeholder(tf.bool)\n\n def create_saver(self):\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n\n def save_model(self, sess, step):\n self.saver.save(sess, os.path.join(self.config.save_dir_by_rep, 'model.ckpt'), global_step=step)\n\n def restore_model(self, sess):\n checkpoint = tf.train.latest_checkpoint(self.config.save_dir_by_rep)\n if checkpoint is None:\n sys.exit('Cannot restore model that does not exist')\n self.saver.restore(sess, checkpoint)\n\n def get_single_device(self):\n devices = get_available_gpus()\n d = self.config.controller\n if devices:\n d = devices[0]\n return d\n\n @define_scope\n def optimize(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n pred = self.prediction\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, \n labels=tf.one_hot(self.target, self.config.n)))\n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(cost)\n\n return train_op, cost\n\n @define_scope(scope='stream_metrics')\n def metrics(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n pred = self.prediction\n acc, update_acc = tf.metrics.accuracy(self.target, tf.argmax(_softmax(pred), axis=1))\n return update_acc\n\nclass MNISTBaselineModel(BaselineModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.input = tf.placeholder(tf.float32, [None, 784])\n self.target = tf.placeholder(tf.int64, [None])\n self.prediction\n self.optimize\n self.metrics\n\n @define_scope\n def prediction(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n x = self.input\n x = tf.reshape(x, [-1, 28, 28, 1])\n x = _relu(_conv('conv1', x, 3, x.get_shape()[-1], 32, 1))\n x = _max_pooling('pool2', _relu(_conv('conv2', x, 3, x.get_shape()[-1], 32, 1)), 2, 2)\n x = tf.contrib.layers.flatten(x)\n x = _relu(_fully_connected('fc1', x, 128))\n x = _fully_connected('fc2', x, self.config.n)\n return x\n\nclass IsoletBaselineModel(BaselineModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.input = tf.placeholder(tf.float32, [None, 617])\n self.target = tf.placeholder(tf.int64, [None])\n self.prediction\n self.optimize\n self.metrics\n\n @define_scope\n def prediction(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n x = self.input\n x = _relu(_fully_connected('fc1', x, 128))\n x = _relu(_fully_connected('fc2', x, 64))\n x = _fully_connected('fc3', x, self.config.n)\n return x\n\nclass OmniglotBaselineModel(BaselineModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.input = tf.placeholder(tf.float32, [None, 
784])\n self.target = tf.placeholder(tf.int64, [None])\n self.batch_norm = partial(tf.layers.batch_normalization,\n momentum=0.9, epsilon=1e-5, fused=True, center=True, scale=False)\n self.prediction\n self.optimize\n self.metrics\n\n @define_scope\n def prediction(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n x = self.input\n x = tf.reshape(x, [-1, 28, 28, 1])\n x = _max_pooling('pool1', _relu(self.batch_norm(_conv('conv1', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train)), 2, 2)\n x = _max_pooling('pool2', _relu(self.batch_norm(_conv('conv2', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train)), 2, 2)\n x = _relu(self.batch_norm(_conv('conv3', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train))\n x = tf.contrib.layers.flatten(x)\n x = _relu(_fully_connected('fc1', x, 128))\n x = _fully_connected('fc2', x, self.config.n)\n return x\n\nclass TinyImageNetBaselineModel(BaselineModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.input = tf.placeholder(tf.float32, [None, 64, 64, 3])\n self.target = tf.placeholder(tf.int64, [None])\n self.batch_norm = partial(tf.layers.batch_normalization,\n momentum=0.9, epsilon=1e-5, fused=True, center=True, scale=False)\n self.prediction\n self.optimize\n self.metrics\n\n @define_scope\n def prediction(self):\n d = self.get_single_device()\n with tf.device(assign_to_device(d, self.config.controller)):\n x = self.input\n x = _max_pooling('pool1', _relu(self.batch_norm(_conv('conv1', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train)), 2, 2)\n x = _max_pooling('pool2', _relu(self.batch_norm(_conv('conv2', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train)), 2, 2)\n x = _max_pooling('pool3', _relu(self.batch_norm(_conv('conv3', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train)), 2, 2)\n x = _relu(self.batch_norm(_conv('conv4', x, 3, x.get_shape()[-1], 32, 1), training=self.is_train))\n x = tf.contrib.layers.flatten(x)\n x = _relu(_fully_connected('fc1', x, 128))\n x = _fully_connected('fc2', x, self.config.n)\n return x\n"
] |
[
[
"tensorflow.train.latest_checkpoint",
"tensorflow.control_dependencies",
"tensorflow.get_collection",
"tensorflow.global_variables",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.contrib.layers.flatten",
"tensorflow.one_hot",
"tensorflow.train.AdamOptimizer"
]
] |
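The baseline models above lean on a define_scope decorator imported from .utils, following the danijar.com article cited in the file header. That helper is not part of this record, so as orientation only, here is a simplified sketch of the lazy-property idea behind it; the real version additionally wraps the build in a tf.variable_scope and accepts arguments such as scope='stream_metrics'.

import functools

def define_scope(function):
    # Cache the result so that touching self.prediction / self.optimize /
    # self.metrics in __init__ builds each graph piece exactly once.
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return decorator

class Demo:
    @define_scope
    def prediction(self):
        print('built once')
        return 42

d = Demo()
d.prediction  # prints 'built once' and returns 42
d.prediction  # served from the cache, nothing rebuilt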
shifaoh/dna2vec
|
[
"9ad055d8c772187261dad17d44ceee465c636ba9"
] |
[
"scripts/train_dna2vec.py"
] |
[
"#!/usr/bin/env python3\n\nimport sys\nsys.path.extend(['.', '..'])\n\nimport glob\nimport logbook\nfrom logbook.compat import redirect_logging\nimport configargparse\nimport numpy as np\nfrom Bio import SeqIO\nfrom attic_util.time_benchmark import Benchmark\nfrom attic_util import util\nfrom attic_util.tee import Tee\nfrom dna2vec.histogram import Histogram\nfrom dna2vec.generators import SeqGenerator, KmerSeqIterable, SeqMapper, SeqFragmenter\nfrom dna2vec.generators import DisjointKmerFragmenter, SlidingKmerFragmenter\n\nfrom gensim.models import word2vec\n\nclass InvalidArgException(Exception):\n pass\n\nclass Learner:\n def __init__(self, out_fileroot, context_halfsize, gensim_iters, vec_dim):\n self.logger = logbook.Logger(self.__class__.__name__)\n assert(word2vec.FAST_VERSION >= 0)\n self.logger.info('word2vec.FAST_VERSION (should be >= 0): {}'.format(word2vec.FAST_VERSION))\n self.model = None\n self.out_fileroot = out_fileroot\n self.context_halfsize = context_halfsize\n self.gensim_iters = gensim_iters\n self.use_skipgram = 1\n self.vec_dim = vec_dim\n\n self.logger.info('Context window half size: {}'.format(self.context_halfsize))\n self.logger.info('Use skipgram: {}'.format(self.use_skipgram))\n self.logger.info('gensim_iters: {}'.format(self.gensim_iters))\n self.logger.info('vec_dim: {}'.format(self.vec_dim))\n\n def train(self, kmer_seq_generator):\n self.model = word2vec.Word2Vec(\n sentences=kmer_seq_generator,\n size=self.vec_dim,\n window=self.context_halfsize,\n min_count=5,\n workers=4,\n sg=self.use_skipgram,\n iter=self.gensim_iters)\n\n # self.logger.info(model.vocab)\n\n def write_vec(self):\n out_filename = '{}.w2v'.format(self.out_fileroot)\n self.model.save_word2vec_format(out_filename, binary=False)\n\ndef run_main(args, inputs, out_fileroot):\n logbook.info(' '.join(sys.argv))\n if not args.debug:\n import logging\n logging.getLogger('gensim.models.word2vec').setLevel(logging.INFO)\n\n np.random.seed(args.rseed)\n\n benchmark = Benchmark()\n\n if args.kmer_fragmenter == 'disjoint':\n kmer_fragmenter = DisjointKmerFragmenter(args.k_low, args.k_high)\n elif args.kmer_fragmenter == 'sliding':\n kmer_fragmenter = SlidingKmerFragmenter(args.k_low, args.k_high)\n else:\n raise InvalidArgException('Invalid kmer fragmenter: {}'.format(args.kmer_fragmenter))\n\n logbook.info('kmer fragmenter: {}'.format(args.kmer_fragmenter))\n\n histogram = Histogram()\n kmer_seq_iterable = KmerSeqIterable(\n args.rseed_trainset,\n SeqGenerator(inputs, args.epochs),\n SeqMapper(),\n SeqFragmenter(),\n kmer_fragmenter,\n histogram,\n )\n\n learner = Learner(out_fileroot, args.context, args.gensim_iters, args.vec_dim)\n learner.train(kmer_seq_iterable)\n learner.write_vec()\n\n histogram.print_stat(sys.stdout)\n\n benchmark.print_time()\n\ndef main():\n argp = configargparse.get_argument_parser()\n argp.add('-c', is_config_file=True, help='config file path')\n argp.add_argument('--kmer-fragmenter', help='disjoint or sliding', choices=['disjoint', 'sliding'], default='sliding')\n argp.add_argument('--vec-dim', help='vector dimension', type=int, default=12)\n argp.add_argument('--rseed', help='general np.random seed', type=int, default=7)\n argp.add_argument('--rseed-trainset', help='random seed for generating training data', type=int, default=123)\n argp.add_argument('--inputs', help='FASTA files', nargs='+', required=True)\n argp.add_argument('--k-low', help='k-mer start range (inclusive)', type=int, default=5)\n argp.add_argument('--k-high', help='k-mer end range (inclusive)', 
type=int, default=5)\n argp.add_argument('--context', help='half size of context window (the total size is 2*c+1)', type=int, default=4)\n argp.add_argument('--epochs', help='number of epochs', type=int, default=1)\n argp.add_argument('--gensim-iters', help=\"gensim's internal iterations\", type=int, default=1)\n argp.add_argument('--out-dir', help=\"output directory\", default='../dataset/dna2vec/results')\n argp.add_argument('--debug', help='', action='store_true')\n args = argp.parse_args()\n\n if args.debug:\n out_dir = '/tmp'\n log_level = 'DEBUG'\n else:\n out_dir = args.out_dir\n log_level = 'INFO'\n\n inputs = []\n for s in args.inputs:\n inputs.extend(list(glob.glob(s)))\n\n mbytes = util.estimate_bytes(inputs) // (10 ** 6)\n out_fileroot = util.get_output_fileroot(\n out_dir,\n 'dna2vec',\n 'k{}to{}-{}d-{}c-{}Mbp-{}'.format(\n args.k_low,\n args.k_high,\n args.vec_dim,\n args.context,\n mbytes * args.epochs, # total Mb including epochs\n args.kmer_fragmenter))\n\n out_txt_filename = '{}.txt'.format(out_fileroot)\n with open(out_txt_filename, 'w') as summary_fptr:\n with Tee(summary_fptr):\n logbook.StreamHandler(sys.stdout, level=log_level).push_application()\n redirect_logging()\n run_main(args, inputs, out_fileroot)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.seed"
]
] |
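Learner.train above is a thin wrapper around gensim's word2vec; the sketch below reproduces the call on a toy k-mer corpus. It assumes gensim < 4.0, matching the keyword names the script uses (size and iter were renamed vector_size and epochs in gensim 4):

from gensim.models import word2vec

# Each 'sentence' is a list of k-mers, as emitted by KmerSeqIterable.
kmer_sentences = [
    ['ACGTA', 'CGTAC', 'GTACG'],
    ['TTGCA', 'TGCAT', 'GCATT'],
] * 100  # repeated so that min_count=5 keeps the vocabulary

model = word2vec.Word2Vec(
    sentences=kmer_sentences,
    size=12,       # --vec-dim default
    window=4,      # --context default (half size of the window)
    min_count=5,
    workers=4,
    sg=1,          # skipgram, hard-coded in Learner
    iter=1)        # --gensim-iters default
print(model.wv['ACGTA'].shape)  # (12,)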
FazelYU/Adaptive-Navigation
|
[
"95d4aa7603fb416c5c4ebc4560724f85c93cee22"
] |
[
"environments/sumo/Utils.py"
] |
[
"import numpy\nimport torch\nimport json\nimport random\nimport traci\nfrom inspect import currentframe, getframeinfo\nimport networkx as nx\nimport pymorton as pm\nimport pandas as pd\nfrom sklearn.manifold import TSNE\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\n\nfrom sklearn.decomposition import PCA\nimport igraph as ig\nfrom scipy.stats import entropy\n\n\nclass Utils(object):\n \"\"\"docstring for Utils\"\"\"\n def __init__(self,config):\n super(Utils, self).__init__()\n self.config=config\n torch.autograd.set_detect_anomaly(self.config.Constants[\"Analysis_Mode\"])\n self.seeded_random_generator=numpy.random.RandomState(config.envm_seed)\n # breakpoint()\n self.network=config.network\n\n # self.pressure_matrix=[[0]*dim for i in range(dim)]\n # self.network_embd_dim=9\n # self.environment=environment\n # self.vc_count_dic=self.creat_vc_count_dic()\n self.Num_Flow_Types=3\n self.slow_vc_speed=7\n self.fast_vc_speed=2*self.slow_vc_speed\n\n\n self.agent_dic=self.create_agent_dic()#{node:[len(in edges), len(out edges)]}\n self.agent_list=list(self.agent_dic)\n self.all_pairs_shortest_path_matrix=self.create_all_pairs_shortest_path_matrix(self.network.all_pairs_shortest_path)\n self.agent_id_embedding_dic,self.agnet_id_embedding_size=self.create_agent_id_embedding_dic()\n self.agent_label_dic=self.create_agent_labels_dic()\n # self.tSNE_plot()\n\n self.agent_index_dic={self.agent_list[idx]:idx for idx in range(len(self.agent_list))}\n self.agent_path_dic=self.create_agent_path_dic()\n self.agent_adjacency_list_dict=self.create_agent_adjacency_list_dic()\n self.max_len_out_edges=max([self.agent_dic[agent_id][1] for agent_id in self.agent_dic])\n self.config.edge_index=self.create_edge_index()\n self.config.network_state=self.create_network_state()\n # if self.config.routing_mode=='Q_routing_1_hop' or\\\n # self.config.routing_mode=='Q_routing_2_hop':\n # self.aggregated_network_state=self.graph_attention_network(self.edge_index,self.config.network_state)\n\n self.edge_action_mask_dic=self.create_edge_action_mask_dic()\n self.induction_loops=[il for il in traci.inductionloop.getIDList() if \"TLS\" not in il]\n\n for il in self.induction_loops:\n traci.inductionloop.subscribe(il,[\\\n self.config.Constants['il_last_step_vc_count_subscribtion_code'],\\\n self.config.Constants['il_last_step_vc_IDs_subscribtion_code']\n ])\n\n\n def get_state(self,source_edge,source_node,sink_node):\n action_mask,action_mask_index=self.get_edge_action_mask(source_edge,source_node)\n dest_embed=self.agent_id_embedding_dic[sink_node]\n destination_aware_network_state=self.get_destination_aware_network_state(sink_node).detach().clone()\n # if dest_embed.device.type!='cuda':\n # breakpoint()\n # source_node_state=self.get_agent_state(source_node)\n # embeding=torch.cat((dest_embed,source_node_state),0)\n\n # if self.config.Constants['Analysis_Mode']:\n # try:\n # assert(len(embeding)==self.get_state_diminsion(source_node))\n # except Exception as e:\n # breakpoint()\n return {\n \"agent_id\": source_node,\n \"agent_idx\": self.agent_index_dic[source_node],\n \"action_mask\": action_mask,\n \"action_mask_index\":action_mask_index,\n \"embeding\": torch.cat((dest_embed,destination_aware_network_state.view(-1)),-1)\n # \"network_state\":self.config.network_state.detach().clone()\n }\n\n def get_state_diminsion(self,node_id): \n if self.config.does_need_network_state:\n return self.agnet_id_embedding_size+1\n\n return self.agnet_id_embedding_size\n\n 
def get_network_state_size(self):\n if self.config.does_need_network_state:\n return self.max_len_out_edges+1\n return 0\n\n def get_intersection_id_size(self):\n return self.agnet_id_embedding_size\n \n def create_network_state(self):\n return torch.vstack([torch.zeros(self.max_len_out_edges,device=self.config.device) for agent_id in self.agent_dic]).detach()\n\n def set_network_state(self):\n # the network state changes randomly. However, the random changes are the same among the benchmarks.\n for agent_id in self.agent_path_dic:\n out_edeges_list=list(self.agent_path_dic[agent_id])\n for edge_number in range(len(out_edeges_list)):\n path_key=out_edeges_list[edge_number]\n if self.seeded_random_generator.random()>self.config.congestion_epsilon:\n # no congestion for the edge\n # try:\n # self.config.network_state[self.agent_index_dic[agent_id]][edge_number]=0\n # except Exception as e:\n # breakpoint()\n self.config.network_state[self.agent_index_dic[agent_id]][edge_number]=0 \n for edge in self.agent_path_dic[agent_id][path_key]:\n traci.edge.setMaxSpeed(edge,self.network.edge_speed_dic[edge]['speed'])\n self.network.edge_speed_dic[edge]['is_congested']=False\n else:\n #congestion \n self.config.network_state[self.agent_index_dic[agent_id]][edge_number]=1\n for edge in self.agent_path_dic[agent_id][path_key]:\n traci.edge.setMaxSpeed(edge,self.network.edge_speed_dic[edge]['speed']*self.config.congestion_speed_factor)\n self.network.edge_speed_dic[edge]['is_congested']=True\n if self.config.Constants['Vis_GAT']:\n self.visualize_gat_properties(self.agent_list)\n # breakpoint()\n\n def state2torch(self,state):\n state=torch.tensor(state, device=self.config.device, dtype=torch.float)\n return state.unsqueeze(0)\n# ---------------------------------------------------------------------------\n def create_all_pairs_shortest_path_matrix(self,all_pairs_shortest_path_dic):\n matrix= torch.tensor(\n [\n [all_pairs_shortest_path_dic[agent_id_out][agent_id_in] for agent_id_in in self.agent_list] \n for agent_id_out in self.agent_list\n ],\n device=self.config.device)\n matrix/=matrix.max(1,keepdim=True)[0] # normalize\n return matrix\n \n def get_shortet_path_distances(self,destination_agent_id):\n return self.all_pairs_shortest_path_matrix[self.agent_index_dic[destination_agent_id]]\n\n def get_destination_aware_network_state(self,destination_agent_id):\n network_state=self.config.network_state\n shortest_path_distances=self.get_shortet_path_distances(destination_agent_id)\n destination_aware_network_state=torch.cat((network_state,shortest_path_distances.unsqueeze(1)),1)\n return destination_aware_network_state\n# --------------------------------------------------------------------------- \n def generate_uniform_demand(self,sim_time):\n if self.config.training_mode:\n trip=self.create_sample_uniform_trip()\n else:\n trip=self.config.uniform_demands[self.config.next_uniform_demand_index]\n\n self.config.next_uniform_demand_index+=1\n self.config.num_uniform_vc_dispatched+=1\n source_edge=trip[0]\n sink_edge=trip[1]\n destinatino_node=self.network.get_edge_head_node(sink_edge)\n\n new_vcs=[]\n trip_id=\"trip_{}\".format(sim_time)\n traci.route.add(trip_id,[source_edge,sink_edge])\n deadline=4*self.get_shortest_path_time(self.network.get_edge_tail_node(source_edge),self.network.get_edge_head_node(sink_edge))\\\n +sim_time\n\n for i in range(0,self.config.demand_scale):\n vid=\"vehicle_{}_{}_{}\".format(sim_time,i,destinatino_node)\n traci.vehicle.add(vid,trip_id)\n new_vcs.append(vid)\n # 
self.subscribe_vehicle(vid)\n # traci.vehicle.setColor(\"vehicle_{}\".format(sim_time),(255,0,255))\n # traci.vehicle.setShapeClass(\"vehicle_{}\".format(sim_time),\"truck\")\n return new_vcs,source_edge,self.network.get_edge_head_node(sink_edge),deadline\n \n def generate_biased_demand(self,sim_time,trip): \n source_edge=trip[0]\n sink_edge=trip[1]\n destinatino_node=self.network.get_edge_head_node(sink_edge)\n trip_id=\"biased_trip_{}\".format(sim_time)\n self.config.num_biased_vc_dispatched+=1\n\n new_vcs=[]\n traci.route.add(trip_id,[source_edge,sink_edge])\n deadline=4*self.get_shortest_path_time(self.network.get_edge_tail_node(source_edge),self.network.get_edge_head_node(sink_edge))\\\n +sim_time\n\n for i in range(0,self.config.demand_scale):\n vid=\"biased_vehicle_{}_{}_{}\".format(sim_time,i,destinatino_node)\n traci.vehicle.add(vid,trip_id)\n new_vcs.append(vid)\n # self.subscribe_vehicle(vid)\n return new_vcs,source_edge,self.network.get_edge_head_node(sink_edge),deadline\n \n def subscribe_vehicle(self,vc):\n traci.vehicle.subscribe(vc,[\\\n self.config.Constants['vc_lane_ID_subscribtion_code'],\n self.config.Constants['vc_road_ID_subscribtion_code']\n ])\n def create_sample_uniform_trip(self):\n source_node=random.choice(self.agent_list)\n sink_node=random.choice(self.agent_list)\n while (sink_node==source_node):\n sink_node=random.choice(self.agent_list)\n\n source_edge=random.choice(self.network.get_out_edges(source_node))\n sink_edge=random.choice(self.network.get_in_edges(sink_node))\n return [source_edge,sink_edge]\n# ------------------------------------------------------------------ \n def create_agent_dic(self):\n \"\"\"dictionary of all agents, \n agent_dic[0]:#in edges\n agent_dic[1]:#out edges\n agent_dic[2]: state of the out-going edges, 1 if an edge is congested 0 O.W.\"\"\"\n return {\\\n node: [\n len(self.network.get_in_edges(node)),\n len(self.network.get_out_edges(node)),\n ] \\\n for node in self.network.graph.nodes() if \\\n self.does_need_agent(node)\n }\n\n def does_need_agent(self,node):\n if node==None: \n return False\n \n if len(self.network.get_out_edges(node))<2:\n return False\n \n for edge in self.network.get_in_edges(node):\n if len(self.network.get_edge_connections(edge))>1:\n return True\n\n return False\n \n def create_agent_id_embedding_dic(self):\n z_order_dic={}\n agent_embedding_dic={}\n for agent_id in self.agent_dic:\n position=traci.junction.getPosition(agent_id)\n unique_Z_ID=pm.interleave(int(position[0]),int(position[1]))\n if self.config.Constants['Analysis_Mode']:\n try:\n assert(unique_Z_ID not in z_order_dic)\n except Exception as e:\n breakpoint()\n\n z_order_dic[unique_Z_ID]=agent_id\n sorted_z_vals=list(z_order_dic)\n sorted_z_vals.sort()\n \n ID_size=len(format(len(sorted_z_vals)-1,'b'))\n for index in range(0,len(sorted_z_vals)):\n z_val=sorted_z_vals[index]\n agent_id=z_order_dic[z_val]\n agent_id_embedding=[0]*ID_size\n index_bin=format(index,'b')\n for i in range(len(index_bin)):\n agent_id_embedding[-i-1]=int(index_bin[-i-1])\n agent_embedding_dic[agent_id]=torch.tensor(agent_id_embedding,dtype=torch.float,device=self.config.device)\n\n return agent_embedding_dic,ID_size\n\n def create_agent_labels_dic(self):\n agent_label_dic={}\n positions=numpy.array([list(traci.junction.getPosition(agent_id)) for agent_id in self.agent_list])\n max_X,max_Y=positions.max(0)\n min_X,min_Y=positions.min(0)\n range_X=max_X-min_X\n range_Y=max_Y-min_Y\n for agent_id in self.agent_dic:\n x,y=traci.junction.getPosition(agent_id)\n # if 
x==min_X:\n # breakpoint()\n # if y==min_Y:\n # breakpoint()\n if x==max_X:\n # breakpoint()\n x-=0.001\n if y==max_Y:\n # breakpoint()\n y-=0.001\n\n i=math.floor((x-min_X)/range_X*4)\n j=math.floor((y-min_Y)/range_Y*4)\n label=j*4+i\n agent_label_dic[agent_id]=label\n\n y=numpy.array([agent_label_dic[agent_id] for agent_id in self.agent_dic])\n # breakpoint()\n return agent_label_dic\n\n\n\n def vis_intersec_id_embedding(self,agent_id,transform_func):\n X=torch.vstack([self.agent_id_embedding_dic[agent_id] for agent_id in self.agent_dic])\n X_trns=transform_func(agent_id,X)\n \n X=X.detach().cpu().numpy()\n X_trns=X_trns.detach().cpu().numpy()\n \n y=numpy.array([self.agent_label_dic[agent_id] for agent_id in self.agent_dic])\n \n # self.tSNE_plot(X,y)\n # self.tSNE_plot(X_trns,y)\n self.pca_plot(X,y)\n self.pca_plot(X_trns,y)\n plt.show()\n breakpoint()\n\n # def tSNE_plot(self,X,y):\n # df=pd.DataFrame(X)\n # df['label-class']=y\n # df['label']=[int(lbl) for lbl in y]\n # breakpoint()\n # df.groupby('label-class', as_index=False).size().plot(kind='bar',x='label')\n # breakpoint()\n # tsne = TSNE(n_components=2, verbose=1, perplexity=10, n_iter=400)\n # tsne_results = tsne.fit_transform(df)\n # df['tsne-2d-one'] = tsne_results[:,0]\n # df['tsne-2d-two'] = tsne_results[:,1]\n # plt.figure(figsize=(16,10))\n # sns.scatterplot(x=\"tsne-2d-one\", y=\"tsne-2d-two\",hue=\"label\",size=\"label\",data=df,legend=\"full\")\n # # alpha=0.3\n \n def pca_plot(self,X,y):\n df=pd.DataFrame(X)\n df['label']=y\n # df.groupby('label', as_index=False).size().plot(kind='bar',x='label')\n # breakpoint()\n\n pca = PCA(n_components=2)\n pca_result = pca.fit_transform(df)\n df['pca-one'] = pca_result[:,0]\n df['pca-two'] = pca_result[:,1] \n print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))\n # tsne = TSNE(n_components=2, verbose=1, perplexity=4, n_iter=400)\n # tsne_results = tsne.fit_transform(df)\n # df['tsne-2d-one'] = tsne_results[:,0]\n # df['tsne-2d-two'] = tsne_results[:,1]\n plt.figure()\n sns.set(font_scale=2)\n sns.scatterplot(x=\"pca-one\", y=\"pca-two\",hue=\"label\",style=\"label\",data=df,legend=\"full\",s=400)\n \n\n def create_agent_path_dic(self):\n agent_paths={}\n for agent in self.agent_dic:\n agent_paths[agent]={}\n for out_edge in self.network.get_out_edges(agent):\n if self.config.Constants['Analysis_Mode']:\n assert(out_edge not in agent_paths)\n agent_paths[agent][out_edge]=self.create_edge_path(out_edge)\n return agent_paths\n\n \n def create_edge_path(self,edgeID):\n \"\"\"receives edgeID of the first edge returns edgeID of the path until there is only one connection\"\"\"\n path=[edgeID]\n path_head_connections=self.network.get_edge_connections(edgeID)\n\n while len(path_head_connections)==1:\n path.append(path_head_connections[0])\n path_head_connections=self.network.get_edge_connections(path[-1])\n\n\n return path\n \n def create_edge_action_mask_dic(self):\n edge_action_mask_dic={}\n for agent_id in self.agent_dic:\n for in_edge_id in self.network.get_in_edges(agent_id):\n if self.config.Constants['Analysis_Mode']:\n assert(in_edge_id not in edge_action_mask_dic)\n edge_action_mask_dic[in_edge_id]=self.create_edge_action_mask(in_edge_id)\n\n return edge_action_mask_dic\n\n def create_edge_action_mask(self,edge_id):\n node_id=self.network.get_edge_head_node(edge_id)\n node_out_edges=self.network.get_out_edges(node_id)\n edge_connections=self.network.get_edge_connections(edge_id)\n action_mask=torch.tensor([-math.inf if edge not in 
edge_connections else 0 for edge in node_out_edges],device=self.config.device)\n action_mask_index=[i for i in range(len(node_out_edges)) if node_out_edges[i] in edge_connections]\n return action_mask,action_mask_index\n\n def get_edge_path(self,node_id,edge_id):\n return self.agent_path_dic[node_id][edge_id]\n\n # def get_edge_path_head_node(self,edge):\n # return self.network.get_edge_head_node(self.get_edge_path(edge)[-1])\n\n def get_next_road_IDs(self,node,action_edge_number):\n action_edge_ID=self.network.get_out_edges(node)[action_edge_number]\n return self.agent_path_dic[node][action_edge_ID]\n\n \n def get_destination(self,vc):\n route_tail=traci.vehicle.getRoute(vc)[-1]\n return self.network.get_edge_head_node(route_tail)\n\n def is_valid(self,source):\n self.log(\"validity check may be wrong\",type='warn')\n return len(self.network.get_out_edges(source))!=0\n \n def get_time(self):\n return traci.simulation.getTime()\n\n def get_edge_weight(self,edge):\n return self.network.graph.get_edge_data(*edge)['weigh']\n\n def get_shortest_path_time(self,source,destination):\n return self.network.all_pairs_shortest_path[source][destination]\n \n\n\n def get_edge_action_mask(self,edge_id,node_id):\n if self.config.Constants['Analysis_Mode']:\n assert(node_id==self.network.get_edge_head_node(edge_id))\n return self.edge_action_mask_dic[edge_id]\n\n# GAT--------------------------------------------------\n def create_agent_adjacency_list_dic(self):\n agent_adjacency_list_dic={}\n for agent_id in self.agent_path_dic:\n agent_adjacency_list_dic[agent_id]=[]\n for path in self.agent_path_dic[agent_id]:\n path_head=self.agent_path_dic[agent_id][path][-1]\n path_head_node=self.network.get_edge_head_node(path_head)\n agent_adjacency_list_dic[agent_id].append(path_head_node)\n return agent_adjacency_list_dic\n\n def create_edge_index(self,add_self_edges=True):\n num_of_nodes=len(self.agent_adjacency_list_dict)\n source_nodes_ids, target_nodes_ids = [], []\n seen_edges = set()\n\n for src_node, neighboring_nodes in self.agent_adjacency_list_dict.items():\n\n if self.config.Constants['Analysis_Mode']:\n try:\n assert(src_node==list(self.agent_dic.keys())[self.agent_index_dic[src_node]])\n except Exception as e:\n breakpoint()\n \n src_node=self.agent_index_dic[src_node] \n source_nodes_ids.append(src_node)\n target_nodes_ids.append(src_node)\n seen_edges.add((src_node, src_node))\n\n for trg_node in neighboring_nodes:\n trg_node=self.agent_index_dic[trg_node]\n if (src_node, trg_node) not in seen_edges: # it'd be easy to explicitly remove self-edges (Cora has none..)\n source_nodes_ids.append(src_node)\n target_nodes_ids.append(trg_node)\n seen_edges.add((src_node, trg_node))\n\n # shape = (2, E+V), \n # where E is the number of edges in the graph\n # and V is the number of vertices in the graph\n edge_index = numpy.row_stack((source_nodes_ids, target_nodes_ids))\n\n return torch.tensor(edge_index,dtype=torch.long,device=self.config.device)\n\n def get_node_features(self):\n return self.config.network_state\n\n # def graph_attention_network(self,edge_index,node_features):\n # return self.gat((node_features, edge_index))[0]\n\n#helper-------------------------------------------------\n\n def log(self, log_str, type='info'):\n if self.config.Constants['LOG']:\n if type == 'info':\n print('-Info- ' + log_str)\n if type=='err':\n if self.config.Constants['WHERE']:\n print(self._where())\n print('-Error- ' + log_str)\n\n if type== 'warn' and self.config.Constants['WARNINGS']:\n if 
self.config.Constants['WHERE']:\n print(self._where())\n print('-Warning- ' + log_str) \n\n def _where(self):\n cf=currentframe()\n return \"@ file:\"+getframeinfo(cf).filename+\" line:\"+str(cf.f_back.f_lineno)\n\n\n def visualize_gat_properties(self,nodes_of_interest,visualization_type=\"ATTENTION\"):\n gat=self.config.GAT\n node_features=self.config.network_state\n # device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # checking whether you have a GPU, I hope so!\n\n # config = {\n # 'dataset_name': dataset_name,\n # 'layer_type': LayerType.IMP3,\n # 'should_visualize': False, # don't visualize the dataset\n # 'batch_size': 2, # used only for PPI\n # 'ppi_load_test_only': True # used only for PPI (optimization, we're loading only test graphs)\n # }\n\n # Step 1: Prepare the data\n gat.eval() # some layers like nn.Dropout behave differently in train vs eval mode so this part is important\n with torch.no_grad():\n # Step 3: Run predictions and collect the high dimensional data\n all_nodes_unnormalized_scores = gat(node_features.view(1,-1,self.config.network_state_size)).view(-1,self.config.network_embed_size) # shape = (N, num of classes)\n all_nodes_unnormalized_scores = all_nodes_unnormalized_scores.cpu().numpy()\n # We'll need the edge index in different forms for multiple visualization types\n edge_index = self.config.edge_index\n\n \n # Step 4: Perform a specific visualization\n if visualization_type == \"ATTENTION\":\n # The number of nodes for which we want to visualize their attention over neighboring nodes\n # (2x this actually as we add nodes with highest degree + random nodes)\n # num_nodes_of_interest = 4 # 4 is an arbitrary number you can play with these numbers\n head_to_visualize = 0 # plot attention from this multi-head attention's head\n gat_layer_id = 0 # plot attention from this GAT layer\n\n # Build up the complete graph\n # node_features shape = (N, FIN), where N is the number of nodes and FIN number of input features\n total_num_of_nodes = len(node_features)\n complete_graph = ig.Graph()\n complete_graph.add_vertices(total_num_of_nodes) # igraph creates nodes with ids [0, total_num_of_nodes - 1]\n edge_index_tuples = list(zip(edge_index[0, :], edge_index[1, :])) # igraph requires this format\n complete_graph.add_edges(edge_index_tuples)\n\n # Pick the target nodes to plot (nodes with highest degree + random nodes)\n # Note: there could be an overlap between random nodes and nodes with highest degree - but highly unlikely\n # nodes_of_interest_idx = numpy.argpartition(complete_graph.degree(), -num_nodes_of_interest)[-num_nodes_of_interest:]\n # random_node_ids = numpy.random.randint(low=0, high=total_num_of_nodes, size=num_nodes_of_interest)\n # nodes_of_interest_idx = numpy.append(nodes_of_interest_idx, random_node_ids)\n # numpy.random.shuffle(nodes_of_interest_idx)\n # breakpoint()\n nodes_of_interest_idx=[self.agent_index_dic[agent_id] for agent_id in nodes_of_interest]\n\n target_node_ids = edge_index[1]\n source_nodes = edge_index[0]\n for target_node_id in nodes_of_interest_idx:\n # Step 1: Find the neighboring nodes to the target node\n # Note: self edges are included so the target node is its own neighbor (Alexandro, I am your mother)\n src_nodes_indices = torch.eq(target_node_ids, target_node_id)\n source_node_ids = source_nodes[src_nodes_indices].cpu().numpy()\n size_of_neighborhood = len(source_node_ids)\n\n # Step 2: Fetch their labels\n # labels = node_labels[source_node_ids].cpu().numpy()\n\n # Step 3: Fetch the attention weights for edges 
(attention is logged during GAT's forward pass above)\n # attention shape = (N, NH, 1) -> (N, NH) - we just squeeze the last dim it's superfluous\n # breakpoint()\n all_attention_weights = gat.gat_net[gat_layer_id].attention_weights.squeeze(dim=-1).squeeze(0)\n attention_weights = all_attention_weights[src_nodes_indices, head_to_visualize].cpu().numpy()\n # This part shows that for CORA what GAT learns is pretty much constant attention weights! Like in GCN!\n # On the other hand PPI's attention pattern is much less uniform.\n print(f'Max attention weight = {numpy.max(attention_weights)} and min = {numpy.min(attention_weights)}')\n attention_weights /= numpy.max(attention_weights) # rescale the biggest weight to 1 for nicer plotting\n\n # Build up the neighborhood graph whose attention we want to visualize\n # igraph constraint - it works with contiguous range of ids so we map e.g. node 497 to 0, 12 to 1, etc.\n id_to_igraph_id = dict(zip(source_node_ids, range(len(source_node_ids))))\n ig_graph = ig.Graph()\n ig_graph.add_vertices(size_of_neighborhood)\n ig_graph.add_edges([(id_to_igraph_id[neighbor], id_to_igraph_id[target_node_id]) for neighbor in source_node_ids])\n\n # Prepare the visualization settings dictionary and plot\n # breakpoint()\n visual_style = {\n \"vertex_size\":30,\n \"vertex_label_size\":25,\n \"edge_width\": 5*attention_weights, # make edges as thick as the corresponding attention weight\n \"layout\": ig_graph.layout_reingold_tilford_circular(), # layout for tree-like graphs\n \"margin\":100,\n \"vertex_label_dist\":1,\n # \"layout\": ig_graph.layout(layout='layout_grid').scale(5),\n # \"layout\": ig_graph.layout_grid(),\n \"vertex_label\": [self.agent_list[idx] for idx in source_node_ids]\n }\n # This is the only part that's Cora specific as Cora has 7 labels\n # if dataset_name.lower() == DatasetType.CORA.name.lower():\n # visual_style[\"vertex_color\"] = [cora_label_to_color_map[label] for label in labels]\n # else:\n # print('Add custom color scheme for your specific dataset. Using igraph default coloring.')\n\n\n ig.plot(ig_graph, **visual_style)\n # fig,ax=plt.subplots()\n # # ig.plot(ig_graph,\n # # layout=ig_graph.layout_reingold_tilford_circular(),\n # # vertex_size=40,\n # # vertex_label=[self.agent_list[idx] for idx in source_node_ids],\n # # edge_width=5*attention_weights,\n # # # target=ax\n # # )\n # plt.show()\n\n\n # breakpoint()\n # We want our local probability distributions (attention weights over the neighborhoods) to be\n # non-uniform because that means that GAT is learning a useful pattern. 
Entropy histograms help us visualize\n # how different those neighborhood distributions are from the uniform distribution (constant attention).\n # If the GAT is learning const attention we could well be using GCN or some even simpler models.\n elif visualization_type == \"ENTROPY\":\n num_heads_per_layer = [layer.num_of_heads for layer in gat.gat_net]\n num_layers = len(num_heads_per_layer)\n num_of_nodes = len(node_features)\n\n target_node_ids = edge_index[1].cpu().numpy()\n\n # For every GAT layer and for every GAT attention head plot the entropy histogram\n for layer_id in range(num_layers):\n # Fetch the attention weights for edges (attention is logged during GAT's forward pass above)\n # attention shape = (N, NH, 1) -> (N, NH) - we just squeeze the last dim, it's superfluous\n all_attention_weights = gat.gat_net[layer_id].attention_weights.squeeze(dim=-1).squeeze(0).cpu().numpy()\n\n # tmp fix for PPI: there are some numerical problems, so most attention coefficients are 0\n # and thus we can't plot entropy histograms\n # if dataset_name == DatasetType.PPI.name and layer_id > 0:\n # print(f'Entropy histograms for {dataset_name} are available only for the first layer.')\n # break\n\n for head_id in range(num_heads_per_layer[layer_id]):\n uniform_dist_entropy_list = [] # save the ideal uniform histogram as the reference\n neighborhood_entropy_list = []\n\n # This can also be done much more efficiently via scatter_add_ (no for loops)\n # pseudo: out.scatter_add_(node_dim, -all_attention_weights * log(all_attention_weights), target_index)\n for target_node_id in range(num_of_nodes): # find the neighborhood for every node in the graph\n # These attention weights sum up to 1 by GAT design so we can treat it as a probability distribution\n neighborhood_attention = all_attention_weights[target_node_ids == target_node_id].flatten()\n # Reference uniform distribution of the same length\n ideal_uniform_attention = numpy.ones(len(neighborhood_attention))/len(neighborhood_attention)\n\n # Calculate the entropy, check out this video if you're not familiar with the concept:\n # https://www.youtube.com/watch?v=ErfnhcEV1O8 (Aurélien Géron)\n neighborhood_entropy_list.append(entropy(neighborhood_attention, base=2))\n uniform_dist_entropy_list.append(entropy(ideal_uniform_attention, base=2))\n\n title = f'{self.config.network_name} entropy histogram layer={layer_id}, attention head={head_id}'\n draw_entropy_histogram(uniform_dist_entropy_list, title, color='orange', uniform_distribution=True)\n draw_entropy_histogram(neighborhood_entropy_list, title, color='dodgerblue')\n\n fig = plt.gcf() # get current figure\n plt.show()\n # fig.savefig(os.path.join(DATA_DIR_PATH, f'layer_{layer_id}_head_{head_id}.jpg'))\n plt.close()\n else:\n raise Exception(f'Visualization type {visualization_type} not supported.')\n\n\n\ndef draw_entropy_histogram(entropy_array, title, color='blue', uniform_distribution=False, num_bins=30):\n max_value = numpy.max(entropy_array)\n bar_width = (max_value / num_bins) * (1.0 if uniform_distribution else 0.75)\n histogram_values, histogram_bins = numpy.histogram(entropy_array, bins=num_bins, range=(0.0, max_value))\n\n plt.bar(histogram_bins[:num_bins], histogram_values[:num_bins], width=bar_width, color=color)\n plt.xlabel('entropy bins')\n plt.ylabel('# of node neighborhoods')\n plt.title(title)\n\n\n# 
deprecated-------------------------------------------------------------------------------------------------------------------\n def update_node_features(self):\n self.update_pressure_matrix()\n for row in range(0,self.dim):\n for column in range(0,self.dim):\n node_id=(column+row*3)\n try:\n self.agent_state_dic[node_id][0]=(self.pressure_matrix[column][row])\n except Exception as e:\n print(e)\n breakpoint()\n\n # def creat_vc_count_dic(self):\n # lane_vc_count_dic=self.environment.eng.get_lane_vehicle_count()\n # vc_count_dic={}\n # for lane in lane_vc_count_dic:\n # road= self.road2int(self.lane2road(lane))\n # if not road in vc_count_dic: vc_count_dic[road]=0\n # return vc_count_dic\n\n # def update_vc_count_dic(self):\n # lane_vc_count_dic=self.environment.eng.get_lane_vehicle_count()\n # self.refresh_vc_count_dic()\n # for lane in lane_vc_count_dic:\n # road= self.road2int(self.lane2road(lane))\n # # if road==10:\n # # breakpoint()\n # self.vc_count_dic[road]+=lane_vc_count_dic[lane]\n\n\n def refresh_vc_count_dic(self):\n \n for road in self.vc_count_dic:self.vc_count_dic[road]=0\n\n def update_pressure_matrix(self):\n self.update_vc_count_dic()\n for row in range(0,self.dim):\n for column in range(0,self.dim):\n try:\n self.pressure_matrix[column][row]=self.get_pressure(column,row)\n except Exception as e:\n print(e)\n breakpoint()\n\n def get_press_embd(self,road):\n press_embd=[0]*self.press_embd_dim\n\n nroad=self.move(road,0)\n nroad=[x-1 for x in nroad]\n if nroad[0]<self.dim and nroad[1]<self.dim:\n press_embd[0]=self.pressure_matrix[nroad[0]][nroad[1]]\n\n nroad=self.move(road,1)\n nroad=[x-1 for x in nroad]\n if nroad[0]<self.dim and nroad[1]<self.dim:\n press_embd[0]=self.pressure_matrix[nroad[0]][nroad[1]]\n\n nroad=self.move(road,2)\n nroad=[x-1 for x in nroad]\n if nroad[0]<self.dim and nroad[1]<self.dim:\n press_embd[0]=self.pressure_matrix[nroad[0]][nroad[1]]\n\n\n return press_embd\n\n def get_pressure(self,column,row):\n # column and rows are 1-indexed\n row+=1\n column+=1\n\n in_roads=[\n [column-1,row,0],\n [column+1,row,2],\n [column,row-1,1],\n [column,row+1,3]\n ]\n out_roads=[\n [column,row,0],\n [column,row,1],\n [column,row,2],\n [column,row,3],\n ]\n pressure=0\n\n for road in in_roads:\n pressure+=self.vc_count_dic[self.road2int(road)]\n\n for road in out_roads:\n pressure-=self.vc_count_dic[self.road2int(road)]\n\n return pressure\n def get_edge_index_among_node_out_edges(self,edge_id,node_id):\n return self.network.get_out_edges(node_id).index(edge_id) \n\n def get_edge_index_among_node_in_edges(self,edge_id,node_id):\n return self.network.get_in_edges(node_id).index(edge_id)"
] |
[
[
"torch.autograd.set_detect_anomaly",
"torch.zeros",
"pandas.DataFrame",
"torch.vstack",
"numpy.max",
"torch.no_grad",
"numpy.histogram",
"torch.eq",
"torch.tensor",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.array",
"numpy.random.RandomState",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"scipy.stats.entropy",
"numpy.row_stack",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel"
]
] |
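create_edge_index in the Utils record above flattens an adjacency-list dict into the (2, E+V) COO tensor that GAT layers consume, inserting one self-loop per node. A standalone sketch with a toy graph, keyed by node index so the agent-id lookup is elided:

import numpy
import torch

adjacency = {0: [1, 2], 1: [2], 2: [0]}  # toy directed graph

source_ids, target_ids, seen = [], [], set()
for src, neighbours in adjacency.items():
    source_ids.append(src)   # self-loop first, as with add_self_edges=True
    target_ids.append(src)
    seen.add((src, src))
    for trg in neighbours:
        if (src, trg) not in seen:
            source_ids.append(src)
            target_ids.append(trg)
            seen.add((src, trg))

edge_index = torch.tensor(numpy.row_stack((source_ids, target_ids)), dtype=torch.long)
print(edge_index.shape)  # torch.Size([2, 7]): E=4 edges plus V=3 self-loops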
adtamayop/Ejercicios_NN
|
[
"b5e9412ca03f6bb1f82ebe71a0c4ef16f80ec028"
] |
[
"tensorflow_graph_in_jupyter.py"
] |
[
"from __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\n# This module defines the show_graph() function to visualize a TensorFlow graph within Jupyter.\r\n\r\n# As far as I can tell, this code was originally written by Alex Mordvintsev at:\r\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb\r\n\r\n# The original code only worked on Chrome (because of the use of <link rel=\"import\"...>, but the version below\r\n# uses Polyfill (copied from this StackOverflow answer: https://stackoverflow.com/a/41463991/38626)\r\n# so that it can work on other browsers as well.\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom IPython.display import clear_output, Image, display, HTML\r\n\r\ndef strip_consts(graph_def, max_const_size=32):\r\n \"\"\"Strip large constant values from graph_def.\"\"\"\r\n strip_def = tf.GraphDef()\r\n for n0 in graph_def.node:\r\n n = strip_def.node.add() \r\n n.MergeFrom(n0)\r\n if n.op == 'Const':\r\n tensor = n.attr['value'].tensor\r\n size = len(tensor.tensor_content)\r\n if size > max_const_size:\r\n tensor.tensor_content = b\"<stripped %d bytes>\"%size\r\n return strip_def\r\n\r\ndef show_graph(graph_def, max_const_size=32):\r\n \"\"\"Visualize TensorFlow graph.\"\"\"\r\n if hasattr(graph_def, 'as_graph_def'):\r\n graph_def = graph_def.as_graph_def()\r\n strip_def = strip_consts(graph_def, max_const_size=max_const_size)\r\n code = \"\"\"\r\n <script src=\"//cdnjs.cloudflare.com/ajax/libs/polymer/0.3.3/platform.js\"></script>\r\n <script>\r\n function load() {{\r\n document.getElementById(\"{id}\").pbtxt = {data};\r\n }}\r\n </script>\r\n <link rel=\"import\" href=\"https://tensorboard.appspot.com/tf-graph-basic.build.html\" onload=load()>\r\n <div style=\"height:600px\">\r\n <tf-graph-basic id=\"{id}\"></tf-graph-basic>\r\n </div>\r\n \"\"\".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))\r\n\r\n iframe = \"\"\"\r\n <iframe seamless style=\"width:1200px;height:620px;border:0\" srcdoc=\"{}\"></iframe>\r\n \"\"\".format(code.replace('\"', '"'))\r\n display(HTML(iframe))"
] |
[
[
"tensorflow.GraphDef",
"numpy.random.rand"
]
] |
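A usage sketch for show_graph above; it assumes TensorFlow 1.x graph mode inside a Jupyter notebook (the function accepts either a Graph or a GraphDef, since it probes for as_graph_def):

import tensorflow as tf  # 1.x API
from tensorflow_graph_in_jupyter import show_graph

tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=(None, 3), name='x')
w = tf.Variable(tf.random_normal((3, 1)), name='w')
y = tf.matmul(x, w, name='y')

show_graph(tf.get_default_graph())  # renders the TensorBoard-style graph inline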
dominiccarrano/backdoor-nn-geometry
|
[
"d1fa0754f1d57a9b303e2eb71edf0787a86529c8"
] |
[
"trojai_runner.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport torch\nimport os\nimport argparse\nfrom torch.utils.data import DataLoader, sampler, TensorDataset, ConcatDataset\nfrom attack_functions import *\nfrom trojai_utils import *\nfrom boundary_geometry import *\n\nparser = argparse.ArgumentParser(description=\"TrojAI Round 5 script for boundary thickness and tilting\")\nparser.add_argument('--N', type=int, help=\"number of embeddings of each class to use\")\nparser.add_argument('--embedding-type', type=str, \n choices = ['GPT-2', 'BERT', 'DistilBERT'], \n help='use which embedding')\nparser.add_argument('--architecture-type', type=str, \n choices = ['GruLinear', 'LstmLinear'], \n help='use which architecture')\nparser.add_argument('--batch-size', type=int, \n help='Batch size for the adversarial attacks')\nparser.add_argument('--eps', type=float,\n help='PGD attack strength')\nparser.add_argument('--iters', type=int,\n help='PGD attack iterations')\nargs = parser.parse_args()\n\n# For Round 5 (change as needed based on your file system's structure)\nTHICK_NAMES = [\"clean\", \"adv+to-\", \"adv-to+\", \"uap+to-\", \"uap-to+\"]\nTILT_NAMES = [\"adv_adv+to-\", \"adv_adv-to+\", \"uap_uap+to-\", \"uap_uap-to+\"]\nBASE_EMBEDDINGS_PATH = \"your embedding path\"\nRESULTS_PATH_TRAIN = \"your train results path\"\nRESULTS_PATH_TEST = \"your test results path\"\nRESULTS_PATH_HOLDOUT = \"your holdout results path\"\nMETADATA_TRAIN = pd.read_csv(\"place where training set's METADATA.csv is\")\nMETADATA_TEST = pd.read_csv(\"place where test set's METADATA.csv is\")\nMETADATA_HOLDOUT = pd.read_csv(\"place where holdout set's METADATA.csv is\")\nTRAIN_BASE_PATH = \"point me to round5-train-dataset\"\nTEST_BASE_PATH = \"point me to round5-test-dataset\"\nHOLDOUT_BASE_PATH = \"point me to round5-holdout-dataset\"\n\n# Round 5 reference models (50 per (embedding, architecture) type)\nREF_IDS = {\n \"BERT\": {\"LstmLinear\": [14, 68, 73, 74, 98, 110, 123, 138, 163, 168, 196, 234, 240, 256, 263, 274, 299, 303, 318, 320, 349, 364, 389, 395, 405, 422, 446, 450, 463, 503, 512, 517, 524, 526, 533, 542, 563, 576, 599, 605, 617, 643, 646, 706, 707, 709, 710, 716, 719, 720], \n \"GruLinear\": [20, 22, 30, 47, 67, 69, 79, 87, 92, 93, 97, 109, 112, 122, 152, 157, 165, 171, 175, 178, 181, 183, 185, 187, 190, 220, 230, 266, 273, 279, 294, 315, 322, 334, 336, 342, 354, 404, 415, 421, 431, 474, 477, 491, 497, 499, 502, 506, 511, 519]},\n \"DistilBERT\": {\"LstmLinear\": [2, 12, 83, 86, 104, 105, 127, 131, 134, 135, 141, 156, 159, 201, 243, 244, 254, 272, 288, 310, 321, 332, 374, 377, 387, 398, 399, 416, 427, 445, 449, 460, 464, 483, 510, 523, 532, 537, 541, 543, 551, 570, 583, 588, 631, 648, 669, 670, 673, 678], \n \"GruLinear\": [8, 17, 39, 41, 42, 45, 49, 55, 63, 76, 90, 96, 103, 149, 153, 176, 177, 179, 184, 193, 204, 208, 213, 231, 239, 245, 265, 270, 306, 347, 348, 350, 365, 371, 384, 391, 396, 419, 423, 425, 467, 468, 476, 487, 500, 516, 527, 529, 531, 548]},\n \"GPT-2\": {\"LstmLinear\": [13, 18, 29, 48, 61, 72, 80, 88, 95, 100, 108, 114, 121, 132, 151, 158, 161, 162, 197, 198, 226, 228, 258, 264, 285, 304, 312, 317, 325, 333, 337, 345, 351, 368, 373, 386, 401, 403, 418, 426, 433, 461, 466, 472, 479, 493, 507, 508, 514, 530], \n \"GruLinear\": [3, 7, 28, 32, 36, 52, 59, 71, 82, 89, 124, 126, 128, 148, 154, 191, 205, 206, 207, 224, 236, 237, 241, 246, 251, 253, 259, 260, 278, 284, 287, 289, 301, 335, 356, 360, 362, 366, 367, 378, 409, 411, 438, 471, 478, 485, 509, 513, 546, 547]}\n}\nUAP_MIN_SUCCESS_RATE = .80\n\ndtype = 
torch.float32\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\ndef filter_dataset(models, ds):\n    # Keep all datapoints that are classified correctly by at least 70% of the models\n    filtered_ds = []\n    for _, (x, y) in enumerate(DataLoader(ds, batch_size=args.batch_size)):\n        successes = torch.zeros(len(y), device=device)\n        for model in models:\n            pred = torch.argmax(model(x), dim=-1)\n            successes += (pred == y)\n        correct_pred_ids = successes >= (0.7 * len(models))\n        filtered_ds.append(TensorDataset(x[correct_pred_ids], y[correct_pred_ids]))\n    return ConcatDataset(filtered_ds)\n\ndef make_perturbed_datasets(models, pos_ds, neg_ds, batch_size, attack_type, eps, iters, step_size):\n    # Run the attack\n    attack = PGDAdversarialDataset(models, eps=eps, step_size=step_size, iters=iters, p=2, universal=(attack_type==\"uap\"))\n    attacked_pos_ds, pos_loss_final = make_adversarial_dataset(pos_ds, attack, batch_size)\n    attacked_neg_ds, neg_loss_final = make_adversarial_dataset(neg_ds, attack, batch_size)\n\n    # Verify success\n    mean_psr, mean_nsr = 0, 0\n    for model in models:\n        psr = flip_success(attacked_pos_ds, 0, model) # + == 1, so want it to flip to 0\n        nsr = flip_success(attacked_neg_ds, 1, model) # - == 0, so want it to flip to 1\n        mean_psr, mean_nsr = (mean_psr + psr / len(models)), (mean_nsr + nsr / len(models))\n        if not (psr > UAP_MIN_SUCCESS_RATE and nsr > UAP_MIN_SUCCESS_RATE):\n            print(\"psr {}, nsr {} failed to pass threshold {}\".format(psr, nsr, UAP_MIN_SUCCESS_RATE))\n            raise RuntimeError(\"attack success rate below threshold\")\n    print(\"mean flip success rates:\", mean_psr, mean_nsr)\n    return attacked_pos_ds, attacked_neg_ds, pos_loss_final, neg_loss_final\n\ndef compute_geometry(pos_ds, neg_ds, batch_size, eps, iters, step_size):\n    # Get reference model's datasets\n    ref_model_ids = REF_IDS[args.embedding_type][args.architecture_type]\n    ref_models = [load_model(ref_model_id, TRAIN_BASE_PATH)[0] for ref_model_id in ref_model_ids]\n    ref_filt_pos_ds, ref_filt_neg_ds = filter_dataset(ref_models, pos_ds), filter_dataset(ref_models, neg_ds)\n    print(\"\\t ref model filter dataset lengths:\", len(ref_filt_pos_ds), len(ref_filt_neg_ds))\n    ref_adv_pos_ds, ref_adv_neg_ds, _, _ = make_perturbed_datasets(ref_models, ref_filt_pos_ds, ref_filt_neg_ds, batch_size, \"adv\", eps, iters, step_size)\n    ref_uap_pos_ds, ref_uap_neg_ds, _, _ = make_perturbed_datasets(ref_models, ref_filt_pos_ds, ref_filt_neg_ds, batch_size, \"uap\", eps, iters, step_size)\n\n    # Compute features\n    for which in [\"clean\", \"poisoned\"]:\n        for metadata, base_path, results_path in zip([METADATA_TRAIN, METADATA_TEST, METADATA_HOLDOUT], [TRAIN_BASE_PATH, TEST_BASE_PATH, HOLDOUT_BASE_PATH], [RESULTS_PATH_TRAIN, RESULTS_PATH_TEST, RESULTS_PATH_HOLDOUT]):\n            model_ids = metadata.index[(metadata.embedding==args.embedding_type)\n                                       & (metadata.model_architecture==args.architecture_type)\n                                       & (metadata.poisoned==(which==\"poisoned\"))].tolist()\n\n            # Iterate over models\n            for i, model_id in enumerate(model_ids):\n                try:\n                    # Load model and only keep samples it correctly classifies\n                    model, _ = load_model(model_id, base_path)\n                    filt_pos_ds, filt_neg_ds = filter_dataset([model], pos_ds), filter_dataset([model], neg_ds)\n                    print(\"\\t model {} len(filt_pos_ds): {}, len(filt_neg_ds): {}\".format(model_id, len(filt_pos_ds), len(filt_neg_ds)))\n\n                    # Make adv and UAP datasets\n                    adv_pos_ds, adv_neg_ds, adv_pos_loss_final, adv_neg_loss_final = make_perturbed_datasets([model], filt_pos_ds, filt_neg_ds, batch_size, \"adv\", eps, iters, step_size)\n                    uap_pos_ds, uap_neg_ds, uap_pos_loss_final, uap_neg_loss_final = make_perturbed_datasets([model], filt_pos_ds, filt_neg_ds, batch_size, \"uap\", eps, iters, step_size)\n\n                    # Compute boundary thickness\n                    xr_ds_thick = [filt_pos_ds, filt_pos_ds, filt_neg_ds, filt_pos_ds, filt_neg_ds]\n                    xs_ds_thick = [filt_neg_ds, adv_pos_ds, adv_neg_ds, uap_pos_ds, uap_neg_ds]\n                    for xr_ds, xs_ds, file_suffix in zip(xr_ds_thick, xs_ds_thick, THICK_NAMES):\n                        # NOTE: batch_size in boundary_thickness has no effect on the statistical accuracy of the\n                        # computation, it only affects how many inputs go through the DNN at a time. We have to\n                        # set it to a low value (32, for our TrojAI experiments) since we sample 1000 points along\n                        # the line segment between each pair of inputs, implying 32 * 1000 points are going through\n                        # the DNN at a time; feel free to adjust based on how powerful your GPUs are\n                        thick = boundary_thickness(xr_ds, xs_ds, model, [(0, 0.75), (0, 1)], batch_size=32, num_points=1000)\n                        torch.save(thick, os.path.join(results_path, args.embedding_type, args.architecture_type, which + file_suffix + \"_thickness{}.pt\".format(model_id)))\n\n                    # Compute boundary tilting\n                    xr_ds_tilt = [filt_pos_ds, filt_neg_ds, filt_pos_ds, filt_neg_ds]\n                    xr_adv_ds_tilt = [adv_pos_ds, adv_neg_ds, uap_pos_ds, uap_neg_ds]\n                    xs_ds_tilt = [ref_adv_pos_ds, ref_adv_neg_ds, ref_uap_pos_ds, ref_uap_neg_ds]\n                    for xr_ds, xs_ds, xr_adv_ds, file_suffix in zip(xr_ds_tilt, xs_ds_tilt, xr_adv_ds_tilt, TILT_NAMES):\n                        tilt = boundary_tilting(xr_ds, xs_ds, xr_adv_ds, model, batch_size=args.batch_size, reduce_clean=False)\n                        torch.save(tilt, os.path.join(results_path, args.embedding_type, args.architecture_type, which + file_suffix + \"_tilting{}.pt\".format(model_id)))\n\n                except Exception as e:\n                    print(\"Failed for model_id {}: {}\".format(model_id, e))\n\n                # Print progress\n                print(\"{0} of {1} {2} models done\".format(i + 1, len(model_ids), which))\n\ndef get_dataset(embeddings, labels):\n    embeddings = embeddings.to(device)\n    labels = labels.to(device)\n    dataset = torch.utils.data.TensorDataset(embeddings, labels)\n    return dataset\n\n# Load in embeddings to use\npos_embeddings = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, \"pos_embeddings{}.pt\".format(args.N)))\npos_labels = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, \"pos_labels{}.pt\".format(args.N)))\npos_ds = get_dataset(pos_embeddings, pos_labels)\n\nneg_embeddings = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, \"neg_embeddings{}.pt\".format(args.N)))\nneg_labels = torch.load(os.path.join(BASE_EMBEDDINGS_PATH, args.embedding_type, \"neg_labels{}.pt\".format(args.N)))\nneg_ds = get_dataset(neg_embeddings, neg_labels)\n\n# Compute and save features\nstep_size = 2 * args.eps / args.iters\ncompute_geometry(pos_ds, neg_ds, args.batch_size, args.eps, args.iters, step_size)"
] |
[
[
"pandas.read_csv",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"torch.cuda.is_available",
"torch.device"
]
] |
KailongPeng/rt-cloud
|
[
"c19d524b9fa6f15966f1c0c4dd6fcbe55b386126"
] |
[
"tests/test_bidsIncremental.py"
] |
[
"from copy import deepcopy\nimport logging\nimport os\nimport pickle\n\nfrom bids.layout import BIDSImageFile\nfrom bids.layout.writing import build_path as bids_build_path\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom rtCommon.bidsCommon import (\n BIDS_DIR_PATH_PATTERN,\n BIDS_FILE_PATTERN,\n PYBIDS_PSEUDO_ENTITIES,\n BidsFileExtension,\n getNiftiData,\n metadataFromProtocolName,\n)\nfrom rtCommon.bidsIncremental import BidsIncremental\nfrom rtCommon.bidsArchive import BidsArchive\nfrom rtCommon.errors import MissingMetadataError\nfrom tests.common import isValidBidsArchive\n\nlogger = logging.getLogger(__name__)\n\n\n# Test that construction fails for image metadata missing required fields\ndef testInvalidConstruction(sample2DNifti1, samplePseudo2DNifti1,\n sample4DNifti1, imageMetadata):\n # Test empty image\n with pytest.raises(TypeError):\n BidsIncremental(image=None,\n imageMetadata=imageMetadata)\n\n # Test 2-D image\n with pytest.raises(ValueError) as err:\n BidsIncremental(image=sample2DNifti1,\n imageMetadata=imageMetadata)\n assert \"Image must have at least 3 dimensions\" in str(err.value)\n\n # Test 2-D image masquerading as 4-D image\n with pytest.raises(ValueError) as err:\n BidsIncremental(image=samplePseudo2DNifti1,\n imageMetadata=imageMetadata)\n assert (\"Image's 3rd (and any higher) dimensions are <= 1, which means \"\n \"it is a 2D image; images must have at least 3 dimensions\" in\n str(err.value))\n\n # Test incomplete metadata\n protocolName = imageMetadata.pop(\"ProtocolName\")\n for key in BidsIncremental.REQUIRED_IMAGE_METADATA:\n value = imageMetadata.pop(key)\n\n assert not BidsIncremental.isCompleteImageMetadata(imageMetadata)\n with pytest.raises(MissingMetadataError):\n BidsIncremental(image=sample4DNifti1,\n imageMetadata=imageMetadata)\n\n imageMetadata[key] = value\n imageMetadata[\"ProtocolName\"] = protocolName\n\n # Test too-large repetition and echo times\n for key in [\"RepetitionTime\", \"EchoTime\"]:\n original = imageMetadata[key]\n imageMetadata[key] = 10**6\n\n with pytest.raises(ValueError):\n BidsIncremental(image=sample4DNifti1,\n imageMetadata=imageMetadata)\n\n imageMetadata[key] = original\n\n # Test non-image object\n with pytest.raises(TypeError) as err:\n notImage = \"definitely not an image\"\n BidsIncremental(image=notImage,\n imageMetadata=imageMetadata)\n assert (\"Image must be one of [nib.Nifti1Image, nib.Nifti2Image, \"\n f\"BIDSImageFile (got {type(notImage)})\" in str(err.value))\n\n # Test non-functional data\n with pytest.raises(NotImplementedError) as err:\n original = imageMetadata['datatype']\n invalidType = 'anat'\n imageMetadata['datatype'] = invalidType\n BidsIncremental(image=sample4DNifti1,\n imageMetadata=imageMetadata)\n imageMetadata['datatype'] = original\n\n assert (\"BIDS Incremental for BIDS datatypes other than 'func' is not \"\n f\"yet implemented (got '{invalidType}')\") in str(err.value)\n\n\n# Test that valid arguments produce a BIDS incremental\ndef testValidConstruction(sample3DNifti1, sample3DNifti2,\n sample4DNifti1, sampleNifti2, bidsArchive4D,\n imageMetadata):\n # 3-D should be promoted to 4-D\n assert BidsIncremental(sample3DNifti1, imageMetadata) is not None\n assert BidsIncremental(sample3DNifti2, imageMetadata) is not None\n\n # Both Nifti1 and Nifti2 images should work\n assert BidsIncremental(sample4DNifti1, imageMetadata) is not None\n assert BidsIncremental(sampleNifti2, imageMetadata) is not None\n\n # If the metadata provides a RepetitionTime or 
EchoTime that works without\n # adjustment, the construction should still work\n repetitionTimeKey = \"RepetitionTime\"\n original = imageMetadata[repetitionTimeKey]\n imageMetadata[repetitionTimeKey] = 1.5\n assert BidsIncremental(sample4DNifti1, imageMetadata) is not None\n imageMetadata[repetitionTimeKey] = original\n\n # Passing a BIDSImageFile is also valid\n image = bidsArchive4D.getImages()[0]\n assert type(image) is BIDSImageFile\n assert BidsIncremental(image, imageMetadata) is not None\n\n\n# Test that metadata values are of the correct types, if required by BIDS\ndef testMetadataTypes(validBidsI):\n typeDict = {\"RepetitionTime\": float, \"EchoTime\": float}\n\n for field, typ in typeDict.items():\n assert type(validBidsI.getMetadataField(field)) is typ\n\n\n# Test that the provided image metadata dictionary takes precedence over the\n# metadata parsed from the protocol name, if any\ndef testConstructionMetadataPrecedence(sample4DNifti1, imageMetadata):\n assert imageMetadata.get('ProtocolName', None) is not None\n metadata = metadataFromProtocolName(imageMetadata['ProtocolName'])\n assert len(metadata) > 0\n\n assert metadata.get('run', None) is not None\n newRunNumber = int(metadata['run']) + 1\n imageMetadata['run'] = newRunNumber\n assert metadata['run'] != imageMetadata['run']\n\n incremental = BidsIncremental(sample4DNifti1, imageMetadata)\n assert incremental.getMetadataField('run') == newRunNumber\n\n\n# Test that the string output of the BIDS-I is as expected\ndef testStringOutput(validBidsI):\n imageShape = str(validBidsI.getImageDimensions())\n keyCount = len(validBidsI._imgMetadata.keys())\n version = validBidsI.version\n assert str(validBidsI) == f\"Image shape: {imageShape}; \" \\\n f\"Metadata Key Count: {keyCount}; \" \\\n f\"BIDS-I Version: {version}\"\n\n\n# Test that equality comparison is as expected\ndef testEquals(sample4DNifti1, sample3DNifti1, imageMetadata):\n # Test images with different headers\n assert BidsIncremental(sample4DNifti1, imageMetadata) != \\\n BidsIncremental(sample3DNifti1, imageMetadata)\n\n # Test images with the same header, but different data\n newData = 2 * getNiftiData(sample4DNifti1)\n reversedNifti1 = nib.Nifti1Image(newData, sample4DNifti1.affine,\n header=sample4DNifti1.header)\n assert BidsIncremental(sample4DNifti1, imageMetadata) != \\\n BidsIncremental(reversedNifti1, imageMetadata)\n\n # Test different image metadata\n modifiedImageMetadata = deepcopy(imageMetadata)\n modifiedImageMetadata[\"subject\"] = \"newSubject\"\n assert BidsIncremental(sample4DNifti1, imageMetadata) != \\\n BidsIncremental(sample4DNifti1, modifiedImageMetadata)\n\n # Test different dataset metadata\n datasetMeta1 = {\"Name\": \"Dataset_1\", \"BIDSVersion\": \"1.0\"}\n datasetMeta2 = {\"Name\": \"Dataset_2\", \"BIDSVersion\": \"2.0\"}\n assert BidsIncremental(sample4DNifti1, imageMetadata, datasetMeta1) != \\\n BidsIncremental(sample4DNifti1, imageMetadata, datasetMeta2)\n\n # Test different readme\n incremental1 = BidsIncremental(sample4DNifti1, imageMetadata)\n incremental2 = BidsIncremental(sample4DNifti1, imageMetadata)\n readme1 = \"README 1\"\n readme2 = \"README 2\"\n\n incremental1.readme = readme1\n incremental2.readme = readme2\n assert incremental1 != incremental2\n\n # Test different events file\n incremental1 = BidsIncremental(sample4DNifti1, imageMetadata)\n incremental2 = BidsIncremental(sample4DNifti1, imageMetadata)\n\n events1 = {'onset': [1, 25, 50], 'duration': [10, 10, 10], 'response_time':\n [15, 36, 70]}\n events2 = {key: [v + 
5 for v in events1[key]] for key in events1.keys()}\n\n incremental1.events = pd.DataFrame(data=events1)\n incremental2.events = pd.DataFrame(data=events2)\n assert incremental1 != incremental2\n\n\n# Test that image metadata dictionaries can be properly created by the class\ndef testImageMetadataDictCreation(imageMetadata):\n createdDict = BidsIncremental.createImageMetadataDict(\n subject=imageMetadata[\"subject\"],\n task=imageMetadata[\"task\"],\n suffix=imageMetadata[\"suffix\"],\n repetitionTime=imageMetadata[\"RepetitionTime\"],\n datatype='func')\n\n for key in createdDict.keys():\n assert createdDict.get(key) == imageMetadata.get(key)\n\n # Ensure that the method is in sync with the required metadata\n # Get all required fields as lowerCamelCase for passing as kwargs\n requiredFieldsCamel = [(key[0].lower() + key[1:]) for key in\n BidsIncremental.REQUIRED_IMAGE_METADATA]\n dummyValue = 'n/a'\n metadataDict = {key: dummyValue for key in requiredFieldsCamel}\n createdDict = BidsIncremental.createImageMetadataDict(**metadataDict)\n\n for field in BidsIncremental.REQUIRED_IMAGE_METADATA:\n assert createdDict[field] == dummyValue\n\n\n# Test that internal metadata dictionary is independent from the argument dict\ndef testMetadataDictionaryIndependence(sample4DNifti1, imageMetadata):\n incremental = BidsIncremental(sample4DNifti1, imageMetadata)\n\n key = 'subject'\n assert incremental.getMetadataField(key) == imageMetadata[key]\n old = incremental.getMetadataField(key)\n\n imageMetadata[key] = 'a brand-new subject'\n assert incremental.getMetadataField(key) == old\n assert incremental.getMetadataField(key) != imageMetadata[key]\n\n\n# Test that invalid dataset.json fields are rejected and valid ones are accepted\ndef testDatasetMetadata(sample4DNifti1, imageMetadata):\n # Test invalid dataset metadata\n with pytest.raises(MissingMetadataError):\n BidsIncremental(image=sample4DNifti1,\n imageMetadata=imageMetadata,\n datasetDescription={\"random_field\": \"doesnt work\"})\n\n # Test valid dataset metadata\n dataset_name = \"Test dataset\"\n bidsInc = BidsIncremental(image=sample4DNifti1,\n imageMetadata=imageMetadata,\n datasetDescription={\"Name\": dataset_name,\n \"BIDSVersion\": \"1.0\"})\n assert bidsInc.getDatasetName() == dataset_name\n\n\n# Test that extracting metadata from the BIDS-I using its provided API returns\n# the correct values\ndef testMetadataOutput(validBidsI, imageMetadata):\n with pytest.raises(ValueError):\n validBidsI.getMetadataField(\"InvalidEntityName\", strict=True)\n with pytest.raises(KeyError):\n validBidsI.getMetadataField(\"InvalidEntityName\")\n\n # Data type - always 'func' currently\n assert validBidsI.getDatatype() == \"func\"\n # Entities\n for entity in ['subject', 'task']:\n assert validBidsI.getMetadataField(entity) == imageMetadata[entity]\n # Suffix\n assert validBidsI.getSuffix() == imageMetadata[\"suffix\"]\n\n\n# Test setting BIDS-I metadata API works as expected\ndef testSetMetadata(validBidsI):\n # Test non-official BIDS entity fails with strict\n with pytest.raises(ValueError):\n validBidsI.setMetadataField(\"nonentity\", \"value\", strict=True)\n\n # Non-official BIDS entity succeeds without strict\n validBidsI.setMetadataField(\"nonentity\", \"value\", strict=False)\n assert validBidsI.getMetadataField(\"nonentity\", strict=False) == \"value\"\n validBidsI.removeMetadataField(\"nonentity\", strict=False)\n\n # None field is invalid\n with pytest.raises(ValueError):\n validBidsI.setMetadataField(None, \"test\")\n\n entityName = 
\"subject\"\n newValue = \"newValue\"\n originalValue = validBidsI.getMetadataField(entityName)\n\n validBidsI.setMetadataField(entityName, newValue)\n assert validBidsI.getMetadataField(entityName) == newValue\n\n validBidsI.setMetadataField(entityName, originalValue)\n assert validBidsI.getMetadataField(entityName) == originalValue\n\n\n# Test removing BIDS-I metadata API works as expected\ndef testRemoveMetadata(validBidsI):\n # Fail for entities that don't exist\n with pytest.raises(ValueError):\n validBidsI.removeMetadataField(\"nonentity\", strict=True)\n\n # Fail for entities that are required to be in the dictionary\n with pytest.raises(RuntimeError):\n validBidsI.removeMetadataField(\"subject\")\n\n entityName = \"ProtocolName\"\n originalValue = validBidsI.getMetadataField(entityName)\n\n validBidsI.removeMetadataField(entityName)\n with pytest.raises(KeyError):\n validBidsI.getMetadataField(entityName) is None\n\n validBidsI.setMetadataField(entityName, originalValue)\n assert validBidsI.getMetadataField(entityName) == originalValue\n\n\n# Test that the BIDS-I interface methods for extracting internal NIfTI data\n# return the correct values\ndef testQueryNifti(validBidsI):\n # Image data\n queriedData = validBidsI.getImageData()\n exactData = getNiftiData(validBidsI.image)\n assert np.array_equal(queriedData, exactData), \"{} elements not equal\" \\\n .format(np.sum(np.where(queriedData != exactData)))\n\n # Header Data\n queriedHeader = validBidsI.getImageHeader()\n exactHeader = validBidsI.image.header\n\n # Compare full image header\n assert queriedHeader.keys() == exactHeader.keys()\n for (field, queryValue) in queriedHeader.items():\n exactValue = exactHeader.get(field)\n if queryValue.dtype.char == 'S':\n assert queryValue == exactValue\n else:\n assert np.allclose(queryValue, exactValue, atol=0.0, equal_nan=True)\n\n # Compare Header field: Dimensions\n FIELD = \"dim\"\n assert np.array_equal(queriedHeader.get(FIELD), exactHeader.get(FIELD))\n\n\n# Test that constructing BIDS-compatible filenames from internal metadata\n# returns the correct filenames\ndef testFilenameConstruction(validBidsI, imageMetadata):\n \"\"\"\n General format:\n sub-<label>[_ses-<label>]_task-<label>[_acq-<label>] [_ce-<label>]\n [_dir-<label>][_rec-<label>][_run-<index>]\n [_echo-<index>]_<contrast_label >.ext\n \"\"\"\n baseFilename = bids_build_path(imageMetadata, BIDS_FILE_PATTERN)\n\n assert baseFilename + \".nii\" == \\\n validBidsI.makeBidsFileName(BidsFileExtension.IMAGE)\n assert baseFilename + \".json\" == \\\n validBidsI.makeBidsFileName(BidsFileExtension.METADATA)\n\n\n# Test that the hypothetical path for the BIDS-I if it were in an archive is\n# correct based on the metadata within it\ndef testArchivePathConstruction(validBidsI, imageMetadata):\n assert validBidsI.getDataDirPath() == \\\n bids_build_path(imageMetadata, BIDS_DIR_PATH_PATTERN)\n\n\n# Test that writing the BIDS-I to disk returns a properly formatted BIDS archive\n# in the correct location with all the data in the BIDS-I\ndef testDiskOutput(validBidsI, tmpdir):\n # Write the archive\n datasetRoot = os.path.join(tmpdir, \"bids-pytest-dataset\")\n validBidsI.writeToDisk(datasetRoot)\n\n # Validate the output can be opened by BidsArchive and verified against the\n # source BIDS-Incremental\n archive = BidsArchive(datasetRoot)\n archiveImage = archive.getImages()[0]\n\n # Remove pseudo entities to avoid conflict with the validBidsI\n metadata = archive.getSidecarMetadata(archiveImage, includeEntities=True)\n for entity in 
PYBIDS_PSEUDO_ENTITIES:\n metadata.pop(entity)\n\n incrementalFromArchive = BidsIncremental(archiveImage, metadata)\n assert incrementalFromArchive == validBidsI\n\n assert isValidBidsArchive(archive.rootPath)\n\n # Try only writing data\n datasetRoot = os.path.join(tmpdir, \"bids-pytest-dataset-2\")\n validBidsI.writeToDisk(datasetRoot, onlyData=True)\n assert not os.path.exists(os.path.join(datasetRoot, \"README\"))\n assert not os.path.exists(os.path.join(datasetRoot,\n \"dataset_description.json\"))\n\n\n# Test serialization results in equivalent BIDS-I object\ndef testSerialization(validBidsI, sample4DNifti1, imageMetadata, tmpdir):\n # Copy the NIfTI source image to a different location\n sourceFileName = 'test.nii'\n sourceFilePath = os.path.join(tmpdir, sourceFileName)\n nib.save(sample4DNifti1, sourceFilePath)\n\n sourceNifti = nib.load(sourceFilePath)\n incremental = BidsIncremental(sourceNifti, imageMetadata)\n\n # validBidsI is derived from a file elsewhere on disk, so we can use it as a\n # reference once the file 'incremental' is derived from is removed\n # Transitive property gives us:\n # IF incremental == validBidsI AND validBidsI == deserialized\n # THEN incremental == deserialized\n assert incremental == validBidsI\n\n # Serialize the object\n serialized = pickle.dumps(incremental)\n del incremental\n\n # Now remove image file so the deserialized object can't access it\n os.remove(sourceFilePath)\n\n # Deserialize the object\n deserialized = pickle.loads(serialized)\n\n # Compare equality\n assert validBidsI == deserialized\n\n # Check there's no file mapping\n assert deserialized.image.file_map['image'].filename is None\n"
] |
[
[
"numpy.array_equal",
"numpy.where",
"numpy.allclose",
"pandas.DataFrame"
]
] |
anonymouscodeeee/MBRL4FIN
|
[
"f7608b54e1a21be5f9e37ab9b249e825b872b35f"
] |
[
"dynamic_model.py"
] |
[
"import torch\nimport torch.nn as nn\nimport math\nimport copy\nfrom typing import Tuple, Optional, List, Callable\nimport abc\nimport random\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pathlib\nimport numpy as np\n\nimport sys, os\n\nos.environ[\"PATH\"] = os.environ[\"PATH\"] + \":\" + os.path.realpath(\"./third_party/mbrl\")\n\nfrom mbrl.models import Model\n\n# 30只股票一起放进去 \nclass ModelPredict(Model):\n def __init__(self, teacher_ratio=0.2, ensemble_nums=5, update_window=30):\n super().__init__()\n self.input_dim = 30\n self.ensemble_nums = ensemble_nums\n self.update_window = update_window\n self.register_buffer(\"indexes\", torch.zeros(self.input_dim, dtype=torch.long))\n # self.teacher_ratio = teacher_ratio\n lstm_func = lambda: nn.LSTM(\n input_size=2,\n hidden_size=32,\n num_layers=3,\n batch_first=True,\n dropout=0.2,\n )\n mlp_func = lambda: nn.Sequential(\n nn.Linear(32, 32),\n nn.ReLU(True),\n nn.Dropout(0.2),\n nn.Linear(32, 1),\n )\n self.lstms = nn.ModuleList()\n self.mlps = nn.ModuleList()\n for _ in range(ensemble_nums):\n self.lstms.append(lstm_func())\n self.mlps.append(mlp_func())\n\n self.loss_func = nn.MSELoss(reduction=\"none\")\n \n #加入(x_n+1 - x_n)为特征,进行forward\n def forward(\n self,\n history_price_seq: torch.tensor,\n actions,#每日单只股票的操作序列\n vol,#每日单只股票交易量\n ):\n history_price_seq = history_price_seq[\n :, max(0, history_price_seq.shape[1] - self.update_window) :\n ]\n #delta_price_seq <--> y_n = (x_n+1 - x_n)\n delta_price_seq = history_price_seq[:,1:] - history_price_seq[:,:-1]\n pad = torch.zeros((self.stock_dim,1),device = history_price_seq.device)\n delta_price_seq = torch.cat([pad,delta_price_seq],dim = 1)\n\n #加入log(action)/log(vol),表示每一操作对价格的影响,进而影响s_n+1\n action_inful = actions.log()/vol.log()\n\n #lstm input = (history_price_seq, delta_price_seq)\n lstm_input_seq = torch.cat([history_price_seq.unsqueeze(2),delta_price_seq.unsqueeze(2)],dim = 2)\n\n prices = torch.zeros(\n (history_price_seq.shape[0], self.ensemble_nums),\n device=history_price_seq.device,\n )\n \n for i, (lstm, mlp) in enumerate(zip(self.lstms, self.mlps)):\n _, (h, _) = lstm(lstm_input_seq)\n mlp_input = torch.cat([h[-1],action_inful.unsqueeze(1)],dim = 1)\n prices[:, i] = mlp(mlp_input).squeeze()\n return prices\n\n def loss(\n self,\n model_input: torch.tensor,\n target: Optional[torch.Tensor] = None,\n ):\n prices = self.forward(model_input, None)\n mse = self.loss_func(prices, target.unsqueeze(1).repeat(1, self.ensemble_nums))\n return prices, mse\n\n @torch.no_grad()\n def eval_score(\n self, model_input: torch.tensor, target: Optional[torch.Tensor] = None\n ):\n \"\"\"\n return:最大误差,最大误差百分比,最小误差,最小误差百分比,平均误差百分比,mse误差\n return(\n max((predict_price)-true_price)/true_price %),\n min((predict_price)-true_price)/true_price %),\n mean((predict_price)-true_price)/true_price %)\n \"\"\"\n next_price = self.inference(model_input)\n error = abs((next_price.squeeze() - target) / target)\n max_error_percent, _ = error.topk(1)\n error = -1 * error\n min_error_percent, _ = error.topk(1)\n mean_error_percent = -1 * error.mean()\n error_mse = self.loss_func(next_price.squeeze(), target)\n return (\n next_price,\n max_error_percent,\n -1 * min_error_percent,\n mean_error_percent,\n error_mse.mean(),\n )\n\n def update_indexes(self, indexes):\n self.indexes = indexes\n\n def inference(self, history_price_seq: torch.tensor, actions, vol):\n history_price_seq = history_price_seq[:, max(0, history_price_seq.shape[1] - self.update_window) :]\n\n delta_price_seq = 
history_price_seq[:,1:] - history_price_seq[:,:-1]\n pad = torch.zeros((self.stock_dim,1),device = history_price_seq.device)\n delta_price_seq = torch.cat([pad,delta_price_seq],dim = 1)\n\n action_inful = actions.log()/vol.log()\n\n lstm_input_seq = torch.cat([history_price_seq.unsqueeze(2),delta_price_seq.unsqueeze(2)],dim = 2)\n \n output_tensor = torch.zeros((self.input_dim, 1), device=history_price_seq.device)\n \"\"\"按照lstm_i(ensembel_number)对股票进行分块,将同一模型处理的股票同时计算\"\"\"\n index_arg = self.indexes.argsort() # argsort()返回 索引:处理对象 从小到大\n position = 0\n for i in range(self.ensemble_nums):\n num_i = (self.indexes == i).sum()\n if num_i > 0:\n indexes_i = index_arg[position : num_i + position]\n position += num_i\n lstm_input_seq_i = lstm_input_seq[indexes_i]\n _, (h, _) = self.lstms[i](lstm_input_seq_i)\n action_influ_i = action_inful.unsqueeze(1)[indexes_i]\n mlp_input_i = torch.cat([h[-1], action_influ_i],dim = 1)\n output_tensor[indexes_i] = self.mlps[i](mlp_input_i)\n return output_tensor\n\n\ndef train(\n model: ModelPredict,\n dataset_train,\n dataset_val,\n actions,\n trade_vol,\n num_epochs,\n device,\n teacher_forcing = 0.2,\n path: pathlib.Path = \"\",\n callback_train: Callable = None,\n callback_val: Callable = None,\n):\n epoch_iter = range(num_epochs)\n\n optim_lr = 0.001\n optimizer = torch.optim.Adam(model.parameters(), lr=optim_lr)\n\n model.to(device)\n for epoch in epoch_iter:\n print(f\"epoch ======================={epoch}==========================\")\n mses = torch.zeros((model.input_dim, model.ensemble_nums), device=device)\n predict_price: List = []\n for day in range(1, dataset_train.shape[1]): # dataset_train.shape[1]\n input = dataset_train[:, :day]\n # 加入teacher forcing\n if day > 1 and random.random() < teacher_forcing:\n input[:, -1] = predict_price[-1].squeeze()\n target = dataset_train[:, day]\n input, target = input.to(device), target.to(device)\n model.train()\n optimizer.zero_grad()\n # 找到30只股票的历史mse之和最小的为其预测index\n prices, mse = model.loss(input, target, actions, trade_vol)\n mses = mses + mse\n indexes = mses.argmin(-1)\n model.update_indexes(indexes)\n # 索引出预测价格\n price = prices.gather(1, indexes.unsqueeze(1))\n predict_price.append(price.clone().detach())\n mse.mean().backward()\n optimizer.step()\n print(mse.detach().mean().item())\n if callback_train is not None:\n callback_train(epoch, day, mse.detach().cpu().mean().numpy())\n model.save(path)\n\n res = torch.zeros((dataset_val.shape[1], 4))\n next_prices = torch.zeros((dataset_val.shape[0], dataset_val.shape[1]))\n for day in range(0, dataset_val.shape[1]):\n input = torch.cat([dataset_train,dataset_val[:, :day]], dim = 1)\n target = dataset_val[:, day]\n input, target = input.to(device), target.to(device)\n (\n next_prices[:, day:day+1],\n res[day, 0],\n res[day, 1],\n res[day, 2],\n res[day, 3],\n ) = model.eval_score(input, target, actions, )\n val_loss_set = res[day].detach().cpu().numpy()\n if callback_val is not None:\n callback_val(epoch, day, next_prices[:, day].detach().cpu().numpy(), val_loss_set)\n print(\n f\"max_error_percent = {res[:,0].max()}, min_error_percent = {res[:,1].min()}, mean_error_percent = {res[:,2].mean()}, error_mse = {res[:,3].mean()}\"\n )\n \n\n\n\n# train_loss val_loss plot\n\n# 5 个LSTM同时跑?\n# RL daily update price 不更新参数 ;p_n+1 = model(p_n); 30个(s_n,a,s_n+1,r),buffer_update:当新放入30个四元组,就讲原有的最旧的30个四元组排除; 4000(一只股票 rolling 4000 daily)*30 = buffer_size\n# If epoch/30 ==0,train a ensemble lstm ,每只股票给一个ensemble lstm的index,同时五个model save;dynamic_model 30 update\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.zeros",
"torch.nn.LSTM",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] |
tomwhite/covid-19-uk-data
|
[
"ca22568cea5a67863fe513eb6ad27aaf9ad58f48"
] |
[
"tools/crawl_all.py"
] |
[
"#!/usr/bin/env python\n\n# Crawls all the historical data in one go. This ensures that revisions of old data are accounted for.\n\nimport dateparser\nimport datetime\nimport json\nimport math\nimport numpy as np\nimport os\nimport pandas as pd\nimport requests\nimport sqlite3\nimport sys\nimport xmltodict\n\nfrom crawl import get_json_url\nfrom parsers import (\n parse_daily_areas_json,\n save_daily_areas,\n save_daily_areas_to_sqlite,\n)\nfrom util import format_country, la_to_hb, lookup_health_board_code, lookup_local_government_district_code, read_json, read_json_post\n\ndef save_indicator_to_sqlite(date, country, indicator, value):\n with sqlite3.connect('data/covid-19-uk.db') as conn:\n c = conn.cursor()\n c.execute(f\"INSERT OR REPLACE INTO indicators VALUES ('{date}', '{country}', '{indicator}', {value})\")\n\ndef save_indicators_df_to_sqlite(df, country, indicator):\n with sqlite3.connect('data/covid-19-uk.db') as conn:\n c = conn.cursor()\n # Clear out old data?\n #c.execute(f\"DELETE FROM indicators WHERE Country = '{country}' AND Indicator = '{indicator}'\")\n for index, row in df.iterrows():\n date = row[\"Date\"]\n value = row[indicator]\n c.execute(f\"INSERT OR REPLACE INTO indicators VALUES ('{date}', '{country}', '{indicator}', {value})\")\n\ndef save_cases_df_to_sqlite(df, country, delete_old=True):\n with sqlite3.connect('data/covid-19-uk.db') as conn:\n c = conn.cursor()\n if delete_old:\n c.execute(f\"DELETE FROM cases WHERE Country = '{country}'\")\n for index, row in df.iterrows():\n date = row[\"Date\"]\n area_code = row[\"AreaCode\"]\n area = row[\"Area\"]\n value = row[\"TotalCases\"]\n c.execute(f\"INSERT OR REPLACE INTO cases VALUES ('{date}', '{country}', '{area_code}', '{area}', '{value}')\")\n\n# Use Our World In Data for some UK stats that are not included in PHE's CSV or JSON files\n# UK historical test numbers (people tested)\ndef crawl_owid(use_local=False):\n if use_local:\n file = \"data/raw/owid/covid-testing-all-observations.csv\"\n else:\n file = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv\"\n df = pd.read_csv(file)\n df = df[df[\"Entity\"] == \"United Kingdom - people tested\"]\n df = df[[\"Date\", \"Cumulative total\"]]\n df.rename(columns={\"Cumulative total\": \"Tests\"}, inplace=True)\n save_indicators_df_to_sqlite(df, \"UK\", \"Tests\")\n\n # Not used due to data mismatch\n # UK historical confirmed cases\n # if use_local:\n # file = \"data/raw/owid/total_cases.csv\"\n # else:\n # file = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/total_cases.csv\"\n # df = pd.read_csv(file)\n # df = df[[\"date\", \"United Kingdom\"]]\n # df.rename(columns={\"date\": \"Date\", \"United Kingdom\": \"ConfirmedCases\"}, inplace=True)\n # save_indicators_df_to_sqlite(df, \"UK\", \"ConfirmedCases\")\n\n\n# UK (and all nations) historical deaths\n# England historical confirmed cases\n# England UTLAs historical confirmed cases\ndef crawl_phe(use_local=False):\n if use_local:\n file = \"data/raw/phe/data_latest.json\"\n else:\n file = \"https://coronavirus.data.gov.uk/downloads/data/data_latest.json\"\n\n json_data = read_json(file)\n\n def total_deaths_df(country_code, country):\n if country_code == \"K02000001\":\n cases = json_data[\"overview\"][country_code][\"dailyTotalDeaths\"]\n else:\n cases = json_data[\"countries\"][country_code][\"dailyTotalDeaths\"]\n cases = {elt[\"date\"]: [elt[\"value\"]] for elt in cases}\n df = pd.DataFrame.from_dict(cases, 
orient='index', columns=[country])\n df[\"Date\"] = df.index\n df.rename(columns={country: \"Deaths\"}, inplace=True)\n return df\n\n df = total_deaths_df(\"K02000001\", \"United Kingdom\")\n save_indicators_df_to_sqlite(df, \"UK\", \"Deaths\")\n\n df = total_deaths_df(\"E92000001\", \"England\")\n save_indicators_df_to_sqlite(df, \"England\", \"Deaths\")\n\n # Found from PHS data instead\n # df = total_deaths_df(\"S92000003\", \"Scotland\")\n # save_indicators_df_to_sqlite(df, \"Scotland\", \"Deaths\")\n\n # TBD\n # df = total_deaths_df(\"W92000004\", \"Wales\")\n # save_indicators_df_to_sqlite(df, \"Wales\", \"Deaths\")\n\n # TBD\n # df = total_deaths_df(\"N92000002\", \"Northern Ireland\")\n # save_indicators_df_to_sqlite(df, \"Northern Ireland\", \"Deaths\")\n\n # Get UK ConfirmedCases, but only latest value since historical data is not available\n last_updated = json_data[\"lastUpdatedAt\"]\n last_updated_date = last_updated.split(\"T\")[0]\n uk_cases = json_data[\"overview\"][\"K02000001\"][\"totalCases\"][\"value\"]\n save_indicator_to_sqlite(last_updated_date, \"UK\", \"ConfirmedCases\", uk_cases)\n\n def total_confirmed_cases_country_df(country_code, country):\n cases = json_data[\"countries\"][country_code][\"dailyTotalConfirmedCases\"]\n cases = {elt[\"date\"]: [elt[\"value\"]] for elt in cases}\n df = pd.DataFrame.from_dict(cases, orient='index', columns=[country])\n df[\"Date\"] = df.index\n df.rename(columns={country: \"ConfirmedCases\"}, inplace=True)\n return df\n\n df = total_confirmed_cases_country_df(\"E92000001\", \"England\")\n save_indicators_df_to_sqlite(df, \"England\", \"ConfirmedCases\")\n\n def total_confirmed_cases_utla_df(utla_code, utla):\n cases = json_data[\"utlas\"][utla_code][\"dailyTotalConfirmedCases\"]\n cases = {elt[\"date\"]: [elt[\"value\"]] for elt in cases}\n df = pd.DataFrame.from_dict(cases, orient='index', columns=[\"TotalCases\"])\n df[\"Date\"] = df.index\n df[\"AreaCode\"] = utla_code\n df[\"Area\"] = utla\n df[\"Country\"] = \"England\"\n df = df[[\"Date\", \"Country\", \"AreaCode\", \"Area\", \"TotalCases\"]]\n return df\n\n all_cases_dfs = []\n for utla_code in json_data[\"utlas\"].keys():\n cases = total_confirmed_cases_utla_df(utla_code, json_data[\"utlas\"][utla_code][\"name\"][\"value\"])\n all_cases_dfs.append(cases)\n area_cases = pd.concat(all_cases_dfs, ignore_index=True)\n save_cases_df_to_sqlite(area_cases, \"England\")\n\n\n# Scotland historical test numbers\n# Scotland historical confirmed cases\n# Scotland historical deaths\n# Scotland health board historical confirmed cases\ndef crawl_phs(use_local=False):\n if not use_local:\n urls = get_phs_xlsx_urls()\n\n if use_local:\n file = \"data/raw/phs/HSCA+-+SG+Website+-+Indicator+Trends+for+daily+data+publication.xlsx\"\n else:\n file = urls[\"totals\"]\n\n df = pd.read_excel(file, sheet_name=\"Table 5 - Testing\", skiprows=3)\n df.rename(columns={\"Unnamed: 0\": \"Date\"}, inplace=True)\n df[\"Date\"] = df[\"Date\"].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)\n df = df[[\"Date\", \"Positive\", \"Total\"]]\n df.rename(columns={\"Total\": \"Tests\", \"Positive\": \"ConfirmedCases\"}, inplace=True)\n save_indicators_df_to_sqlite(df, \"Scotland\", \"Tests\")\n save_indicators_df_to_sqlite(df, \"Scotland\", \"ConfirmedCases\")\n\n df = pd.read_excel(file, sheet_name=\"Table 8 - Deaths\", skiprows=2)\n df.rename(columns={\"Number of COVID-19 confirmed deaths registered to date\": \"Deaths\"}, inplace=True)\n df[\"Date\"] = df[\"Date\"].apply(lambda x: 
x.strftime('%Y-%m-%d')).astype(str)\n save_indicators_df_to_sqlite(df, \"Scotland\", \"Deaths\")\n\n if use_local:\n file = \"data/raw/phs/Board-level+figures+-+FOR+ONLINE+PUBLICATION.xlsx\"\n else:\n file = urls[\"areas\"]\n\n df = pd.read_excel(file, sheet_name=\"Table 1 - Cumulative cases\", skiprows=2)\n df[\"Date\"] = df[\"Date\"].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)\n df = df.drop(columns=['Scotland'])\n df = df[[c for c in df.columns if not c.startswith('Unnamed')]]\n area_cases = df.melt(id_vars=[\"Date\"], var_name=\"Area\", value_name=\"TotalCases\")\n area_cases = area_cases.replace(\"*\", \"NaN\")\n area_cases[\"Area\"] = area_cases[\"Area\"].apply(lambda hb: hb.replace(\"NHS\", \"\").replace(\"&\", \"and\").strip())\n area_cases[\"AreaCode\"] = area_cases[\"Area\"].apply(lambda hb: lookup_health_board_code(hb))\n area_cases[\"Country\"] = \"Scotland\"\n area_cases = area_cases[[\"Date\", \"Country\", \"AreaCode\", \"Area\", \"TotalCases\"]]\n save_cases_df_to_sqlite(area_cases, \"Scotland\")\n\ndef get_phs_xlsx_urls():\n # URLs have dates embedded in them, so scrape them from HTML page\n from bs4 import BeautifulSoup\n from urllib.parse import urljoin\n url = \"https://www.gov.scot/publications/trends-in-number-of-people-in-hospital-with-confirmed-or-suspected-covid-19/\"\n html = requests.get(url).text\n soup = BeautifulSoup(html, features=\"html.parser\")\n urls = {}\n for link in soup.findAll(\"a\"):\n if link.get_text().startswith(\"Trends in daily COVID-19 data\"):\n urls[\"totals\"] = urljoin(url, link.get(\"href\"))\n elif link.get_text().startswith(\"COVID-19 data by NHS Board\"):\n urls[\"areas\"] = urljoin(url, link.get(\"href\"))\n return urls\n\n# Wales historical test numbers\n# Wales historical confirmed cases\n# Wales historical deaths\n# Wales health board historical confirmed cases\ndef crawl_phw(use_local=False):\n if use_local:\n file = \"data/raw/phw/Rapid COVID-19 surveillance data.xlsx\"\n else:\n file = \"http://www2.nphs.wales.nhs.uk:8080/CommunitySurveillanceDocs.nsf/3dc04669c9e1eaa880257062003b246b/77fdb9a33544aee88025855100300cab/$FILE/Rapid%20COVID-19%20surveillance%20data.xlsx\"\n\n df = pd.read_excel(file, sheet_name=\"Tests by specimen date\")\n df[\"Date\"] = df[\"Specimen date\"].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)\n df.rename(columns={\"Cumulative testing episodes\": \"Tests\", \"Cumulative cases\": \"ConfirmedCases\"}, inplace=True)\n\n tests = df.groupby(\"Date\", as_index=False)[[\"Tests\"]].sum()\n cases = df.groupby(\"Date\", as_index=False)[[\"ConfirmedCases\"]].sum()\n\n save_indicators_df_to_sqlite(tests, \"Wales\", \"Tests\")\n save_indicators_df_to_sqlite(cases, \"Wales\", \"ConfirmedCases\")\n\n def lookup_hb(la):\n hb = la_to_hb(la)\n if hb is None:\n return la\n return hb\n\n df.rename(columns={\"ConfirmedCases\": \"TotalCases\"}, inplace=True)\n df[\"Area\"] = df[\"Local Authority\"].apply(lambda la: lookup_hb(la))\n area_cases = df.groupby([\"Date\", \"Area\"], as_index=False)[[\"TotalCases\"]].sum()\n area_cases[\"AreaCode\"] = area_cases[\"Area\"].apply(lambda hb: lookup_health_board_code(hb))\n area_cases[\"Country\"] = \"Wales\"\n area_cases = area_cases[[\"Date\", \"Country\", \"AreaCode\", \"Area\", \"TotalCases\"]]\n save_cases_df_to_sqlite(area_cases, \"Wales\")\n\n df = pd.read_excel(file, sheet_name=\"Deaths by date\")\n df[\"Date\"] = df[\"Date of death\"].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)\n df.rename(columns={\"Cumulative deaths\": \"Deaths\"}, inplace=True)\n 
save_indicators_df_to_sqlite(df, \"Wales\", \"Deaths\")\n\n\ndef crawl_ni(use_local=False):\n headers = {\"X-PowerBI-ResourceKey\": \"df16636e-99fe-4801-a5a1-20466a39f7bf\"}\n\n request_json = read_json(\"data/raw/ni/request-cumulative-tests.json\")\n if use_local:\n file = \"data/raw/ni/response-cumulative-tests.json\"\n else:\n file = \"https://wabi-north-europe-api.analysis.windows.net/public/reports/querydata?synchronous=true\"\n\n json_data = read_json_post(file, headers, request_json)\n tests = json_data[\"results\"][0][\"result\"][\"data\"][\"dsr\"][\"DS\"][0][\"PH\"][0][\"DM0\"]\n tests = {datetime.datetime.fromtimestamp(elt[\"C\"][0] / 1000).strftime('%Y-%m-%d'): elt[\"C\"][1:] for elt in tests}\n df = pd.DataFrame.from_dict(tests, orient='index', columns=[\"Tests\", \"ConfirmedCases\"])\n df[\"Date\"] = df.index\n df = df.fillna(method=\"ffill\") # fill missing values from previous\n save_indicators_df_to_sqlite(df, \"Northern Ireland\", \"Tests\")\n save_indicators_df_to_sqlite(df, \"Northern Ireland\", \"ConfirmedCases\")\n\n request_json = read_json(\"data/raw/ni/request-cumulative-deaths.json\")\n if use_local:\n file = \"data/raw/ni/response-cumulative-deaths.json\"\n else:\n file = \"https://wabi-north-europe-api.analysis.windows.net/public/reports/querydata?synchronous=true\"\n\n json_data = read_json_post(file, headers, request_json)\n deaths = json_data[\"results\"][0][\"result\"][\"data\"][\"dsr\"][\"DS\"][0][\"PH\"][0][\"DM0\"]\n deaths_dict = {}\n for idx, elt in enumerate(deaths):\n date = datetime.datetime.fromtimestamp(elt[\"C\"][0] / 1000).strftime('%Y-%m-%d') \n if len(elt[\"C\"]) == 1 and elt.get(\"R\", None) == 2: # R means repeat?\n # use previous\n value = [deaths[idx - 1][\"C\"][1]]\n else:\n value = [elt[\"C\"][1]]\n deaths_dict[date] = value\n df = pd.DataFrame.from_dict(deaths_dict, orient='index', columns=[\"Deaths\"])\n df[\"Date\"] = df.index\n save_indicators_df_to_sqlite(df, \"Northern Ireland\", \"Deaths\")\n\n request_json = read_json(\"data/raw/ni/request-area-cases.json\")\n if use_local:\n file = \"data/raw/ni/response-area-cases.json\"\n else:\n file = \"https://wabi-north-europe-api.analysis.windows.net/public/reports/querydata?synchronous=true\"\n\n json_data = read_json_post(file, headers, request_json)\n area_cases = json_data[\"results\"][0][\"result\"][\"data\"][\"dsr\"][\"DS\"][0][\"PH\"][1][\"DM1\"]\n area_cases = {elt[\"C\"][0]: [elt[\"C\"][2]] for elt in area_cases}\n df = pd.DataFrame.from_dict(area_cases, orient='index', columns=[\"TotalCases\"])\n df[\"Area\"] = df.index\n df[\"AreaCode\"] = df[\"Area\"].apply(lambda lgd: lookup_local_government_district_code(lgd))\n df[\"Country\"] = \"Northern Ireland\"\n df[\"Date\"] = json_data[\"results\"][0][\"result\"][\"data\"][\"timestamp\"].split(\"T\")[0]\n df = df[[\"Date\", \"Country\", \"AreaCode\", \"Area\", \"TotalCases\"]]\n save_cases_df_to_sqlite(df, \"Northern Ireland\", delete_old=False)\n\n\nif __name__ == \"__main__\":\n #pd.set_option('display.max_rows', None)\n\n use_local = False\n\n if len(sys.argv) == 2:\n source = sys.argv[1]\n if source.lower() == \"owid\":\n crawl_owid(use_local)\n elif source.lower() == \"phe\":\n crawl_phe(use_local)\n elif source.lower() == \"phs\":\n crawl_phs(use_local)\n elif source.lower() == \"phw\":\n crawl_phw(use_local)\n elif source.lower() == \"ni\":\n crawl_ni(use_local)\n else:\n crawl_owid(use_local)\n crawl_phe(use_local)\n crawl_phs(use_local)\n crawl_phw(use_local)\n crawl_ni(use_local)\n"
] |
[
[
"pandas.read_excel",
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] |
sitaber/pyDM404
|
[
"63e84cc3b82c6b1910846e388954d67f3fb05ade"
] |
[
"app.py"
] |
[
"# -----------------------------------------------------------------------------\n# pyDM404 - A cross platform Drum Sequencer\n# Author: Scott Taber\n# File: app.py - Nuts and bolts of application\n# Version: 1.2\n# Contains all functions and classes for drawing to screen, getting user input\n# loading and saving files, starting and stoping clock process, etc.\n# -----------------------------------------------------------------------------\n\nimport os\nimport sys\nimport json\n\nimport numpy as np\nimport pygame\nfrom pygame.locals import *\n\nimport clock as mc\n\n# RECT func ------------------------------------------------------------ #\n# Functions for creating the pygame Rect objects used for the buttons and other screen elements\n\ndef make_func_rects(start=95):\n button_1 = pygame.Rect(start+15, 170, 50, 10)\n button_2 = pygame.Rect(start+125,170, 50, 10)\n button_3 = pygame.Rect(start+245, 170, 50, 10)\n button_4 = pygame.Rect(start+360,170, 50, 10)\n return [button_1, button_2, button_3, button_4]\n\ndef make_text_rect(text, font, color, x, y):\n textobj = font.render(text, 1, color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n return [textobj, textrect]\n \ndef make_playrec_rects(start, font, screen):\n text_rects = []\n \n button_play = pygame.Rect(start+490,100,20,20)\n led_play = pygame.draw.circle(screen, (255,255,255), (start+480+20,140-10), 5) \n\n rects = make_text_rect(\"PLAY\", font, (255, 255, 255), start+485,140)\n text_rects.append(rects)\n\n button_record = pygame.Rect(start+530,100,20,20)\n led_record = pygame.draw.circle(screen, (255, 255, 255), (start+530+10,140-10), 5)\n \n rects = make_text_rect(\"REC\", font, (255,255,255), start+530,140)\n text_rects.append(rects)\n \n return [button_play, led_play, button_record, led_record], text_rects\n \ndef make_control_rects(start, font):\n buttons_down = []\n buttons_up = []\n text_rects = []\n buttons_text1 = []\n buttons_text2 = []\n for i, t in enumerate([\"SEQ\",\"BPM\",\"AC\",\"MET\"]):\n button_down = pygame.Rect(start+520,12+14*i, 10, 10)\n button_up = pygame.Rect(start+550,12+14*i, 10, 10)\n buttons_down.append(button_down)\n buttons_up.append(button_up)\n \n rects = make_text_rect(t, font, (255, 255, 255), start+480, 10+14*i)\n text_rects.append(rects)\n if t == \"MET\":\n rects = make_text_rect(\"OFF\", font, (255, 255, 255), start+515,10+18*i)\n buttons_text1.append(rects)\n \n rects = make_text_rect(\"ON\", font, (255, 255, 255), start+548, 10+18*i)\n buttons_text2.append(rects)\n else:\n rects = make_text_rect(\"-\", font, (0, 0, 0), start+522, 10+14*i)\n buttons_text1.append(rects)\n \n rects = make_text_rect(\"+\", font, (0, 0, 0), start+551, 10+14*i)\n buttons_text2.append(rects) \n \n return buttons_down, buttons_up, text_rects, buttons_text1, buttons_text2 \n\n\n \n# Sequencer Class ------------------------------------------------------- #\nclass Sequencer():\n auto_cor = [\"4\",\"8\",\"16\", \"16t\", \"HI-REZ\"]\n note_ppq = [np.array([0,24]), \n np.array([0,12,24]), \n np.array([0,6,12,18,24]),\n np.array([0,4,8,12,16,20,24]), \n np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,\n 21,22,23,24]) ]\n __transport__ = ['pulse', 'beat', 'bar', 'playing']\n \n recording = False\n delete_flag = False\n bpm = 90\n seq_num = 0\n tc = 0\n met_on = True\n \n def __init__(self, send_conn):\n self.send_conn = send_conn\n self.clock = None\n self.state = 0\n for val in self.__transport__:\n setattr(self, val, 0)\n \n self.sequences = [self.make_seq() for i in range(8)]\n 
self.current_seq = self.sequences[self.seq_num]\n self.queued_seq = None\n # Need two sequences, one for recording and one for play back\n self.seq_record = self.current_seq[0]\n self.seq_play = self.current_seq[1]\n \n def make_seq(self, length = 2 ):\n seq_record = np.zeros((8, 24*4*length), dtype='i,i') \n seq_play = seq_record.copy()\n return [seq_record, seq_play]\n \n def toggle(self):\n if self.state == 0: self.play()\n elif self.state == 1: self.stop() \n \n def play(self):\n self.state = 1\n self.clock = mc.ClockGen()\n self.clock.shared_bpm.value = self.bpm\n self.clock.launch_process(self.send_conn) # Explain how this works. \n \n def stop(self):\n self.clock.end_process()\n self.copy()\n self.state = 0\n self.queued_seq = None\n for val in self.__transport__:\n setattr(self, val, 0)\n\n def copy(self):\n self.seq_play = self.seq_record.copy() \n \n def set_bpm(self, val):\n self.bpm += val\n if self.clock:\n self.clock.shared_bpm.value = self.bpm\n \n def set_tc(self, val):\n self.tc = max(0, min(4, self.tc+val))\n \n def change_seq(self, val):\n if self.state == 1:\n if self.queued_seq is not None:\n self.queued_seq = max(0, min(7, self.queued_seq+val))\n else: self.queued_seq = max(0, min(7, self.seq_num+val))\n\n elif (self.recording == False or self.state == 0):\n self.seq_num = max(0, min(7, self.seq_num+val))\n self.current_seq = self.sequences[self.seq_num]\n self.seq_record = self.current_seq[0]\n self.seq_play = self.current_seq[1]\n self.copy()\n \n def step_end(self): \n # Call At end of main function\n self.pulse += 1\n if self.pulse == self.seq_play.shape[1]: self.pulse = 0\n if self.pulse % 24 == 0: self.beat += 1\n if self.beat == 4:\n self.beat = 0\n self.bar += 1\n if self.bar == 2:\n self.bar = 0 \n self.copy() \n if self.queued_seq is not None:\n self.current_seq = self.sequences[self.queued_seq]\n self.seq_num = self.queued_seq \n self.queued_seq = None\n self.seq_record = self.current_seq[0]\n self.seq_play = self.current_seq[1]\n self.copy()\n\n def ekey_to_idx(self, ekey):\n pad_keys = [K_a,K_s,K_d,K_f,K_g,K_h,K_j,K_k]\n pad_idx = pad_keys.index(ekey) \n return pad_idx\n \n def record(self, ekey, pads):\n pad = self.ekey_to_idx(ekey)\n tick = self.auto_correct()\n self.seq_record[pad, tick][0] = 1\n # Here is the pitch setting, just need to add something for velocity\n self.seq_record[pad, tick][1] = pads[pad].pitch \n \n def delete(self, ekey, whole = False):\n pad = self.ekey_to_idx(ekey)\n self.seq_record[pad, self.pulse] = 0\n if whole:\n self.seq_record[pad, :] = 0\n\n def to_play(self, ekey):\n pad = self.ekey_to_idx(ekey)\n val = self.seq_play[pad, self.pulse][0] \n pitch = self.seq_play[pad, self.pulse][1] \n return val, pitch\n \n # AUTOCORRECT FUNCTION ------------------------------------- ###########\n def auto_correct(self):\n scaled_ac = self.note_ppq[self.tc] + 24*self.beat+96*self.bar\n correct_to_idx = np.argmin( np.abs(self.pulse-scaled_ac) ) \n correct_to = scaled_ac[correct_to_idx]\n if correct_to == self.seq_play.shape[1]:\n correct_to = 0\n return correct_to\n\n # STEP SEQ --------------------------------------------------------- #\n def step_edit(self, chan, pulse, record, pads):\n if record:\n self.seq_record[chan, pulse][0] = 1\n self.seq_record[chan, pulse][1] = pads[chan].pitch\n if not record:\n self.seq_record[chan, pulse][0] = 0\n self.copy()\n \n return\n# Setup -------------------------------------------------- # \ndef build_chan():\n BASE = pygame.Surface((40,210)) \n notch_y = [200-(y*8) for y in range(0,25)]\n for y in 
notch_y:\n pygame.draw.line(BASE, (80,80,80), (0, y), (5,y), 2)\n pygame.draw.line(BASE, (255,255,255), (19,0), (19,210), 1)\n return BASE\n\n# LOAD functions ------------------------------------------------------- #\ndef load_disk(path, pads, snd_files):\n sounds = [load_snd(x, path) for x in snd_files]\n pads = load_config(path, pads, snd_files, sounds)\n seqs = load_seq(path)\n return pads, sounds, seqs\n \ndef load_config(path, pads, snd_files, sounds):\n attrs = [\"sound_file\",\"pitch\",\"channel_out\",\"volume\"]\n \n with open(path[:-8]+'config.json', 'r') as f:\n params = json.load(f) \n \n for i,pad in enumerate(pads):\n for attr in attrs:\n setattr(pad, attr, params[i][attr])\n snd_file = getattr(pad,\"sound_file\")\n if snd_file is not None:\n idx = snd_files.index(snd_file)\n pad.sound = sounds[idx]\n else: pad.sound = None\n return pads \n \ndef load_snd(to_load, path):\n snd = pygame.mixer.Sound(path+to_load)\n snd_array = pygame.sndarray.array(snd)\n return pitch_it(snd_array)\n\ndef pitch_it(data):\n pitch_snd = []\n for n in np.arange(1,13): \n idx = np.arange(0, data.shape[0] * 2**(n/12) ) * 2**(-n/12)\n idx_floor = np.floor(idx).astype(\"int\")\n new_data = data[ idx_floor[idx_floor < data.shape[0]] ]\n pitch_snd.append( pygame.sndarray.make_sound(new_data) )\n \n pitch_snd.reverse()\n pitch_snd.append(pygame.sndarray.make_sound(data))\n for n in np.arange(1,13): \n idx = np.arange(0, data.shape[0]) * 2**(n/12)\n idx_floor = np.floor(idx).astype(\"int\")\n new_data = data[ idx_floor[ idx_floor < data.shape[0] ] ]\n pitch_snd.append( pygame.sndarray.make_sound(new_data) ) \n \n return pitch_snd \n\ndef load_seq(path):\n #path = DISKS/{disk}/\n seqs = []\n for i in range(8):\n temp = np.load(path[:-8]+f\"seq0{i}.npy\")\n seqs.append([temp.copy(), temp.copy()]) \n return seqs \n\n# SAVE functions ------------------------------------------------------ #\ndef save_disk(path, pads, seqs):\n config = make_config(pads)\n save_config(path, config)\n save_seq(path, seqs)\n \ndef make_config(pads):\n params = {\"sound_file\": None, \"pitch\": 12, \"channel_out\": 1, \"volume\": 1.0}\n attrs = [\"sound_file\", \"pitch\", \"channel_out\", \"volume\"]\n configs = []\n for pad in pads:\n for attr in attrs:\n params[attr] = getattr(pad,attr)\n configs.append(params.copy())\n return configs\n \ndef save_config(path, config):\n with open(path+'config.json', 'w') as f:\n json.dump(config, f)\n\ndef save_seq(path, seqs):\n #path = DISKS/{disk}/\n #print(len(seqs))\n for i in range(8):\n to_save = seqs[i][0]\n np.save(path+f\"seq0{i}.npy\", to_save) \n\n# PAD class ----------------------------------------------------------- # \nclass Pad():\n def __init__(self, mixer):\n self.sound_file = None\n self.sound = None\n self.pitch = 12\n self.channel_out = 1\n self.volume = 24\n self.mixer = mixer\n\n# Main Class ---------------------------------------------------------- #\nclass DrumMachine():\n FLAGS = 0\n WIDTH = 800\n HEIGHT = 600\n FPS = 30\n WHITE = (255,255,255)\n GREY = (80,80,80)\n RED = (255,0,0)\n GREEN = (0,255,0)\n \n def __init__(self,receive_conn, send_conn):\n # Pygame inits ------- #\n pygame.mixer.pre_init(44100, -16, 2, 512)\n pygame.init()\n #pygame.midi.init()\n pygame.font.init()\n #self.midi_in = pygame.midi.Input(1) \n self.receive_conn = receive_conn\n self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT), self.FLAGS)\n self.mainClock = pygame.time.Clock()\n pygame.display.set_caption(\"pyDM404\")\n self.font = pygame.font.Font('assets/Kenney Future 
Narrow.ttf', 12)#pygame.font.SysFont(None, 20)#\n \n # Class vars ---------- #\n self.sequencer = Sequencer(send_conn)\n self.mixer_chan = build_chan()\n self.DISPLAY = pygame.Surface((460,165)) \n self.knob_notch_locs = [200-(y*8) for y in range(0,25)]\n self.pads = [Pad(self.mixer_chan) for i in range(8)]\n self.knobs = [] \n self.snd_files = []\n self.sounds = []\n self.met = pygame.mixer.Sound('assets/metronome.wav')\n self.met.set_volume(0.4) \n self.fill_rect =[]\n self.step_rect = []\n self.step_rect2 = []\n self.fill_rect2 = []\n \n def play(self,ekey, pitch = None):\n pad_keys = [K_a,K_s,K_d,K_f,K_g,K_h,K_j,K_k]\n pad_idx = pad_keys.index(ekey)\n if self.pads[pad_idx].sound is not None:\n if pitch is not None:\n pitch = pitch\n else:\n pitch = self.pads[pad_idx].pitch\n to_play = self.pads[pad_idx].sound[pitch]\n chan = self.pads[pad_idx].channel_out\n to_play.set_volume(0.04*self.pads[pad_idx].volume)\n pygame.mixer.Channel(chan).play(to_play)\n \n def draw_mixer(self,volume):\n start = 95 #0\n sub_mixer = pygame.Surface((40,210)) \n MIXER = pygame.Surface((460,210)) \n self.knobs = [] \n for x in range(8):\n if volume:\n level = self.pads[x].volume\n else: \n level = self.pads[x].pitch\n knob = pygame.Rect( (start+x*60, self.knob_notch_locs[level]-3+260, 40, 8) )\n self.knobs.append(knob)\n self.screen.blit(self.pads[x].mixer, (start+x*60,260))\n pygame.draw.rect(self.screen, (255,255,255), knob, 0) \n \n def draw_text(self,text, font, color, surface, x, y):\n textobj = font.render(text, 1, color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n\n def _draw_main(self, display_step):\n self.DISPLAY.fill((0,255,0)) \n if self.sequencer.queued_seq is not None:\n seq = str(self.sequencer.queued_seq) + '*'\n else: seq = self.sequencer.seq_num\n bpm = self.sequencer.bpm\n c = [self.sequencer.bar+1,self.sequencer.beat+1,self.sequencer.pulse]\n c[2] = self.sequencer.pulse - 24 * self.sequencer.beat - 96 * self.sequencer.bar\n ac = self.sequencer.auto_cor[self.sequencer.tc]\n DISPLAY_TEXT = [f\"SEQ: 0{seq}\",f\"BPM: {bpm}\", f\"COUNT: 0{c[0]}.0{c[1]}.{c[2]}\", f\"AC: {ac}\"]\n \n for i in [0,2]:\n t = self.font.render(DISPLAY_TEXT[i], True, (0, 0, 0))\n self.DISPLAY.blit(t, (5,10+i*10)) \n t = self.font.render(DISPLAY_TEXT[i+1], True, (0, 0, 0))\n self.DISPLAY.blit(t, (320,10+i*10)) \n \n if display_step:\n lower_menu = [\"<EXIT>\", \"<BAR->\", \"<BAR+>\", \"<>\"]\n else:\n lower_menu = [\"<LOAD>\", \"<ASSN SND>\", \"<STEPEDIT>\", \"<SAVE>\"]\n for x, b in enumerate(lower_menu):\n func_text = self.font.render(b, True, (0,0,0))\n self.DISPLAY.blit(func_text, (15+115*x,150)) \n \n self.screen.blit(self.DISPLAY, (95,0))\n return\n \n # DRAW STEP -------------------------------------------------- #\n def draw_step(self, step_bar_val):\n #STEPDISPLAY = pygame.Surface((460,165)) \n y1 = 75\n y2 = 145\n \n if self.sequencer.pulse < 96 and step_bar_val == 0:\n x1 = 30+self.sequencer.pulse*4\n pygame.draw.lines(self.DISPLAY, self.RED, False, [(x1,y1),(x1,y2)])\n elif self.sequencer.pulse > 96 and step_bar_val == 1:\n x1 = 30+(self.sequencer.pulse-96)*4\n pygame.draw.lines(self.DISPLAY, self.RED, False, [(x1,y1),(x1,y2)])\n \n for rect in self.fill_rect:\n pygame.draw.rect(self.DISPLAY, self.RED, rect)\n pygame.draw.rect(self.DISPLAY, self.GREEN, rect, width=1)\n for rect in self.step_rect:\n pygame.draw.rect(self.DISPLAY, self.WHITE, rect, width=1)\n \n func_text = self.font.render(\"0\"+str(step_bar_val+1)+\".\", True, (0,0,0))\n 
self.DISPLAY.blit(func_text, (0,53)) \n for i, pad_alpha in enumerate(['A','S','D','F','G','H','J','K']):\n self.draw_text(pad_alpha, self.font, (0, 0, 0), self.DISPLAY, 20, 63+10*i)\n \n for x, b in enumerate([\"1\", \"2\", \"3\", \"4\"]):\n func_text = self.font.render(b, True, (0,0,0))\n self.DISPLAY.blit(func_text, (26+(384/4)*x,53)) \n \n for x, b in enumerate([\"|\", \"|\", \"|\", \"|\"]):\n func_text = self.font.render(b, True, (0,0,0))\n self.DISPLAY.blit(func_text, (76+(384/4)*x,53)) \n \n self.screen.blit(self.DISPLAY, (95,0)) # DISPLAY IS LCD AREA - USE COOR SHIFT\n return\n \n def make_step_rects(self, step_bar_val):\n bar = step_bar_val\n self.step_rect = []\n self.fill_rect = []\n self.step_rect2 = []\n self.fill_rect2 = []\n steps = [4,8,16,24,32]\n length = steps[self.sequencer.tc]\n for i in range(8):\n for j in range(length):\n self.step_rect.append(pygame.Rect(30+(384/length)*j,65+10*i, 384/length, 10)) # DISPLAY COORDS\n self.step_rect2.append(pygame.Rect(95+30+(384/length)*j,65+10*i, 384/length, 10)) # SHIFT MAIN SCREEN\n for i in range(8):\n for j in range(96*bar,96*(bar+1)): #range(self.sequencer.seq_play.shape[1]-1): # \n val = self.sequencer.seq_record[i, j][0] \n if val:\n if bar == 0:\n self.fill_rect.append(pygame.Rect(30+4*j, 65+10*i, 384/32, 10))\n self.fill_rect2.append(pygame.Rect(95+30+4*j, 65+10*i, 384/32, 10))\n elif bar == 1:\n self.fill_rect.append(pygame.Rect(30+4*(j-96), 65+10*i, 384/32, 10))\n self.fill_rect2.append(pygame.Rect(95+30+4*(j-96), 65+10*i, 384/32, 10))\n # save fill_rect to self ==> mouse_pos_collide => if true remove fill and record\n \n # MAIN LOOP ----------------------------------------------------------- # \n def main_loop(self):\n click = False\n drag = False\n pad_keys = [K_a,K_s,K_d,K_f,K_g,K_h,K_j,K_k]\n step = False\n display_step = False\n step_bar_val = 0\n volume = True\n start = 95\n func_rects = make_func_rects(start = 95)\n playrec_rects, playrec_text = make_playrec_rects(95, self.font, self.screen)\n buttons_down, buttons_up, text_rects, buttons_text1, buttons_text2 = make_control_rects(95, self.font)\n mix_tune_rect = pygame.Rect((8,348,20,20))\n\n button_1 = pygame.Rect(start+15, 170, 50, 10)\n button_2 = pygame.Rect(start+125,170, 50, 10)\n button_3 = pygame.Rect(start+245, 170, 50, 10)\n button_4 = pygame.Rect(start+360,170, 50, 10)\n \n while True:\n self.screen.fill((0,0,0))\n \n mx, my = pygame.mouse.get_pos()\n mouse_rect = pygame.Rect((mx,my),(1,1)) \n # STEP SEQ boxes ----------------------------------------------- #\n select_idx2 = mouse_rect.collidelistall(self.step_rect2) \n if select_idx2 and click and self.sequencer.recording:\n idx = select_idx2[0]\n rect_clicked = self.step_rect[idx]\n rx,ry = rect_clicked.topleft\n r_chan = int((ry - 65)/ 10)\n r_pulse = int( ((rx - 30)/4)+96*step_bar_val)\n self.sequencer.step_edit(r_chan, r_pulse, True, self.pads) \n \n select_idx = mouse_rect.collidelistall(self.fill_rect2)\n if select_idx and click:\n idx = select_idx[0]\n rect_clicked = self.fill_rect[idx]\n rx,ry = rect_clicked.topleft\n r_chan = int((ry - 65)/ 10)\n r_pulse = int( ((rx - 30)/4)+96*step_bar_val)\n self.fill_rect.pop(idx)\n self.sequencer.step_edit(r_chan, r_pulse, False, self.pads) \n\n \n clock_on = (self.sequencer.clock and self.sequencer.state == 1)\n \n if button_1.collidepoint((mx, my)):\n if (click and display_step): display_step = not display_step\n elif (click and not display_step):\n if clock_on: self.sequencer.stop() \n self.load_menu()\n\n if button_2.collidepoint((mx, my)):\n if (click 
and display_step): step_bar_val = 0\n elif (click and not display_step):\n if clock_on: self.sequencer.stop() \n self.assn_menu()\n \n if button_3.collidepoint((mx, my)):\n if (click and not display_step): display_step = True\n elif (click and display_step): step_bar_val = 1\n \n if button_4.collidepoint((mx, my)):\n if (click and not display_step):\n if clock_on: self.sequencer.stop() \n self.load_menu(load=False)\n \n idx = mouse_rect.collidelist(self.knobs)\n if (idx != -1 and click):\n knob_idx = idx\n drag = True\n knob_drag = self.knobs[knob_idx]\n\n if mix_tune_rect.collidepoint((mx, my)):\n if click: volume = not volume \n \n select_down_idx = mouse_rect.collidelistall(buttons_down)\n if select_down_idx and click:\n idx = select_down_idx[0]\n if idx == 0: self.sequencer.change_seq(-1) \n if idx == 1: self.sequencer.set_bpm(-0.5) \n if idx == 2: self.sequencer.set_tc(-1)\n if idx == 3: self.sequencer.met_on = False\n \n select_up_idx = mouse_rect.collidelistall(buttons_up) \n if select_up_idx and click:\n idx = select_up_idx[0]\n if idx == 0: self.sequencer.change_seq(1) \n if idx == 1: self.sequencer.set_bpm(0.5)\n if idx == 2: self.sequencer.set_tc(1)\n if idx == 3: self.sequencer.met_on = True\n \n select_playrec_idx = mouse_rect.collidelistall(playrec_rects) \n if select_playrec_idx and click:\n idx = select_playrec_idx[0]\n if idx == 2: self.sequencer.recording = not self.sequencer.recording \n if idx == 0:\n self.sequencer.toggle()\n step = False\n \n \n # 1) Check midi for pulse =>Step Start => Play met----------- #\n if self.receive_conn.poll():\n _ignore = self.receive_conn.recv() \n step = True\n if (self.sequencer.pulse % 24 == 0 and \n self.sequencer.met_on and \n self.sequencer.state): pygame.mixer.Channel(0).play(self.met) \n \n # 2) Get sounds to play and play them --------------- #\n for pad in pad_keys:\n val, pitch = self.sequencer.to_play(pad)\n if val: self.play(pad, pitch)\n \n # 3a) Check user inputs\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n if clock_on: self.sequencer.stop() \n self.receive_conn.close()\n self.sequencer.send_conn.close()\n pygame.quit()\n sys.exit()\n if event.type == KEYUP: \n if event.key == K_l: self.sequencer.delete_flag = False \n if event.type == KEYDOWN:\n if (display_step and event.key == K_q): display_step = False\n elif (not display_step and event.key == K_q): \n if clock_on: self.sequencer.stop() \n self.load_menu()\n \n if (display_step and event.key == K_w): step_bar_val = 0\n elif (not display_step and event.key == K_w): \n if clock_on: self.sequencer.stop() \n self.assn_menu()\n \n if (display_step and event.key == K_e): step_bar_val = 1\n elif (not display_step and event.key == K_e): display_step = True\n\n if (not display_step and event.key == K_r): \n if clock_on: self.sequencer.stop() \n self.load_menu(load=False)\n \n if event.key == K_l: self.sequencer.delete_flag = True\n if event.key == K_o: self.sequencer.recording = not self.sequencer.recording\n if event.key == K_m: self.sequencer.met_on = not self.sequencer.met_on\n if event.key == K_p or event.key == K_SPACE:\n self.sequencer.toggle()\n step = False\n \n # PAD PLAY \n if event.key in pad_keys:\n self.play(event.key)\n # Record if applicable \n if (self.sequencer.recording and \n self.sequencer.state and not \n self.sequencer.delete_flag): self.sequencer.record(event.key, self.pads)\n \n if event.key == K_EQUALS: self.sequencer.set_bpm(0.5)\n if event.key == K_MINUS: self.sequencer.set_bpm(-0.5)\n if event.key == K_0: 
self.sequencer.set_tc(1)\n if event.key == K_9: self.sequencer.set_tc(-1)\n if event.key == K_LEFTBRACKET: self.sequencer.change_seq(-1) \n if event.key == K_RIGHTBRACKET: self.sequencer.change_seq(1) \n if event.key == K_i: volume = not volume \n \n if event.type == MOUSEBUTTONDOWN and event.button == 1: click = True\n if event.type == MOUSEBUTTONUP and event.button == 1: drag = False\n # Mixer knob drag\n if event.type == pygame.MOUSEMOTION and drag:\n mouse_x, mouse_y = event.pos\n knob_drag.y = mouse_y \n new_loc = max(0, min(24, (460-knob_drag.y)//8))\n if volume: self.pads[knob_idx].volume = new_loc\n elif not volume: self.pads[knob_idx].pitch = new_loc\n \n # DELETE --------------------------------------------- #\n if self.sequencer.delete_flag and self.sequencer.state: # Delete note as realtime plays over it\n pressed = pygame.key.get_pressed()\n for press in [97,115,100,102,103,104,106,107]:\n if pressed[press]:\n self.sequencer.delete(press)\n elif self.sequencer.delete_flag and not self.sequencer.state: # Deletes all notes if not running\n pressed = pygame.key.get_pressed()\n for press in [97,115,100,102,103,104,106,107]:\n if pressed[press]:\n self.sequencer.delete(press, whole=True)\n self.sequencer.copy() \n \n # 4) Step End - increment pulse,beat,bar, etc (after event loop) \n if step:\n self.sequencer.step_end() \n step = False \n \n # DRAW STUFF ----------------------------------------------------- # \n self._draw_main(display_step)\n self.draw_mixer(volume)\n if display_step:\n self.make_step_rects(step_bar_val)\n self.draw_step(step_bar_val)\n \n # DRAW CONTROLS ------------------------------------------------ # \n pygame.draw.rect(self.screen, self.WHITE, playrec_rects[0])\n if self.sequencer.state:\n pygame.draw.rect(self.screen, self.RED, playrec_rects[1])\n else: pygame.draw.rect(self.screen, self.WHITE, playrec_rects[1])\n\n pygame.draw.rect(self.screen, self.WHITE, playrec_rects[2])\n if self.sequencer.recording:\n pygame.draw.rect(self.screen, self.RED, playrec_rects[3])\n else: pygame.draw.rect(self.screen, self.WHITE, playrec_rects[3])\n \n for r in func_rects:\n pygame.draw.rect(self.screen, self.WHITE, r)\n\n for r in buttons_down:\n pygame.draw.rect(self.screen, self.WHITE, r)\n if not self.sequencer.met_on:\n pygame.draw.rect(self.screen, self.RED, buttons_down[3])\n \n for r in buttons_up:\n pygame.draw.rect(self.screen, self.WHITE, r)\n if self.sequencer.met_on:\n pygame.draw.rect(self.screen, self.RED, buttons_up[3]) \n \n self.screen.blits(playrec_text)\n self.screen.blits(text_rects)\n self.screen.blits(buttons_text1)\n self.screen.blits(buttons_text2)\n\n # MIX/TUNE ------------------------------------------------------ #\n pygame.draw.rect(self.screen, self.WHITE, mix_tune_rect,0)\n for i, t in enumerate([\"MIX\",\"TUNE\"]):\n text_b = self.font.render(t, True, (255, 255, 255))\n self.screen.blit(text_b, (50,344+i*20))\n if i != volume:\n color = self.RED\n else: color = self.WHITE\n pygame.draw.circle(self.screen, color, (41,349+i*20), 5)\n \n pygame.display.update()\n self.mainClock.tick(160)\n\n # LOAD/SAVE LOOP -------------------------------------------------------- # \n def load_menu(self, load=True):\n running = True\n click = False\n DIR_CONTENTS = os.listdir(\"DISKS\")\n #print(os.getcwd())\n DIR_CONTENTS.sort()\n DIR_CONTENTS.remove(\"BLANK\")\n selected = np.zeros((len(DIR_CONTENTS)), dtype=int)\n button_select = None\n \n while running:\n self.screen.fill((0,0,0))\n self.draw_text('DISKS', self.font, (255, 255, 255), self.screen, 20, 
20)\n\n # Make Select Boxes and text --------------------------------- #\n buttons = []\n for i,text in enumerate(DIR_CONTENTS):\n self.draw_text(text, self.font, (255, 255, 255), self.screen, 20, 60+20*i)\n button = pygame.Rect(5, 62+20*i, 10, 10)\n pygame.draw.rect(self.screen, (255, 255, 255), button)\n buttons.append(button)\n\n # Mouse pos and collision check ------------------------------ #\n mx, my = pygame.mouse.get_pos() \n mouse_rect = pygame.Rect((mx,my),(1,1)) \n select_idx = mouse_rect.collidelistall(buttons)\n\n if select_idx and click:\n if selected[select_idx] == 0:\n selected[select_idx] = 1\n button_select = buttons[select_idx[0]] \n else: \n selected[select_idx] = 0\n button_select = None\n \n # Color Select Box WHITE if selected ----------------------- # \n if button_select:\n pygame.draw.rect(self.screen, self.RED, button_select) \n \n click = False \n for event in pygame.event.get():\n if event.type == QUIT:\n self.receive_conn.close()\n self.sequencer.send_conn.close()\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n if event.key == K_F1 and button_select:\n #print(selected)\n #print(np.nonzero(selected)[0][0])\n DISK = DIR_CONTENTS[np.nonzero(selected)[0][0]]\n if load:\n snd_path = \"DISKS/\"+DISK+\"/samples/\"\n snd_files = os.listdir(snd_path)\n snd_files.sort()\n self.snd_files = snd_files\n self.pads, self.sounds,seqs = load_disk(snd_path, self.pads, snd_files)\n self.sequencer.sequences = seqs\n self.sequencer.change_seq(0)\n if not load:\n disk_path = \"DISKS/\"+DISK+\"/\"\n #print(disk_path)\n #print(self.sequencer.sequences)\n save_disk(disk_path, self.pads, self.sequencer.sequences)\n running = False \n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n pygame.display.update()\n self.mainClock.tick(60) \n \n # ASSN LOOP ----------------------------------------------------------- # \n def assn_menu(self):\n running = True\n click = False\n assn = False\n checked_rect = None\n snd_file_text = [None] + self.snd_files\n chan = 1\n num_keys = [K_1,K_2,K_3,K_4,K_5,K_6,K_7,K_8]\n \n while running:\n self.screen.fill((0,0,0))\n self.draw_text('PADS', self.font, (255, 255, 255), self.screen, 20, 20)\n self.draw_text('SOUNDS', self.font, (255, 255, 255), self.screen, 350, 20)\n\n # Sounds ------------------------------------------------------ #\n buttons = []\n for i,text in enumerate(snd_file_text):\n self.draw_text(text, self.font, (255, 255, 255), self.screen, 350, 60+20*i)\n button = pygame.Rect(335, 62+20*i, 10, 10)\n pygame.draw.rect(self.screen, (0, 255, 0), button)\n buttons.append(button)\n \n # Pads ---------------------------------------------------------- #\n buttons2 = [] \n for i, pad_alpha in enumerate(['A','S','D','F','G','H','J','K']):\n self.draw_text(pad_alpha, self.font, (255, 255, 255), self.screen, 20, 60+20*i)\n file_text = self.pads[i].sound_file\n if file_text is not None:\n file_text = file_text[0:40]\n self.draw_text(file_text, self.font, (255, 255, 255), self.screen, 40, 60+20*i)\n button = pygame.Rect(5, 62+20*i, 10, 10)\n pygame.draw.rect(self.screen, (255, 255,255), button)\n buttons2.append(button)\n self.draw_text(str(self.pads[i].channel_out), self.font, (255, 255, 255), \n self.screen, 30, 60+20*i) \n # Mouse pos and collision check ------------------------------ # \n mx, my = pygame.mouse.get_pos() \n mouse_rect = pygame.Rect((mx,my),(1,1)) \n select_idx = mouse_rect.collidelistall(buttons2)\n \n if select_idx and click:\n assn = not assn\n pad_idx = 
select_idx[0]\n checked_rect = buttons2[select_idx[0]]\n chan = self.pads[pad_idx].channel_out\n \n if checked_rect and assn:\n pygame.draw.rect(self.screen, self.RED, checked_rect)\n self.pads[pad_idx].channel_out = chan # Set pad channel ------ #\n \n # Select Sound for pad --------------------------------------- # \n select_idx = mouse_rect.collidelistall(buttons) \n if assn and select_idx and click:\n self.pads[pad_idx].sound_file = snd_file_text[select_idx[0]]\n if snd_file_text[select_idx[0]] is not None:\n self.pads[pad_idx].sound = self.sounds[select_idx[0]-1] \n else: self.pads[pad_idx].sound = None\n \n click = False \n for event in pygame.event.get():\n if event.type == QUIT:\n self.receive_conn.close()\n self.sequencer.send_conn.close()\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n if event.key in num_keys:\n chan = num_keys.index(event.key)+1\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n \n pygame.display.update()\n self.mainClock.tick(60) \n# MAIN ----------------------------------------------------------------------- #\n\n"
] |
[
[
"numpy.abs",
"numpy.nonzero",
"numpy.arange",
"numpy.save",
"numpy.floor",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
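For orientation, a minimal sketch of the resampling idea behind the pitch_it() function in the row above: shifting by n semitones reads the sample buffer at a rate of 2**(n/12), flooring and clipping the indices, so upward shifts shorten the buffer and downward shifts stretch it. The helper name resample_semitones is hypothetical, not part of the repo.

import numpy as np

def resample_semitones(data, n):
    # Read the buffer at 2**(n/12) x speed; n > 0 shifts up, n < 0 shifts down.
    idx = np.arange(0, data.shape[0] * 2 ** (-n / 12)) * 2 ** (n / 12)
    idx = np.floor(idx).astype(int)           # floor to valid sample indices
    return data[idx[idx < data.shape[0]]]     # clip out-of-range indices, as in pitch_it()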
MageshDominator/data-science-py
|
[
"6dd98a16e72bda96cf5fa3db01c044e3f1b66c05"
] |
[
"My_algorithms/Logistic_Regression/DataPreprocessing.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 4 21:04:05 2019\n\n@author: MAGESHWARAN\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom LogReg import LogRegWithRegularization\n\ndata = pd.read_csv(\"ex2data2.csv\", sep = \",\")\n# plt.matshow(data.corr())\n\n# no. of examples\nm = data.shape[0]\n\n# no. of features\nn = data.shape[1] - 1\n\nX = np.zeros((m, n + 1))\ny = np.array((m, 1))\n\n# load data into X\nX[:, 1] = data[\"X0\"].values\nX[:, 2] = data[\"X1\"].values\n\n# Load data into Y\ny = data[\"Y\"].values\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 11)\n\n# creating object for the linear model\nmodel = LogRegWithRegularization(use_optimizer = False)\n\n# fit data into the model\nmodel.fit(X_train, y_train)\n\n# run prediction for the test set\npredicted_output = model.predict(X_test)\nprint(predicted_output)\nprint(\"Model score:\", accuracy_score(y_test, predicted_output) * 100 , \"%\")"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.array",
"numpy.zeros",
"sklearn.metrics.accuracy_score"
]
] |
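A hedged stand-in sketch of the same split/fit/score pattern as the script above, using sklearn's LogisticRegression in place of the custom LogRegWithRegularization class and synthetic data in place of ex2data2.csv:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Synthetic stand-in for ex2data2.csv (assumption: two features, binary label).
X = np.random.randn(100, 2)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=11)
model = LogisticRegression()          # stand-in for LogRegWithRegularization
model.fit(X_train, y_train)
print("Model score:", accuracy_score(y_test, model.predict(X_test)) * 100, "%")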
bojigu/thoipapy
|
[
"cc571677bd7e25e73db07af9d5d3c2cc36682903",
"cc571677bd7e25e73db07af9d5d3c2cc36682903"
] |
[
"thoipapy/figs/create_BOcurve_files.py",
"thoipapy/validation/precision_recall.py"
] |
[
"import warnings\nfrom pathlib import Path\nfrom typing import Union\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nfrom scipy.stats import linregress\nfrom thoipapy.utils import normalise_0_1, make_sure_path_exists\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict, logging, AUC_ser, plot_o_over_r=False):\n df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name=\"df_o_minus_r\", index_col=0)\n BO_scatter_png = str(BO_barchart_png)[:-12] + \"scatter.png\"\n\n #######################################################################################################\n # #\n # Create a dataframe with AUBOC and AUC for individual protein (df_valid_indiv) #\n # #\n #######################################################################################################\n # load AUBOC values as a series\n mean_o_minus_r_by_sample_ser = pd.read_excel(bocurve_data_xlsx, sheet_name=\"mean_o_minus_r_by_sample\", index_col=0)[\"mean_o_minus_r_by_sample\"]\n # select sample sizes 5 and 10\n df_valid_indiv = df_o_minus_r.loc[[5, 10], :].T.copy()\n df_valid_indiv[\"AUBOC\"] = mean_o_minus_r_by_sample_ser\n df_valid_indiv[\"ROC AUC\"] = AUC_ser\n df_valid_indiv.sort_values(\"AUBOC\", axis=0, ascending=False, inplace=True)\n\n \"\"\" df_valid_indiv should now have the results from BO curve and ROC for each protein\n \n AUBOC sample size 5 sample size 10 ROC AUC\n 3ij4_A-crystal 17.456522 1.913043 1.652174 0.714286\n 4wit_A-crystal 16.620000 2.000000 2.000000 0.622807\n Q08345-ETRA 16.571429 2.809524 2.238095 0.842593\n P04626-ETRA 16.456522 1.913043 1.652174 0.916667\n P25189-ETRA 14.634615 2.038462 2.153846 0.812500\n \"\"\"\n\n #######################################################################################################\n # #\n # plot correlation between AUBOC and ROC #\n # #\n #######################################################################################################\n # BO_barchart_png\n plt.close(\"all\")\n # plt.rcParams.update({'font.size': 8})\n figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes\n fig, ax = plt.subplots(figsize=figsize)\n # df_valid_indiv_scatter = df_valid_indiv[[\"AUBOC\", \"ROC AUC\"]]\n df_valid_indiv.plot(kind=\"scatter\", ax=ax, x=\"AUBOC\", y=\"ROC AUC\", alpha=0.7)\n\n # calculate linear regression for fitted line\n slope, intercept, r_value, p_value, std_err = linregress(df_valid_indiv[\"AUBOC\"], df_valid_indiv[\"ROC AUC\"])\n # fit_fn = np.poly1d(linear_regression)\n\n # slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)\n x_first_last_dp = np.array([df_valid_indiv[\"AUBOC\"].min(), df_valid_indiv[\"AUBOC\"].max()])\n y_fitted = x_first_last_dp * slope + intercept\n ax.plot(x_first_last_dp, y_fitted, label=\"$R^2$ : {:.2f}\".format(r_value ** 2))\n\n ax.set_xlabel(\"AUBOC\")\n ax.set_ylabel(\"ROC AUC\")\n ax.legend()\n fig.tight_layout()\n ax.grid(False)\n # BO_barchart_png = os.path.join(BO_curve_folder, \"AUBOC_barchart.png\")\n\n fig.savefig(BO_scatter_png, dpi=240)\n\n # simply normalise all between 0 and 1\n for col in df_valid_indiv.columns:\n df_valid_indiv[col] = normalise_0_1(df_valid_indiv[col])[0] + 0.01\n\n bocurve_data_xlsx: Union[Path, str] = Path(s[\"data_dir\"]) / 
f\"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx\"\n BO_data_valid_indiv_csv: Union[Path, str] = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/data/{s['setname']}_BO_curve_data_valid_indiv.csv\"\n make_sure_path_exists(bocurve_data_xlsx, isfile=True)\n\n df_valid_indiv = df_valid_indiv.reindex(columns=[\"AUBOC\", 5, 10, \"ROC AUC\"])\n df_valid_indiv.columns = [\"AUBOC\", \"sample size 5\", \"sample size 10\", \"ROC AUC\"]\n\n df_valid_indiv.to_csv(BO_data_valid_indiv_csv)\n\n \"\"\" df_valid_indiv is now normalised within each column, and sorted by AUBOC\n AUBOC sample size 5 sample size 10 ROC AUC\n 3ij4_A-crystal 1.010000 0.789166 0.727758 0.724139\n 4wit_A-crystal 0.980317 0.810587 0.793133 0.594927\n DDR1 [Q08345-ETRA] 0.978593 1.010000 0.837883 0.905371\n ErbB2 [P04626-ETRA] 0.974516 0.789166 0.727758 1.010000\n MPZ [P25189-ETRA] 0.909867 0.820061 0.822048 0.862866\n \"\"\"\n\n #######################################################################################################\n # #\n # plot barchart #\n # #\n #######################################################################################################\n # BO_barchart_png\n plt.close(\"all\")\n # plt.rcParams.update({'font.size': 8})\n figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes\n fig, ax = plt.subplots(figsize=figsize)\n # replace the protein names\n df_valid_indiv.index = pd.Series(df_valid_indiv.index).replace(namedict)\n df_valid_indiv.plot(kind=\"bar\", ax=ax, alpha=0.7)\n\n ax.set_ylabel(\"performance value\\n(observed overlap - random overlap)\")\n ax.legend() # ([\"sample size = 5\", \"sample size = 10\"])\n\n fig.tight_layout()\n ax.grid(False)\n fig.savefig(BO_barchart_png, dpi=240)\n\n #######################################################################################################\n # #\n # plot linechart (combined data all proteins #\n # #\n #######################################################################################################\n if plot_o_over_r:\n df_o_over_r = pd.read_excel(bocurve_data_xlsx, sheet_name=\"df_o_over_r\", index_col=0)\n df_o_over_r_mean = df_o_over_r.T.mean()\n df_o_minus_r.columns = pd.Series(df_o_minus_r.columns).replace(namedict)\n df_o_minus_r_mean = df_o_minus_r.T.mean()\n\n # apply cutoff (e.g. 
5 residues for AUBOC5)\n auboc_ser = df_o_minus_r_mean.iloc[:s[\"n_residues_AUBOC_validation\"]]\n\n # get the area under the curve\n AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)\n\n # BO_linechart_png\n plt.close(\"all\")\n figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes\n fig, ax = plt.subplots(figsize=figsize)\n\n df_o_minus_r_mean.plot(ax=ax, color=\"#0f7d9b\", linestyle=\"-\", label=\"prediction (AUBOC : {:0.2f}\".format(AUBOC))\n ax.plot([1, 10], [0, 0], color=\"#0f7d9b\", linestyle=\"--\", label=\"random\", alpha=0.5)\n\n if plot_o_over_r:\n ax2 = ax.twinx()\n df_o_over_r_mean.plot(ax=ax2, color=\"#9b2d0f\", linestyle=\"-\", label=\"old method (o/r)\")\n ax2.plot([1, 10], [1, 1], color=\"#9b2d0f\", linestyle=\"--\", label=\"old method random\", alpha=0.5)\n\n # ax.set_ylim(0)\n ax.grid(False)\n ax.set_ylabel(\"fraction of correctly predicted residues\\n(observed - random)\", color=\"#0f7d9b\")\n ax.tick_params('y', colors=\"#0f7d9b\")\n\n ax.spines['left'].set_color(\"#0f7d9b\")\n ax.legend()\n if plot_o_over_r:\n ax2.tick_params('y', colors=\"#9b2d0f\")\n ax2.spines['right'].set_color(\"#9b2d0f\")\n # ax.set_ylabel(\"performance value\\n (observed / random)\", color=\"#9b2d0f\")\n ax.set_ylabel(\"fraction of correctly predicted residues\\n(observed / random)\", color=\"#9b2d0f\")\n ax2.legend()\n\n ax.set_xlabel(\"number of TMD residues\\n(sample size)\")\n fig.tight_layout()\n fig.savefig(BO_linechart_png, dpi=140)\n\n return AUBOC\n\n\ndef save_extra_BO_figs(bocurve_data_xlsx, other_figs_path):\n linechart_mean_obs_and_rand = os.path.join(other_figs_path, \"1_linechart_mean_obs_and_rand.png\")\n linechart_obs_indiv = os.path.join(other_figs_path, \"2_linechart_obs_indiv.png\")\n linechart_p_indiv = os.path.join(other_figs_path, \"3_linechart_p_indiv.png\")\n linechart_o_minus_r = os.path.join(other_figs_path, \"4_linechart_o_minus_r.png\")\n linechart_o_over_r = os.path.join(other_figs_path, \"5_linechart_o_over_r.png\")\n\n dfrand = pd.read_excel(bocurve_data_xlsx, sheet_name=\"dfrand\", index_col=0)\n dfobs = pd.read_excel(bocurve_data_xlsx, sheet_name=\"dfobs\", index_col=0)\n df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name=\"df_o_minus_r\", index_col=0)\n # linechart_mean_obs_and_rand\n\n fig, ax = plt.subplots()\n dfrand.mean(axis=1).plot(ax=ax, color=\"k\", linestyle=\"--\", label=\"mean random\")\n dfobs.mean(axis=1).plot(ax=ax, color=\"k\", label=\"mean observed\")\n ax.grid(False)\n ax.set_ylabel(\"mean overlap\")\n ax.legend()\n fig.savefig(linechart_mean_obs_and_rand, dpi=140)\n\n # linechart_obs_indiv\n\n plt.close(\"all\")\n fig, ax = plt.subplots()\n dfrand.mean(axis=1).plot(ax=ax, color=\"k\", linestyle=\"--\", label=\"mean random\")\n dfobs.plot(ax=ax, alpha=0.7)\n ax.legend(loc=\"upper left\", ncol=2)\n ax.set_ylabel(\"overlap\")\n fig.savefig(linechart_obs_indiv, dpi=140)\n\n dfp = pd.read_excel(bocurve_data_xlsx, sheet_name=\"dfp\", index_col=0)\n # linechart_p_indiv\n plt.close(\"all\")\n fig, ax = plt.subplots()\n dfp.plot(ax=ax, alpha=0.7)\n ax.legend(loc=\"upper right\", ncol=2)\n ax.set_ylabel(\"p-value of result\")\n fig.savefig(linechart_p_indiv, dpi=140)\n\n # linechart_o_minus_r\n plt.close(\"all\")\n fig, ax = plt.subplots()\n df_o_minus_r.plot(ax=ax, alpha=0.7)\n ax.legend(loc=\"upper left\", ncol=2)\n ax.set_ylabel(\"observed - random\")\n fig.savefig(linechart_o_minus_r, dpi=140)\n",
"from pathlib import Path\nfrom random import shuffle\nfrom typing import Union\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import precision_recall_curve, auc\n\nfrom thoipapy import utils\nfrom thoipapy.validation.gather import create_df_with_all_predictions_for_all_residues_in_set\n\n\ndef create_precision_recall_all_residues(s, df_set, logging):\n \"\"\"Combine all residue predictions, so precision recall can be calculated from a single array.\n\n Effectively stacks the CSVs on top of each other.\n Code is directly copied and modified from create_ROC_all_residues.\n\n This is the recommended method for the pr of a dataset, and complements methods where the precision-recall\n is calculated for each protein separately.\n\n Parameters\n ----------\n s : dict\n Settings dictionary\n df_set : pd.DataFrame\n Dataframe containing the list of proteins to process, including their TMD sequences and full-length sequences\n index : range(0, ..)\n columns : ['acc', 'seqlen', 'TMD_start', 'TMD_end', 'tm_surr_left', 'tm_surr_right', 'database', ....]\n logging : logging.Logger\n Python object with settings for logging to console and file.\n\n Saved Files\n -----------\n predictions_csv : csv\n csv file with stacked predictions data for multiple proteins\n index = range(0, ..)\n columns =\n \"\"\"\n logging.info('Starting combine_all_residue_predictions.')\n\n # output file with all predictions\n pred_all_res_csv: Union[Path, str] = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/precision_recall/{s['setname']}_pred_all_res.csv\"\n # all_res_precision_recall_data_dict_pkl = os.path.join(s[\"data_dir\"], \"results\", s[\"setname\"], \"precision_recall\", \"{}_all_res_precision_recall_data_dict.pickle\".format(s[\"setname\"]))\n all_res_precision_recall_data_csv: Path = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/precision_recall/{s['setname']}_all_res_precision_recall_data.csv\"\n all_res_precision_recall_png: Path = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/precision_recall/{s['setname']}_all_res_precision_recall.png\"\n\n utils.make_sure_path_exists(pred_all_res_csv, isfile=True)\n\n df_set_nonred = utils.drop_redundant_proteins_from_list(df_set, logging)\n\n df_all = create_df_with_all_predictions_for_all_residues_in_set(s, df_set_nonred, pred_all_res_csv, logging)\n\n save_fig_precision_recall_all_residues(s, df_all, all_res_precision_recall_png, all_res_precision_recall_data_csv, logging)\n\n df_all[\"subset\"] = df_all.acc_db.str.split(\"-\").str[1]\n\n subsets = [\"ETRA\", \"NMR\", \"crystal\"]\n for subset in subsets:\n df_subset = df_all.loc[df_all.subset == subset]\n if df_subset.empty:\n continue\n precision_recall_png: Union[Path, str] = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/precision_recall/{s['setname']}_all_res_precision_recall_data_{subset}_subset.png\"\n precision_recall_data_csv: Union[Path, str] = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/precision_recall/{s['setname']}_all_res_precision_recall_data_{subset}_subset.csv\"\n save_fig_precision_recall_all_residues(s, df_subset, precision_recall_png, precision_recall_data_csv, logging)\n\n # with open(all_res_precision_recall_data_pkl, \"wb\") as f:\n # pickle.dump(output_dict, f, protocol=pickle.HIGHEST_PROTOCOL)\n logging.info('Finished combine_all_residue_predictions.')\n\n\ndef save_fig_precision_recall_all_residues(s, df, all_res_precision_recall_png, 
all_res_precision_recall_data_csv, logging):\n \"\"\"Save figure for precision recall plot of all residues joined together.\n\n Code is directly copied and modified from save_fig_ROC_all_residues\n\n \"\"\"\n fontsize = 8\n fig, ax = plt.subplots(figsize=(5, 5))\n THOIPA_predictor = \"THOIPA_{}_LOO\".format(s[\"set_number\"])\n predictors = [THOIPA_predictor, \"TMDOCK\", \"LIPS_surface_ranked\", \"PREDDIMER\", \"random\"]\n\n testsetname, trainsetname = utils.get_testsetname_trainsetname_from_run_settings(s)\n\n if s[\"setname\"] == testsetname:\n predictors.append(f\"thoipa.train{trainsetname}\")\n\n output_dict = {}\n interface_random = df.interface_score.tolist()\n shuffle(interface_random)\n df[\"random\"] = interface_random\n\n for predictor in predictors:\n df_sel = df[[\"interface\", predictor]].dropna()\n if predictor in [\"TMDOCK\", \"PREDDIMER\"]:\n pred = - df_sel[predictor]\n # pred = normalise_between_2_values(df_sel[predictor], 2.5, 8, invert=True)\n else:\n pred = df_sel[predictor]\n precision, recall, thresholds_PRC = precision_recall_curve(df_sel.interface, pred)\n\n pred_auc = auc(recall, precision)\n # sys.stdout.write(\"{} AUC : {:.03f}\\n\".format(predictor, pred_auc))\n label = \"{}. AUC : {:.03f}\".format(predictor, pred_auc)\n ax.plot(recall, precision, label=label, linewidth=1)\n\n output_dict[predictor] = {\"precision\": list(precision), \"recall\": list(recall), \"pred_auc\": pred_auc}\n ax.grid(False)\n\n ax.set_xlabel(\"recall\", fontsize=fontsize)\n ax.set_ylabel(\"precision\", fontsize=fontsize)\n ax.legend(fontsize=fontsize)\n fig.tight_layout()\n fig.savefig(all_res_precision_recall_png, dpi=240)\n fig.savefig(str(all_res_precision_recall_png)[:-4] + \".pdf\")\n\n df_precision_recall_data = pd.DataFrame(output_dict).T\n df_precision_recall_data.to_csv(all_res_precision_recall_data_csv)\n\n logging.info(\"save_fig_precision_recall_all_residues finished ({})\".format(all_res_precision_recall_data_csv))\n"
] |
[
[
"pandas.read_excel",
"pandas.Series",
"matplotlib.pyplot.subplots",
"scipy.stats.linregress",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.trapz"
],
[
"sklearn.metrics.auc",
"matplotlib.pyplot.subplots",
"sklearn.metrics.precision_recall_curve",
"pandas.DataFrame"
]
] |
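For reference, a self-contained sketch of the pooled precision-recall metric computed in save_fig_precision_recall_all_residues above; the same precision_recall_curve and auc(recall, precision) calls appear in the source, but the labels and scores here are made up.

import numpy as np
from sklearn.metrics import precision_recall_curve, auc

interface = np.array([0, 0, 1, 1, 0, 1])           # true interface residues (synthetic)
pred = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])   # predictor scores (synthetic)
precision, recall, _ = precision_recall_curve(interface, pred)
print("PR AUC:", auc(recall, precision))           # area under the precision-recall curve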
opz/pyti
|
[
"1ecc174195525bb3bdf401252244f80f02df04ef"
] |
[
"pyti/relative_strength_index.py"
] |
[
"from __future__ import absolute_import\nimport numpy as np\nfrom pyti import catch_errors\nfrom pyti.function_helper import fill_for_noncomputable_vals\nfrom six.moves import range\nfrom six.moves import zip\n\n\ndef relative_strength_index(data, period):\n \"\"\"\n Relative Strength Index.\n\n Formula:\n RSI = 100 - (100 / 1 + (prevGain/prevLoss))\n \"\"\"\n catch_errors.check_for_period_error(data, period)\n\n period = int(period)\n changes = [data_tup[1] - data_tup[0] for data_tup in zip(data[::1], data[1::1])]\n\n gains = [0 if val < 0 else val for val in changes]\n\n losses = [0 if val > 0 else abs(val) for val in changes]\n\n avg_gain = np.mean(gains[:period])\n avg_loss = np.mean(losses[:period])\n\n rsi = []\n if avg_loss == 0:\n rsi.append(100)\n else:\n rs = avg_gain / avg_loss\n rsi.append(100 - (100 / (1 + rs)))\n\n for idx in range(1, len(data) - period):\n avg_gain = ((avg_gain * (period - 1) +\n gains[idx + (period - 1)]) / period)\n avg_loss = ((avg_loss * (period - 1) +\n losses[idx + (period - 1)]) / period)\n\n if avg_loss == 0:\n rsi.append(100)\n else:\n rs = avg_gain / avg_loss\n rsi.append(100 - (100 / (1 + rs)))\n\n rsi = fill_for_noncomputable_vals(data, rsi)\n\n return rsi\n"
] |
[
[
"numpy.mean"
]
] |
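A hedged usage sketch for relative_strength_index above, assuming the package is importable as pyti (matching the file path shown); the price series is made up.

from pyti.relative_strength_index import relative_strength_index

prices = [44.34, 44.09, 44.15, 43.61, 44.33, 44.83, 45.10, 45.42,
          45.84, 46.08, 45.89, 46.03, 45.61, 46.28, 46.28]
# Later values follow Wilder smoothing:
#   avg_gain = (avg_gain * (period - 1) + gain) / period
rsi = relative_strength_index(prices, period=6)
print(rsi)  # the first `period` entries are filled as non-computable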
mega002/DANN-MNLI
|
[
"bd27c5ec70d2b68453dd16f90a3b8d2f28f7a945"
] |
[
"python/util/flip_gradient.py"
] |
[
"import tensorflow as tf\nfrom tensorflow.python.framework import ops\n\n\nclass FlipGradientBuilder(object):\n def __init__(self):\n self.num_calls = 0\n\n def __call__(self, x, l=1.0):\n grad_name = \"FlipGradient%d\" % self.num_calls\n\n @ops.RegisterGradient(grad_name)\n def _flip_gradients(op, grad):\n return [tf.negative(grad) * l]\n\n g = tf.get_default_graph()\n with g.gradient_override_map({\"Identity\": grad_name}):\n y = tf.identity(x)\n\n self.num_calls += 1\n return y\n\n\nflip_gradient = FlipGradientBuilder()\n"
] |
[
[
"tensorflow.negative",
"tensorflow.get_default_graph",
"tensorflow.identity",
"tensorflow.python.framework.ops.RegisterGradient"
]
] |
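A hedged sketch of how the flip_gradient layer above is typically wired into a DANN domain head (TensorFlow 1.x graph mode, as in the source). The import assumes the repo's python/ directory is on the path; the placeholder shapes are illustrative.

import tensorflow as tf
from util.flip_gradient import flip_gradient  # module path as in the row above

features = tf.placeholder(tf.float32, shape=[None, 128])  # shared encoder output
l = tf.placeholder(tf.float32, shape=[])                  # adaptation weight schedule

# Forward pass: identity. Backward pass: gradients are multiplied by -l,
# so the encoder learns to confuse the domain classifier.
reversed_features = flip_gradient(features, l)
domain_logits = tf.layers.dense(reversed_features, units=2)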
wall-ed-coder/ReAgent
|
[
"92f223a135b8fbc0942a217acb117ad0935897a3"
] |
[
"reagent/training/ranking/seq2slate_attn_trainer.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nimport logging\n\nimport reagent.types as rlt\nimport torch\nimport torch.nn as nn\nfrom reagent.core.dataclasses import field\nfrom reagent.core.tracker import observable\nfrom reagent.model_utils.seq2slate_utils import Seq2SlateMode\nfrom reagent.models.seq2slate import Seq2SlateTransformerNet\nfrom reagent.optimizer.union import Optimizer__Union\nfrom reagent.parameters import TransformerParameters\nfrom reagent.training.loss_reporter import NoOpLossReporter\nfrom reagent.training.trainer import Trainer\n\n\nlogger = logging.getLogger(__name__)\n\n\n@observable(cross_entropy_loss=torch.Tensor)\nclass Seq2SlatePairwiseAttnTrainer(Trainer):\n \"\"\"\n Seq2Slate without a decoder learned in a supervised learning fashion (\n https://arxiv.org/pdf/1904.06813.pdf )\n \"\"\"\n\n def __init__(\n self,\n seq2slate_net: Seq2SlateTransformerNet,\n minibatch_size: int = 1024,\n loss_reporter=None,\n use_gpu: bool = False,\n policy_optimizer: Optimizer__Union = field( # noqa: B008\n default_factory=Optimizer__Union.default\n ),\n ) -> None:\n self.loss_reporter = loss_reporter\n self.use_gpu = use_gpu\n self.seq2slate_net = seq2slate_net\n self.minibatch_size = minibatch_size\n self.minibatch = 0\n self.optimizer = policy_optimizer.make_optimizer(\n self.seq2slate_net.parameters()\n )\n self.log_softmax = nn.LogSoftmax(dim=1)\n self.kl_loss = nn.KLDivLoss(reduction=\"batchmean\")\n if self.loss_reporter is None:\n self.loss_reporter = NoOpLossReporter()\n\n def warm_start_components(self):\n components = [\"seq2slate_net\"]\n return components\n\n def train(self, training_batch: rlt.PreprocessedRankingInput):\n assert type(training_batch) is rlt.PreprocessedRankingInput\n\n # shape: batch_size, tgt_seq_len\n encoder_scores = self.seq2slate_net(\n training_batch, mode=Seq2SlateMode.ENCODER_SCORE_MODE\n ).encoder_scores\n assert encoder_scores.requires_grad\n\n loss = self.kl_loss(\n self.log_softmax(encoder_scores), training_batch.position_reward\n )\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n loss = loss.detach()\n self.minibatch += 1\n\n # pyre-fixme[16]: `Seq2SlatePairwiseAttnTrainer` has no attribute\n # `notify_observers`.\n self.notify_observers(cross_entropy_loss=loss)\n\n return {\"cross_entropy_loss\": loss}\n"
] |
[
[
"torch.nn.LogSoftmax",
"torch.nn.KLDivLoss"
]
] |
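For reference, a minimal sketch of the training-step loss used by Seq2SlatePairwiseAttnTrainer above: encoder scores are log-softmaxed over slate positions and matched against per-position rewards with a batch-mean KL divergence. The shapes here are illustrative.

import torch
import torch.nn as nn

encoder_scores = torch.randn(2, 3, requires_grad=True)   # (batch_size, tgt_seq_len)
position_reward = torch.tensor([[0.7, 0.2, 0.1],
                                [0.5, 0.3, 0.2]])         # rows sum to 1

kl_loss = nn.KLDivLoss(reduction="batchmean")
loss = kl_loss(nn.LogSoftmax(dim=1)(encoder_scores), position_reward)
loss.backward()  # gradients reach encoder_scores, as asserted in train()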
mjunix/arrow
|
[
"4144c1739ec2e58d5f076fa63a0b61653324dc02"
] |
[
"python/pyarrow/tests/test_pandas.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport gc\nimport decimal\nimport json\nimport multiprocessing as mp\nimport sys\n\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time, timedelta, timezone\n\nimport hypothesis as h\nimport hypothesis.extra.pytz as tzst\nimport hypothesis.strategies as st\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nimport pytz\n\nfrom pyarrow.pandas_compat import get_logical_type, _pandas_api\nfrom pyarrow.tests.util import invoke_script, random_ascii, rands\nimport pyarrow.tests.strategies as past\nfrom pyarrow.vendored.version import Version\n\nimport pyarrow as pa\ntry:\n from pyarrow import parquet as pq\nexcept ImportError:\n pass\n\ntry:\n import pandas as pd\n import pandas.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pass\n\n\n# Marks all of the tests in this module\npytestmark = pytest.mark.pandas\n\n\ndef _alltypes_example(size=100):\n return pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms,\n # us, ns\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n\n\ndef _check_pandas_roundtrip(df, expected=None, use_threads=False,\n expected_schema=None,\n check_dtype=True, schema=None,\n preserve_index=False,\n as_batch=False):\n klass = pa.RecordBatch if as_batch else pa.Table\n table = klass.from_pandas(df, schema=schema,\n preserve_index=preserve_index,\n nthreads=2 if use_threads else 1)\n result = table.to_pandas(use_threads=use_threads)\n\n if expected_schema:\n # all occurrences of _check_pandas_roundtrip passes expected_schema\n # without the pandas generated key-value metadata\n assert table.schema.equals(expected_schema)\n\n if expected is None:\n expected = df\n\n tm.assert_frame_equal(result, expected, check_dtype=check_dtype,\n check_index_type=('equiv' if preserve_index\n else False))\n\n\ndef _check_series_roundtrip(s, type_=None, expected_pa_type=None):\n arr = pa.array(s, from_pandas=True, type=type_)\n\n if type_ is not None and expected_pa_type is None:\n expected_pa_type = type_\n\n if 
expected_pa_type is not None:\n assert arr.type == expected_pa_type\n\n result = pd.Series(arr.to_pandas(), name=s.name)\n tm.assert_series_equal(s, result)\n\n\ndef _check_array_roundtrip(values, expected=None, mask=None,\n type=None):\n arr = pa.array(values, from_pandas=True, mask=mask, type=type)\n result = arr.to_pandas()\n\n values_nulls = pd.isnull(values)\n if mask is None:\n assert arr.null_count == values_nulls.sum()\n else:\n assert arr.null_count == (mask | values_nulls).sum()\n\n if expected is None:\n if mask is None:\n expected = pd.Series(values)\n else:\n expected = pd.Series(np.ma.masked_array(values, mask=mask))\n\n tm.assert_series_equal(pd.Series(result), expected, check_names=False)\n\n\ndef _check_array_from_pandas_roundtrip(np_array, type=None):\n arr = pa.array(np_array, from_pandas=True, type=type)\n result = arr.to_pandas()\n npt.assert_array_equal(result, np_array)\n\n\nclass TestConvertMetadata:\n \"\"\"\n Conversion tests for Pandas metadata & indices.\n \"\"\"\n\n def test_non_string_columns(self):\n df = pd.DataFrame({0: [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.field(0).name == '0'\n\n def test_from_pandas_with_columns(self):\n df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]},\n columns=[1, 0])\n\n table = pa.Table.from_pandas(df, columns=[0, 1])\n expected = pa.Table.from_pandas(df[[0, 1]])\n assert expected.equals(table)\n\n record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])\n record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])\n assert record_batch_expected.equals(record_batch_table)\n\n def test_column_index_names_are_preserved(self):\n df = pd.DataFrame({'data': [1, 2, 3]})\n df.columns.names = ['a']\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_column_index_names_with_tz(self):\n # ARROW-13756\n # Bug if index is timezone aware DataTimeIndex\n\n df = pd.DataFrame(\n np.random.randn(5, 3),\n columns=pd.date_range(\n \"2021-01-01\", \"2021-01-3\", freq=\"D\", tz=\"CET\")\n )\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_range_index_shortcut(self):\n # ARROW-1639\n index_name = 'foo'\n df = pd.DataFrame({'a': [1, 2, 3, 4]},\n index=pd.RangeIndex(0, 8, step=2, name=index_name))\n\n df2 = pd.DataFrame({'a': [4, 5, 6, 7]},\n index=pd.RangeIndex(0, 4))\n\n table = pa.Table.from_pandas(df)\n table_no_index_name = pa.Table.from_pandas(df2)\n\n # The RangeIndex is tracked in the metadata only\n assert len(table.schema) == 1\n\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n assert isinstance(result.index, pd.RangeIndex)\n assert _pandas_api.get_rangeindex_attribute(result.index, 'step') == 2\n assert result.index.name == index_name\n\n result2 = table_no_index_name.to_pandas()\n tm.assert_frame_equal(result2, df2)\n assert isinstance(result2.index, pd.RangeIndex)\n assert _pandas_api.get_rangeindex_attribute(result2.index, 'step') == 1\n assert result2.index.name is None\n\n def test_range_index_force_serialization(self):\n # ARROW-5427: preserve_index=True will force the RangeIndex to\n # be serialized as a column rather than tracked more\n # efficiently as metadata\n df = pd.DataFrame({'a': [1, 2, 3, 4]},\n index=pd.RangeIndex(0, 8, step=2, name='foo'))\n\n table = pa.Table.from_pandas(df, preserve_index=True)\n assert table.num_columns == 2\n assert 'foo' in table.column_names\n\n restored = table.to_pandas()\n tm.assert_frame_equal(restored, df)\n\n def test_rangeindex_doesnt_warn(self):\n # ARROW-5606: pandas 0.25 deprecated private 
_start/stop/step\n # attributes -> can be removed if support < pd 0.25 is dropped\n df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])\n\n with pytest.warns(None) as record:\n _check_pandas_roundtrip(df, preserve_index=True)\n\n assert len(record) == 0\n\n def test_multiindex_columns(self):\n columns = pd.MultiIndex.from_arrays([\n ['one', 'two'], ['X', 'Y']\n ])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_with_dtypes(self):\n columns = pd.MultiIndex.from_arrays(\n [\n ['one', 'two'],\n pd.DatetimeIndex(['2017-08-01', '2017-08-02']),\n ],\n names=['level_1', 'level_2'],\n )\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_with_column_dtype_object(self):\n # ARROW-3651 & ARROW-9096\n # Bug when dtype of the columns is object.\n\n # uinderlying dtype: integer\n df = pd.DataFrame([1], columns=pd.Index([1], dtype=object))\n _check_pandas_roundtrip(df, preserve_index=True)\n\n # underlying dtype: floating\n df = pd.DataFrame([1], columns=pd.Index([1.1], dtype=object))\n _check_pandas_roundtrip(df, preserve_index=True)\n\n # underlying dtype: datetime\n # ARROW-9096: a simple roundtrip now works\n df = pd.DataFrame([1], columns=pd.Index(\n [datetime(2018, 1, 1)], dtype=\"object\"))\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_unicode(self):\n columns = pd.MultiIndex.from_arrays([['あ', 'い'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_doesnt_warn(self):\n # ARROW-3953: pandas 0.24 rename of MultiIndex labels to codes\n columns = pd.MultiIndex.from_arrays([['one', 'two'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n\n with pytest.warns(None) as record:\n _check_pandas_roundtrip(df, preserve_index=True)\n\n assert len(record) == 0\n\n def test_integer_index_column(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_index_metadata_field_name(self):\n # test None case, and strangely named non-index columns\n df = pd.DataFrame(\n [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],\n index=pd.MultiIndex.from_arrays(\n [['c', 'b', 'a'], [3, 2, 1]],\n names=[None, 'foo']\n ),\n columns=['a', None, '__index_level_0__'],\n )\n with pytest.warns(UserWarning):\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n col1, col2, col3, idx0, foo = js['columns']\n\n assert col1['name'] == 'a'\n assert col1['name'] == col1['field_name']\n\n assert col2['name'] is None\n assert col2['field_name'] == 'None'\n\n assert col3['name'] == '__index_level_0__'\n assert col3['name'] == col3['field_name']\n\n idx0_descr, foo_descr = js['index_columns']\n assert idx0_descr == '__index_level_0__'\n assert idx0['field_name'] == idx0_descr\n assert idx0['name'] is None\n\n assert foo_descr == 'foo'\n assert foo['field_name'] == foo_descr\n assert foo['name'] == foo_descr\n\n def test_categorical_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), dtype='category')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert 
column_indexes['pandas_type'] == 'categorical'\n assert column_indexes['numpy_type'] == 'int8'\n\n md = column_indexes['metadata']\n assert md['num_categories'] == 3\n assert md['ordered'] is False\n\n def test_string_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), name='stringz')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] == 'stringz'\n assert column_indexes['name'] == column_indexes['field_name']\n assert column_indexes['numpy_type'] == 'object'\n assert column_indexes['pandas_type'] == 'unicode'\n\n md = column_indexes['metadata']\n\n assert len(md) == 1\n assert md['encoding'] == 'UTF-8'\n\n def test_datetimetz_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'datetimetz'\n assert column_indexes['numpy_type'] == 'datetime64[ns]'\n\n md = column_indexes['metadata']\n assert md['timezone'] == 'America/New_York'\n\n def test_datetimetz_row_index(self):\n df = pd.DataFrame({\n 'a': pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n })\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_categorical_row_index(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n df['a'] = df.a.astype('category')\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_duplicate_column_names_does_not_crash(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df)\n\n def test_dictionary_indices_boundscheck(self):\n # ARROW-1658. 
No validation of indices leads to segfaults in pandas\n indices = [[0, 1], [0, -1]]\n\n for inds in indices:\n arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)\n batch = pa.RecordBatch.from_arrays([arr], ['foo'])\n table = pa.Table.from_batches([batch, batch, batch])\n\n with pytest.raises(IndexError):\n arr.to_pandas()\n\n with pytest.raises(IndexError):\n table.to_pandas()\n\n def test_unicode_with_unicode_column_and_index(self):\n df = pd.DataFrame({'あ': ['い']}, index=['う'])\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_mixed_column_names(self):\n # mixed type column names are not reconstructed exactly\n df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})\n\n for cols in [['あ', b'a'], [1, '2'], [1, 1.5]]:\n df.columns = pd.Index(cols, dtype=object)\n\n # assert that the from_pandas raises the warning\n with pytest.warns(UserWarning):\n pa.Table.from_pandas(df)\n\n expected = df.copy()\n expected.columns = df.columns.values.astype(str)\n with pytest.warns(UserWarning):\n _check_pandas_roundtrip(df, expected=expected,\n preserve_index=True)\n\n def test_binary_column_name(self):\n column_data = ['い']\n key = 'あ'.encode()\n data = {key: column_data}\n df = pd.DataFrame(data)\n\n # we can't use _check_pandas_roundtrip here because our metadata\n # is always decoded as utf8: even if binary goes in, utf8 comes out\n t = pa.Table.from_pandas(df, preserve_index=True)\n df2 = t.to_pandas()\n assert df.values[0] == df2.values[0]\n assert df.index.values[0] == df2.index.values[0]\n assert df.columns[0] == key\n\n def test_multiindex_duplicate_values(self):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_metadata_with_mixed_types(self):\n df = pd.DataFrame({'data': [b'some_bytes', 'some_unicode']})\n table = pa.Table.from_pandas(df)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'bytes'\n assert data_column['numpy_type'] == 'object'\n\n def test_ignore_metadata(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': ['foo', 'bar', 'baz']},\n index=['one', 'two', 'three'])\n table = pa.Table.from_pandas(df)\n\n result = table.to_pandas(ignore_metadata=True)\n expected = (table.cast(table.schema.remove_metadata())\n .to_pandas())\n\n tm.assert_frame_equal(result, expected)\n\n def test_list_metadata(self):\n df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})\n schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])\n table = pa.Table.from_pandas(df, schema=schema)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'list[int64]'\n assert data_column['numpy_type'] == 'object'\n\n def test_struct_metadata(self):\n df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n table = pa.Table.from_pandas(df)\n pandas_metadata = table.schema.pandas_metadata\n assert pandas_metadata['columns'][0]['pandas_type'] == 'object'\n\n def test_decimal_metadata(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n table = pa.Table.from_pandas(expected)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert 
data_column['pandas_type'] == 'decimal'\n assert data_column['numpy_type'] == 'object'\n assert data_column['metadata'] == {'precision': 26, 'scale': 11}\n\n def test_table_column_subset_metadata(self):\n # ARROW-1883\n # non-default index\n for index in [\n pd.Index(['a', 'b', 'c'], name='index'),\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')]:\n df = pd.DataFrame({'a': [1, 2, 3],\n 'b': [.1, .2, .3]}, index=index)\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n expected = df[['a']]\n if isinstance(df.index, pd.DatetimeIndex):\n df.index.freq = None\n tm.assert_frame_equal(result, expected)\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))\n\n def test_to_pandas_column_subset_multiindex(self):\n # ARROW-10122\n df = pd.DataFrame(\n {\"first\": list(range(5)),\n \"second\": list(range(5)),\n \"value\": np.arange(5)}\n )\n table = pa.Table.from_pandas(df.set_index([\"first\", \"second\"]))\n\n subset = table.select([\"first\", \"value\"])\n result = subset.to_pandas()\n expected = df[[\"first\", \"value\"]].set_index(\"first\")\n tm.assert_frame_equal(result, expected)\n\n def test_empty_list_metadata(self):\n # Create table with array of empty lists, forced to have type\n # list(string) in pyarrow\n c1 = [[\"test\"], [\"a\", \"b\"], None]\n c2 = [[], [], []]\n arrays = OrderedDict([\n ('c1', pa.array(c1, type=pa.list_(pa.string()))),\n ('c2', pa.array(c2, type=pa.list_(pa.string()))),\n ])\n rb = pa.RecordBatch.from_arrays(\n list(arrays.values()),\n list(arrays.keys())\n )\n tbl = pa.Table.from_batches([rb])\n\n # First roundtrip changes schema, because pandas cannot preserve the\n # type of empty lists\n df = tbl.to_pandas()\n tbl2 = pa.Table.from_pandas(df)\n md2 = tbl2.schema.pandas_metadata\n\n # Second roundtrip\n df2 = tbl2.to_pandas()\n expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))\n\n tm.assert_frame_equal(df2, expected)\n\n assert md2['columns'] == [\n {\n 'name': 'c1',\n 'field_name': 'c1',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[unicode]',\n },\n {\n 'name': 'c2',\n 'field_name': 'c2',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[empty]',\n }\n ]\n\n def test_metadata_pandas_version(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.schema.pandas_metadata['pandas_version'] is not None\n\n def test_mismatch_metadata_schema(self):\n # ARROW-10511\n # It is possible that the metadata and actual schema is not fully\n # matching (eg no timezone information for tz-aware column)\n # -> to_pandas() conversion should not fail on that\n df = pd.DataFrame({\"datetime\": pd.date_range(\"2020-01-01\", periods=3)})\n\n # OPTION 1: casting after conversion\n table = pa.Table.from_pandas(df)\n # cast the \"datetime\" column to be tz-aware\n new_col = table[\"datetime\"].cast(pa.timestamp('ns', tz=\"UTC\"))\n new_table1 = table.set_column(\n 0, pa.field(\"datetime\", new_col.type), new_col\n )\n\n # OPTION 2: specify schema during conversion\n schema = pa.schema([(\"datetime\", pa.timestamp('ns', tz=\"UTC\"))])\n new_table2 = pa.Table.from_pandas(df, schema=schema)\n\n expected = df.copy()\n expected[\"datetime\"] = expected[\"datetime\"].dt.tz_localize(\"UTC\")\n\n for new_table in [new_table1, new_table2]:\n # ensure the new table still has the pandas metadata\n assert 
new_table.schema.pandas_metadata is not None\n # convert to pandas\n result = new_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\nclass TestConvertPrimitiveTypes:\n \"\"\"\n Conversion tests for primitive (e.g. numeric) types.\n \"\"\"\n\n def test_float_no_nulls(self):\n data = {}\n fields = []\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n num_values = 100\n\n for numpy_dtype, arrow_dtype in dtypes:\n values = np.random.randn(num_values)\n data[numpy_dtype] = values.astype(numpy_dtype)\n fields.append(pa.field(numpy_dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_float_nulls(self):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n names = ['f2', 'f4', 'f8']\n expected_cols = []\n\n arrays = []\n fields = []\n for name, arrow_dtype in dtypes:\n values = np.random.randn(num_values).astype(name)\n\n arr = pa.array(values, from_pandas=True, mask=null_mask)\n arrays.append(arr)\n fields.append(pa.field(name, arrow_dtype))\n values[null_mask] = np.nan\n\n expected_cols.append(values)\n\n ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),\n columns=names)\n\n table = pa.Table.from_arrays(arrays, names)\n assert table.schema.equals(pa.schema(fields))\n result = table.to_pandas()\n tm.assert_frame_equal(result, ex_frame)\n\n def test_float_nulls_to_ints(self):\n # ARROW-2135\n df = pd.DataFrame({\"a\": [1.0, 2.0, np.NaN]})\n schema = pa.schema([pa.field(\"a\", pa.int16(), nullable=True)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n assert table[0].to_pylist() == [1, 2, None]\n tm.assert_frame_equal(df, table.to_pandas())\n\n def test_float_nulls_to_boolean(self):\n s = pd.Series([0.0, 1.0, 2.0, None, -3.0])\n expected = pd.Series([False, True, True, None, True])\n _check_array_roundtrip(s, expected=expected, type=pa.bool_())\n\n def test_series_from_pandas_false_respected(self):\n # Check that explicit from_pandas=False is respected\n s = pd.Series([0.0, np.nan])\n arr = pa.array(s, from_pandas=False)\n assert arr.null_count == 0\n assert np.isnan(arr[1].as_py())\n\n def test_integer_no_nulls(self):\n data = OrderedDict()\n fields = []\n\n numpy_dtypes = [\n ('i1', pa.int8()), ('i2', pa.int16()),\n ('i4', pa.int32()), ('i8', pa.int64()),\n ('u1', pa.uint8()), ('u2', pa.uint16()),\n ('u4', pa.uint32()), ('u8', pa.uint64()),\n ('longlong', pa.int64()), ('ulonglong', pa.uint64())\n ]\n num_values = 100\n\n for dtype, arrow_dtype in numpy_dtypes:\n info = np.iinfo(dtype)\n values = np.random.randint(max(info.min, np.iinfo(np.int_).min),\n min(info.max, np.iinfo(np.int_).max),\n size=num_values)\n data[dtype] = values.astype(dtype)\n fields.append(pa.field(dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_integer_types(self):\n # Test all Numpy integer aliases\n data = OrderedDict()\n numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',\n 'int_', 'uint', 'longlong', 'ulonglong']\n for dtype in numpy_dtypes:\n data[dtype] = np.arange(12, dtype=dtype)\n df = pd.DataFrame(data)\n _check_pandas_roundtrip(df)\n\n # Do the same with pa.array()\n # (for some reason, it doesn't use the same code paths at all)\n for np_arr in data.values():\n arr = 
pa.array(np_arr)\n        assert arr.to_pylist() == np_arr.tolist()\n\n    def test_integer_byteorder(self):\n        # Byteswapped arrays are not supported yet\n        int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n        for dt in int_dtypes:\n            for order in '=<>':\n                data = np.array([1, 2, 42], dtype=order + dt)\n                for np_arr in (data, data[::2]):\n                    if np_arr.dtype.isnative:\n                        arr = pa.array(np_arr)\n                        assert arr.to_pylist() == np_arr.tolist()\n                    else:\n                        with pytest.raises(NotImplementedError):\n                            arr = pa.array(np_arr)\n\n    def test_integer_with_nulls(self):\n        # pandas requires upcast to float dtype\n\n        int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n        num_values = 100\n\n        null_mask = np.random.randint(0, 10, size=num_values) < 3\n\n        expected_cols = []\n        arrays = []\n        for name in int_dtypes:\n            values = np.random.randint(0, 100, size=num_values)\n\n            arr = pa.array(values, mask=null_mask)\n            arrays.append(arr)\n\n            expected = values.astype('f8')\n            expected[null_mask] = np.nan\n\n            expected_cols.append(expected)\n\n        ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),\n                                columns=int_dtypes)\n\n        table = pa.Table.from_arrays(arrays, int_dtypes)\n        result = table.to_pandas()\n\n        tm.assert_frame_equal(result, ex_frame)\n\n    def test_array_from_pandas_type_cast(self):\n        arr = np.arange(10, dtype='int64')\n\n        target_type = pa.int8()\n\n        result = pa.array(arr, type=target_type)\n        expected = pa.array(arr.astype('int8'))\n        assert result.equals(expected)\n\n    def test_boolean_no_nulls(self):\n        num_values = 100\n\n        np.random.seed(0)\n\n        df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})\n        field = pa.field('bools', pa.bool_())\n        schema = pa.schema([field])\n        _check_pandas_roundtrip(df, expected_schema=schema)\n\n    def test_boolean_nulls(self):\n        # pandas requires upcast to object dtype\n        num_values = 100\n        np.random.seed(0)\n\n        mask = np.random.randint(0, 10, size=num_values) < 3\n        values = np.random.randint(0, 10, size=num_values) < 5\n\n        arr = pa.array(values, mask=mask)\n\n        expected = values.astype(object)\n        expected[mask] = None\n\n        field = pa.field('bools', pa.bool_())\n        schema = pa.schema([field])\n        ex_frame = pd.DataFrame({'bools': expected})\n\n        table = pa.Table.from_arrays([arr], ['bools'])\n        assert table.schema.equals(schema)\n        result = table.to_pandas()\n\n        tm.assert_frame_equal(result, ex_frame)\n\n    def test_boolean_to_int(self):\n        # test from dtype=bool\n        s = pd.Series([True, True, False, True, True] * 2)\n        expected = pd.Series([1, 1, 0, 1, 1] * 2)\n        _check_array_roundtrip(s, expected=expected, type=pa.int64())\n\n    def test_boolean_objects_to_int(self):\n        # test from dtype=object\n        s = pd.Series([True, True, False, True, True] * 2, dtype=object)\n        expected = pd.Series([1, 1, 0, 1, 1] * 2)\n        expected_msg = 'Expected integer, got bool'\n        with pytest.raises(pa.ArrowTypeError, match=expected_msg):\n            _check_array_roundtrip(s, expected=expected, type=pa.int64())\n\n    def test_boolean_nulls_to_float(self):\n        # test from dtype=object\n        s = pd.Series([True, True, False, None, True] * 2)\n        expected = pd.Series([1.0, 1.0, 0.0, None, 1.0] * 2)\n        _check_array_roundtrip(s, expected=expected, type=pa.float64())\n\n    def test_boolean_multiple_columns(self):\n        # ARROW-6325 (multiple columns resulting in strided conversion)\n        df = pd.DataFrame(np.ones((3, 2), dtype='bool'), columns=['a', 'b'])\n        _check_pandas_roundtrip(df)\n\n    def test_float_object_nulls(self):\n        arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)\n        df = pd.DataFrame({'floats': arr})\n        expected = pd.DataFrame({'floats': 
pd.to_numeric(arr)})\n        field = pa.field('floats', pa.float64())\n        schema = pa.schema([field])\n        _check_pandas_roundtrip(df, expected=expected,\n                                expected_schema=schema)\n\n    def test_float_with_null_as_integer(self):\n        # ARROW-2298\n        s = pd.Series([np.nan, 1., 2., np.nan])\n\n        types = [pa.int8(), pa.int16(), pa.int32(), pa.int64(),\n                 pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]\n        for ty in types:\n            result = pa.array(s, type=ty)\n            expected = pa.array([None, 1, 2, None], type=ty)\n            assert result.equals(expected)\n\n            df = pd.DataFrame({'has_nulls': s})\n            schema = pa.schema([pa.field('has_nulls', ty)])\n            result = pa.Table.from_pandas(df, schema=schema,\n                                          preserve_index=False)\n            assert result[0].chunk(0).equals(expected)\n\n    def test_int_object_nulls(self):\n        arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)\n        df = pd.DataFrame({'ints': arr})\n        expected = pd.DataFrame({'ints': pd.to_numeric(arr)})\n        field = pa.field('ints', pa.int64())\n        schema = pa.schema([field])\n        _check_pandas_roundtrip(df, expected=expected,\n                                expected_schema=schema)\n\n    def test_boolean_object_nulls(self):\n        arr = np.array([False, None, True] * 100, dtype=object)\n        df = pd.DataFrame({'bools': arr})\n        field = pa.field('bools', pa.bool_())\n        schema = pa.schema([field])\n        _check_pandas_roundtrip(df, expected_schema=schema)\n\n    def test_all_nulls_cast_numeric(self):\n        arr = np.array([None], dtype=object)\n\n        def _check_type(t):\n            a2 = pa.array(arr, type=t)\n            assert a2.type == t\n            assert a2[0].as_py() is None\n\n        _check_type(pa.int32())\n        _check_type(pa.float64())\n\n    def test_half_floats_from_numpy(self):\n        arr = np.array([1.5, np.nan], dtype=np.float16)\n        a = pa.array(arr, type=pa.float16())\n        x, y = a.to_pylist()\n        assert isinstance(x, np.float16)\n        assert x == 1.5\n        assert isinstance(y, np.float16)\n        assert np.isnan(y)\n\n        a = pa.array(arr, type=pa.float16(), from_pandas=True)\n        x, y = a.to_pylist()\n        assert isinstance(x, np.float16)\n        assert x == 1.5\n        assert y is None\n\n\n@pytest.mark.parametrize('dtype',\n                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_array_integer_object_nulls_option(dtype):\n    num_values = 100\n\n    null_mask = np.random.randint(0, 10, size=num_values) < 3\n    values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n    array = pa.array(values, mask=null_mask)\n\n    if null_mask.any():\n        expected = values.astype('O')\n        expected[null_mask] = None\n    else:\n        expected = values\n\n    result = array.to_pandas(integer_object_nulls=True)\n\n    np.testing.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize('dtype',\n                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_table_integer_object_nulls_option(dtype):\n    num_values = 100\n\n    null_mask = np.random.randint(0, 10, size=num_values) < 3\n    values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n    array = pa.array(values, mask=null_mask)\n\n    if null_mask.any():\n        expected = values.astype('O')\n        expected[null_mask] = None\n    else:\n        expected = values\n\n    expected = pd.DataFrame({dtype: expected})\n\n    table = pa.Table.from_arrays([array], [dtype])\n    result = table.to_pandas(integer_object_nulls=True)\n\n    tm.assert_frame_equal(result, expected)\n\n\nclass TestConvertDateTimeLikeTypes:\n    \"\"\"\n    Conversion tests for datetime- and timestamp-like types (date64, etc.).\n    \"\"\"\n\n    def test_timestamps_notimezone_no_nulls(self):\n        df = pd.DataFrame({\n            'datetime64': np.array([\n                '2007-07-13T01:23:34.123456789',\n                '2006-01-13T12:34:56.432539784',\n                '2010-08-13T05:46:57.437699912'],\n                dtype='datetime64[ns]')\n        })\n        field = 
pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_notimezone_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_with_timezone(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123',\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n })\n df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')\n _check_pandas_roundtrip(df)\n\n _check_series_roundtrip(df['datetime64'])\n\n # drop-in a null and ns instead of ms\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')\n\n _check_pandas_roundtrip(df)\n\n def test_python_datetime(self):\n # ARROW-2106\n date_array = [datetime.today() + timedelta(days=x) for x in range(10)]\n df = pd.DataFrame({\n 'datetime': pd.Series(date_array, dtype=object)\n })\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\n 'datetime': date_array\n })\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_datetime_with_pytz_tzinfo(self):\n for tz in [pytz.utc, pytz.timezone('US/Eastern'), pytz.FixedOffset(1)]:\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]\n df = pd.DataFrame({'datetime': values})\n _check_pandas_roundtrip(df)\n\n @h.given(st.none() | tzst.timezones())\n def test_python_datetime_with_pytz_timezone(self, tz):\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]\n df = pd.DataFrame({'datetime': values})\n _check_pandas_roundtrip(df)\n\n def test_python_datetime_with_timezone_tzinfo(self):\n from datetime import timezone\n\n if Version(pd.__version__) > Version(\"0.25.0\"):\n # older pandas versions fail on datetime.timezone.utc (as in input)\n # vs pytz.UTC (as in result)\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=timezone.utc)]\n # also test with index to ensure both paths roundtrip (ARROW-9962)\n df = pd.DataFrame({'datetime': values}, index=values)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n # datetime.timezone is going to be pytz.FixedOffset\n hours = 1\n tz_timezone = timezone(timedelta(hours=hours))\n tz_pytz = pytz.FixedOffset(hours * 60)\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_timezone)]\n values_exp = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_pytz)]\n df = pd.DataFrame({'datetime': values}, index=values)\n df_exp = pd.DataFrame({'datetime': values_exp}, index=values_exp)\n _check_pandas_roundtrip(df, expected=df_exp, preserve_index=True)\n\n def test_python_datetime_subclass(self):\n\n class MyDatetime(datetime):\n # see https://github.com/pandas-dev/pandas/issues/21142\n nanosecond = 0.0\n\n date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]\n df = pd.DataFrame({\"datetime\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\"datetime\": date_array})\n\n # 
https://github.com/pandas-dev/pandas/issues/21142\n expected_df[\"datetime\"] = pd.to_datetime(expected_df[\"datetime\"])\n\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_date_subclass(self):\n\n class MyDate(date):\n pass\n\n date_array = [MyDate(2000, 1, 1)]\n df = pd.DataFrame({\"date\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.Date32Array)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame(\n {\"date\": np.array([date(2000, 1, 1)], dtype=object)}\n )\n tm.assert_frame_equal(expected_df, result)\n\n def test_datetime64_to_date32(self):\n # ARROW-1718\n arr = pa.array([date(2017, 10, 23), None])\n c = pa.chunked_array([arr])\n s = c.to_pandas()\n\n arr2 = pa.Array.from_pandas(s, type=pa.date32())\n\n assert arr2.equals(arr.cast('date32'))\n\n @pytest.mark.parametrize('mask', [\n None,\n np.array([True, False, False, True, False, False]),\n ])\n def test_pandas_datetime_to_date64(self, mask):\n s = pd.to_datetime([\n '2018-05-10T00:00:00',\n '2018-05-11T00:00:00',\n '2018-05-12T00:00:00',\n '2018-05-10T10:24:01',\n '2018-05-11T10:24:01',\n '2018-05-12T10:24:01',\n ])\n arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)\n\n data = np.array([\n date(2018, 5, 10),\n date(2018, 5, 11),\n date(2018, 5, 12),\n date(2018, 5, 10),\n date(2018, 5, 11),\n date(2018, 5, 12),\n ])\n expected = pa.array(data, mask=mask, type=pa.date64())\n\n assert arr.equals(expected)\n\n def test_array_types_date_as_object(self):\n data = [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]\n expected_d = np.array(['2000-01-01', None, '1970-01-01',\n '2040-02-26'], dtype='datetime64[D]')\n\n expected_ns = np.array(['2000-01-01', None, '1970-01-01',\n '2040-02-26'], dtype='datetime64[ns]')\n\n objects = [pa.array(data),\n pa.chunked_array([data])]\n\n for obj in objects:\n result = obj.to_pandas()\n expected_obj = expected_d.astype(object)\n assert result.dtype == expected_obj.dtype\n npt.assert_array_equal(result, expected_obj)\n\n result = obj.to_pandas(date_as_object=False)\n assert result.dtype == expected_ns.dtype\n npt.assert_array_equal(result, expected_ns)\n\n def test_table_convert_date_as_object(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n df_datetime = table.to_pandas(date_as_object=False)\n df_object = table.to_pandas()\n\n tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,\n check_dtype=True)\n tm.assert_frame_equal(df, df_object, check_dtype=True)\n\n def test_date_infer(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n table = pa.Table.from_pandas(df, preserve_index=False)\n field = pa.field('date', pa.date32())\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_date_mask(self):\n arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],\n dtype='datetime64[D]')\n mask = [True, False]\n result = pa.array(arr, mask=np.array(mask))\n expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')\n expected = pa.array(expected, from_pandas=True)\n assert expected.equals(result)\n\n def test_date_objects_typed(self):\n arr = np.array([\n date(2017, 4, 3),\n None,\n date(2017, 4, 
4),\n date(2017, 4, 5)], dtype=object)\n\n arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')\n arr_i8 = arr_i4.astype('int64') * 86400000\n mask = np.array([False, True, False, False])\n\n t32 = pa.date32()\n t64 = pa.date64()\n\n a32 = pa.array(arr, type=t32)\n a64 = pa.array(arr, type=t64)\n\n a32_expected = pa.array(arr_i4, mask=mask, type=t32)\n a64_expected = pa.array(arr_i8, mask=mask, type=t64)\n\n assert a32.equals(a32_expected)\n assert a64.equals(a64_expected)\n\n # Test converting back to pandas\n colnames = ['date32', 'date64']\n table = pa.Table.from_arrays([a32, a64], colnames)\n\n ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',\n '2017-04-05'],\n dtype='datetime64[D]'))\n ex_values[1] = pd.NaT.value\n\n ex_datetime64ns = ex_values.astype('datetime64[ns]')\n expected_pandas = pd.DataFrame({'date32': ex_datetime64ns,\n 'date64': ex_datetime64ns},\n columns=colnames)\n table_pandas = table.to_pandas(date_as_object=False)\n tm.assert_frame_equal(table_pandas, expected_pandas)\n\n table_pandas_objects = table.to_pandas()\n ex_objects = ex_values.astype('object')\n expected_pandas_objects = pd.DataFrame({'date32': ex_objects,\n 'date64': ex_objects},\n columns=colnames)\n tm.assert_frame_equal(table_pandas_objects,\n expected_pandas_objects)\n\n def test_pandas_null_values(self):\n # ARROW-842\n pd_NA = getattr(pd, 'NA', None)\n values = np.array([datetime(2000, 1, 1), pd.NaT, pd_NA], dtype=object)\n values_with_none = np.array([datetime(2000, 1, 1), None, None],\n dtype=object)\n result = pa.array(values, from_pandas=True)\n expected = pa.array(values_with_none, from_pandas=True)\n assert result.equals(expected)\n assert result.null_count == 2\n\n # ARROW-9407\n assert pa.array([pd.NaT], from_pandas=True).type == pa.null()\n assert pa.array([pd_NA], from_pandas=True).type == pa.null()\n\n def test_dates_from_integers(self):\n t1 = pa.date32()\n t2 = pa.date64()\n\n arr = np.array([17259, 17260, 17261], dtype='int32')\n arr2 = arr.astype('int64') * 86400000\n\n a1 = pa.array(arr, type=t1)\n a2 = pa.array(arr2, type=t2)\n\n expected = date(2017, 4, 3)\n assert a1[0].as_py() == expected\n assert a2[0].as_py() == expected\n\n def test_pytime_from_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356)]\n\n # microseconds\n t1 = pa.time64('us')\n\n aobjs = np.array(pytimes + [None], dtype=object)\n parr = pa.array(aobjs)\n assert parr.type == t1\n assert parr[0].as_py() == pytimes[0]\n assert parr[1].as_py() == pytimes[1]\n assert parr[2].as_py() is None\n\n # DataFrame\n df = pd.DataFrame({'times': aobjs})\n batch = pa.RecordBatch.from_pandas(df)\n assert batch[0].equals(parr)\n\n # Test ndarray of int64 values\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n a1 = pa.array(arr, type=pa.time64('us'))\n assert a1[0].as_py() == pytimes[0]\n\n a2 = pa.array(arr * 1000, type=pa.time64('ns'))\n assert a2[0].as_py() == pytimes[0]\n\n a3 = pa.array((arr / 1000).astype('i4'),\n type=pa.time32('ms'))\n assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)\n\n a4 = pa.array((arr / 1000000).astype('i4'),\n type=pa.time32('s'))\n assert a4[0].as_py() == pytimes[0].replace(microsecond=0)\n\n def test_arrow_time_to_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356),\n time(0, 0, 0)]\n\n expected = np.array(pytimes[:2] + [None])\n expected_ms = np.array([x.replace(microsecond=1000)\n for x in pytimes[:2]] +\n [None])\n expected_s = np.array([x.replace(microsecond=0)\n for x in pytimes[:2]] +\n 
[None])\n\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n null_mask = np.array([False, False, True], dtype=bool)\n\n a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))\n a2 = pa.array(arr * 1000, mask=null_mask,\n type=pa.time64('ns'))\n\n a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,\n type=pa.time32('ms'))\n a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,\n type=pa.time32('s'))\n\n names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']\n batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)\n\n for arr, expected_values in [(a1, expected),\n (a2, expected),\n (a3, expected_ms),\n (a4, expected_s)]:\n result_pandas = arr.to_pandas()\n assert (result_pandas.values == expected_values).all()\n\n df = batch.to_pandas()\n expected_df = pd.DataFrame({'time64[us]': expected,\n 'time64[ns]': expected,\n 'time32[ms]': expected_ms,\n 'time32[s]': expected_s},\n columns=names)\n\n tm.assert_frame_equal(df, expected_df)\n\n def test_numpy_datetime64_columns(self):\n datetime64_ns = np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n _check_array_from_pandas_roundtrip(datetime64_ns)\n\n datetime64_us = np.array([\n '2007-07-13T01:23:34.123456',\n None,\n '2006-01-13T12:34:56.432539',\n '2010-08-13T05:46:57.437699'],\n dtype='datetime64[us]')\n _check_array_from_pandas_roundtrip(datetime64_us)\n\n datetime64_ms = np.array([\n '2007-07-13T01:23:34.123',\n None,\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n _check_array_from_pandas_roundtrip(datetime64_ms)\n\n datetime64_s = np.array([\n '2007-07-13T01:23:34',\n None,\n '2006-01-13T12:34:56',\n '2010-08-13T05:46:57'],\n dtype='datetime64[s]')\n _check_array_from_pandas_roundtrip(datetime64_s)\n\n def test_timestamp_to_pandas_ns(self):\n # non-ns timestamp gets cast to ns on conversion to pandas\n arr = pa.array([1, 2, 3], pa.timestamp('ms'))\n expected = pd.Series(pd.to_datetime([1, 2, 3], unit='ms'))\n s = arr.to_pandas()\n tm.assert_series_equal(s, expected)\n arr = pa.chunked_array([arr])\n s = arr.to_pandas()\n tm.assert_series_equal(s, expected)\n\n def test_timestamp_to_pandas_out_of_bounds(self):\n # ARROW-7758 check for out of bounds timestamps for non-ns timestamps\n\n for unit in ['s', 'ms', 'us']:\n for tz in [None, 'America/New_York']:\n arr = pa.array([datetime(1, 1, 1)], pa.timestamp(unit, tz=tz))\n table = pa.table({'a': arr})\n\n msg = \"would result in out of bounds timestamp\"\n with pytest.raises(ValueError, match=msg):\n arr.to_pandas()\n\n with pytest.raises(ValueError, match=msg):\n table.to_pandas()\n\n with pytest.raises(ValueError, match=msg):\n # chunked array\n table.column('a').to_pandas()\n\n # just ensure those don't give an error, but do not\n # check actual garbage output\n arr.to_pandas(safe=False)\n table.to_pandas(safe=False)\n table.column('a').to_pandas(safe=False)\n\n def test_timestamp_to_pandas_empty_chunked(self):\n # ARROW-7907 table with chunked array with 0 chunks\n table = pa.table({'a': pa.chunked_array([], type=pa.timestamp('us'))})\n result = table.to_pandas()\n expected = pd.DataFrame({'a': pd.Series([], dtype=\"datetime64[ns]\")})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])\n def test_numpy_datetime64_day_unit(self, dtype):\n datetime64_d = np.array([\n 
'2007-07-13',\n None,\n '2006-01-15',\n '2010-08-19'],\n dtype='datetime64[D]')\n _check_array_from_pandas_roundtrip(datetime64_d, type=dtype)\n\n def test_array_from_pandas_date_with_mask(self):\n m = np.array([True, False, True])\n data = pd.Series([\n date(1990, 1, 1),\n date(1991, 1, 1),\n date(1992, 1, 1)\n ])\n\n result = pa.Array.from_pandas(data, mask=m)\n\n expected = pd.Series([None, date(1991, 1, 1), None])\n assert pa.Array.from_pandas(expected).equals(result)\n\n @pytest.mark.skipif(\n Version('1.16.0') <= Version(np.__version__) < Version('1.16.1'),\n reason='Until numpy/numpy#12745 is resolved')\n def test_fixed_offset_timezone(self):\n df = pd.DataFrame({\n 'a': [\n pd.Timestamp('2012-11-11 00:00:00+01:00'),\n pd.NaT\n ]\n })\n _check_pandas_roundtrip(df)\n _check_serialize_components_roundtrip(df)\n\n def test_timedeltas_no_nulls(self):\n df = pd.DataFrame({\n 'timedelta64': np.array([0, 3600000000000, 7200000000000],\n dtype='timedelta64[ns]')\n })\n field = pa.field('timedelta64', pa.duration('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timedeltas_nulls(self):\n df = pd.DataFrame({\n 'timedelta64': np.array([0, None, 7200000000000],\n dtype='timedelta64[ns]')\n })\n field = pa.field('timedelta64', pa.duration('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_month_day_nano_interval(self):\n from pandas.tseries.offsets import DateOffset\n df = pd.DataFrame({\n 'date_offset': [None,\n DateOffset(days=3600, months=3600, microseconds=3,\n nanoseconds=600)]\n })\n schema = pa.schema([('date_offset', pa.month_day_nano_interval())])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema)\n\n\n# ----------------------------------------------------------------------\n# Conversion tests for string and binary types.\n\n\nclass TestConvertStringLikeTypes:\n\n def test_pandas_unicode(self):\n repeats = 1000\n values = ['foo', None, 'bar', 'mañana', np.nan]\n df = pd.DataFrame({'strings': values * repeats})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_bytes_to_binary(self):\n values = ['qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]\n df = pd.DataFrame({'strings': values})\n\n table = pa.Table.from_pandas(df)\n assert table[0].type == pa.binary()\n\n values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]\n expected = pd.DataFrame({'strings': values2})\n _check_pandas_roundtrip(df, expected)\n\n @pytest.mark.large_memory\n def test_bytes_exceed_2gb(self):\n v1 = b'x' * 100000000\n v2 = b'x' * 147483646\n\n # ARROW-2227, hit exactly 2GB on the nose\n df = pd.DataFrame({\n 'strings': [v1] * 20 + [v2] + ['x'] * 20\n })\n arr = pa.array(df['strings'])\n assert isinstance(arr, pa.ChunkedArray)\n assert arr.num_chunks == 2\n arr = None\n\n table = pa.Table.from_pandas(df)\n assert table[0].num_chunks == 2\n\n @pytest.mark.large_memory\n @pytest.mark.parametrize('char', ['x', b'x'])\n def test_auto_chunking_pandas_series_of_strings(self, char):\n # ARROW-2367\n v1 = char * 100000000\n v2 = char * 147483646\n\n df = pd.DataFrame({\n 'strings': [[v1]] * 20 + [[v2]] + [[b'x']]\n })\n arr = pa.array(df['strings'], from_pandas=True)\n assert isinstance(arr, pa.ChunkedArray)\n assert arr.num_chunks == 2\n assert len(arr.chunk(0)) == 21\n assert len(arr.chunk(1)) == 1\n\n def test_fixed_size_bytes(self):\n values = [b'foo', None, bytearray(b'bar'), None, None, 
b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema[0].type == schema[0].type\n assert table.schema[0].name == schema[0].name\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_fixed_size_bytes_does_not_accept_varying_lengths(self):\n values = [b'foo', None, b'ba', None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n with pytest.raises(pa.ArrowInvalid):\n pa.Table.from_pandas(df, schema=schema)\n\n def test_variable_size_bytes(self):\n s = pd.Series([b'123', b'', b'a', None])\n _check_series_roundtrip(s, type_=pa.binary())\n\n def test_binary_from_bytearray(self):\n s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),\n None])\n # Explicitly set type\n _check_series_roundtrip(s, type_=pa.binary())\n # Infer type from bytearrays\n _check_series_roundtrip(s, expected_pa_type=pa.binary())\n\n def test_large_binary(self):\n s = pd.Series([b'123', b'', b'a', None])\n _check_series_roundtrip(s, type_=pa.large_binary())\n df = pd.DataFrame({'a': s})\n _check_pandas_roundtrip(\n df, schema=pa.schema([('a', pa.large_binary())]))\n\n def test_large_string(self):\n s = pd.Series(['123', '', 'a', None])\n _check_series_roundtrip(s, type_=pa.large_string())\n df = pd.DataFrame({'a': s})\n _check_pandas_roundtrip(\n df, schema=pa.schema([('a', pa.large_string())]))\n\n def test_table_empty_str(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result1 = table.to_pandas(strings_to_categorical=False)\n expected1 = pd.DataFrame({'strings': values})\n tm.assert_frame_equal(result1, expected1, check_dtype=True)\n\n result2 = table.to_pandas(strings_to_categorical=True)\n expected2 = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result2, expected2, check_dtype=True)\n\n def test_selective_categoricals(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n expected_str = pd.DataFrame({'strings': values})\n expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})\n\n result1 = table.to_pandas(categories=['strings'])\n tm.assert_frame_equal(result1, expected_cat, check_dtype=True)\n result2 = table.to_pandas(categories=[])\n tm.assert_frame_equal(result2, expected_str, check_dtype=True)\n result3 = table.to_pandas(categories=('strings',))\n tm.assert_frame_equal(result3, expected_cat, check_dtype=True)\n result4 = table.to_pandas(categories=tuple())\n tm.assert_frame_equal(result4, expected_str, check_dtype=True)\n\n def test_to_pandas_categorical_zero_length(self):\n # ARROW-3586\n array = pa.array([], type=pa.int32())\n table = pa.Table.from_arrays(arrays=[array], names=['col'])\n # This would segfault under 0.11.0\n table.to_pandas(categories=['col'])\n\n def test_to_pandas_categories_already_dictionary(self):\n # Showed up in ARROW-6434, ARROW-6435\n array = pa.array(['foo', 'foo', 'foo', 'bar']).dictionary_encode()\n table = pa.Table.from_arrays(arrays=[array], names=['col'])\n result = table.to_pandas(categories=['col'])\n assert table.to_pandas().equals(result)\n\n def test_table_str_to_categorical_without_na(self):\n values = ['a', 'a', 
'b', 'b', 'c']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n def test_table_str_to_categorical_with_na(self):\n values = [None, 'a', 'b', np.nan]\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n # Regression test for ARROW-2101\n def test_array_of_bytes_to_strings(self):\n converted = pa.array(np.array([b'x'], dtype=object), pa.string())\n assert converted.type == pa.string()\n\n # Make sure that if an ndarray of bytes is passed to the array\n # constructor and the type is string, it will fail if those bytes\n # cannot be converted to utf-8\n def test_array_of_bytes_to_strings_bad_data(self):\n with pytest.raises(\n pa.lib.ArrowInvalid,\n match=\"was not a utf8 string\"):\n pa.array(np.array([b'\\x80\\x81'], dtype=object), pa.string())\n\n def test_numpy_string_array_to_fixed_size_binary(self):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n\n converted = pa.array(arr, type=pa.binary(3))\n expected = pa.array(list(arr), type=pa.binary(3))\n assert converted.equals(expected)\n\n mask = np.array([False, True, False])\n converted = pa.array(arr, type=pa.binary(3), mask=mask)\n expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))\n assert converted.equals(expected)\n\n with pytest.raises(pa.lib.ArrowInvalid,\n match=r'Got bytestring of length 3 \\(expected 4\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n pa.array(arr, type=pa.binary(4))\n\n with pytest.raises(\n pa.lib.ArrowInvalid,\n match=r'Got bytestring of length 12 \\(expected 3\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')\n pa.array(arr, type=pa.binary(3))\n\n\nclass TestConvertDecimalTypes:\n \"\"\"\n Conversion test for decimal types.\n \"\"\"\n decimal32 = [\n decimal.Decimal('-1234.123'),\n decimal.Decimal('1234.439')\n ]\n decimal64 = [\n decimal.Decimal('-129934.123331'),\n decimal.Decimal('129534.123731')\n ]\n decimal128 = [\n decimal.Decimal('394092382910493.12341234678'),\n decimal.Decimal('-314292388910493.12343437128')\n ]\n\n @pytest.mark.parametrize(('values', 'expected_type'), [\n pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),\n pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),\n pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')\n ])\n def test_decimal_from_pandas(self, values, expected_type):\n expected = pd.DataFrame({'decimals': values})\n table = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', expected_type)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n @pytest.mark.parametrize('values', [\n pytest.param(decimal32, id='decimal32'),\n 
pytest.param(decimal64, id='decimal64'),\n pytest.param(decimal128, id='decimal128')\n ])\n def test_decimal_to_pandas(self, values):\n expected = pd.DataFrame({'decimals': values})\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n def test_decimal_fails_with_truncation(self):\n data1 = [decimal.Decimal('1.234')]\n type1 = pa.decimal128(10, 2)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data1, type=type1)\n\n data2 = [decimal.Decimal('1.2345')]\n type2 = pa.decimal128(10, 3)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data2, type=type2)\n\n def test_decimal_with_different_precisions(self):\n data = [\n decimal.Decimal('0.01'),\n decimal.Decimal('0.001'),\n ]\n series = pd.Series(data)\n array = pa.array(series)\n assert array.to_pylist() == data\n assert array.type == pa.decimal128(3, 3)\n\n array = pa.array(data, type=pa.decimal128(12, 5))\n expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]\n assert array.to_pylist() == expected\n\n def test_decimal_with_None_explicit_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n # Test that having all None values still produces decimal array\n series = pd.Series([None] * 2)\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n def test_decimal_with_None_infer_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))\n\n def test_strided_objects(self, tmpdir):\n # see ARROW-3053\n data = {\n 'a': {0: 'a'},\n 'b': {0: decimal.Decimal('0.0')}\n }\n\n # This yields strided objects\n df = pd.DataFrame.from_dict(data)\n _check_pandas_roundtrip(df)\n\n\nclass TestConvertListTypes:\n \"\"\"\n Conversion tests for list<> types.\n \"\"\"\n\n def test_column_of_arrays(self):\n df, schema = dataframe_with_arrays()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.with_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n field = schema.field(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_arrays_to_py(self):\n # Test regression in ARROW-1199 not caught in above test\n dtype = 'i1'\n arr = np.array([\n np.arange(10, dtype=dtype),\n np.arange(5, dtype=dtype),\n None,\n np.arange(1, dtype=dtype)\n ], dtype=object)\n type_ = pa.list_(pa.int8())\n parr = pa.array(arr, type=type_)\n\n assert parr[0].as_py() == list(range(10))\n assert parr[1].as_py() == list(range(5))\n assert parr[2].as_py() is None\n assert parr[3].as_py() == [0]\n\n def test_column_of_boolean_list(self):\n # ARROW-4370: Table to pandas conversion fails for list of bool\n array = pa.array([[True, False], [True]], type=pa.list_(pa.bool_()))\n table = pa.Table.from_arrays([array], names=['col1'])\n df = table.to_pandas()\n\n expected_df = pd.DataFrame({'col1': [[True, False], [True]]})\n tm.assert_frame_equal(df, expected_df)\n\n s = table[0].to_pandas()\n tm.assert_series_equal(pd.Series(s), df['col1'], check_names=False)\n\n def test_column_of_decimal_list(self):\n array = pa.array([[decimal.Decimal('1'), decimal.Decimal('2')],\n [decimal.Decimal('3.3')]],\n type=pa.list_(pa.decimal128(2, 1)))\n table = pa.Table.from_arrays([array], names=['col1'])\n df = 
table.to_pandas()\n\n expected_df = pd.DataFrame(\n {'col1': [[decimal.Decimal('1'), decimal.Decimal('2')],\n [decimal.Decimal('3.3')]]})\n tm.assert_frame_equal(df, expected_df)\n\n def test_nested_types_from_ndarray_null_entries(self):\n # Root cause of ARROW-6435\n s = pd.Series(np.array([np.nan, np.nan], dtype=object))\n\n for ty in [pa.list_(pa.int64()),\n pa.large_list(pa.int64()),\n pa.struct([pa.field('f0', 'int32')])]:\n result = pa.array(s, type=ty)\n expected = pa.array([None, None], type=ty)\n assert result.equals(expected)\n\n with pytest.raises(TypeError):\n pa.array(s.values, type=ty)\n\n def test_column_of_lists(self):\n df, schema = dataframe_with_lists()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.with_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n field = schema.field(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_lists_first_empty(self):\n # ARROW-2124\n num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]\n series = pd.Series([np.array(s, dtype=float) for s in num_lists])\n arr = pa.array(series)\n result = pd.Series(arr.to_pandas())\n tm.assert_series_equal(result, series)\n\n def test_column_of_lists_chunked(self):\n # ARROW-1357\n df = pd.DataFrame({\n 'lists': np.array([\n [1, 2],\n None,\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9]\n ], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n t1 = pa.Table.from_pandas(df[:2], schema=schema)\n t2 = pa.Table.from_pandas(df[2:], schema=schema)\n\n table = pa.concat_tables([t1, t2])\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_empty_column_of_lists_chunked(self):\n df = pd.DataFrame({\n 'lists': np.array([], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n table = pa.Table.from_pandas(df, schema=schema)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_column_of_lists_chunked2(self):\n data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],\n [12, 13], [14, 15], [16, 17]]\n data2 = [[8, 9], [18, 19]]\n\n a1 = pa.array(data1)\n a2 = pa.array(data2)\n\n t1 = pa.Table.from_arrays([a1], names=['a'])\n t2 = pa.Table.from_arrays([a2], names=['a'])\n\n concatenated = pa.concat_tables([t1, t2])\n\n result = concatenated.to_pandas()\n expected = pd.DataFrame({'a': data1 + data2})\n\n tm.assert_frame_equal(result, expected)\n\n def test_column_of_lists_strided(self):\n df, schema = dataframe_with_lists()\n df = pd.concat([df] * 6, ignore_index=True)\n\n arr = df['int64'].values[::3]\n assert arr.strides[0] != 8\n\n _check_array_roundtrip(arr)\n\n def test_nested_lists_all_none(self):\n data = np.array([[None, None], None], dtype=object)\n\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n data2 = np.array([None, None, [None, None],\n np.array([None, None], dtype=object)],\n dtype=object)\n arr = pa.array(data2)\n expected = pa.array([None, None, [None, None], [None, None]])\n assert arr.equals(expected)\n\n def test_nested_lists_all_empty(self):\n # ARROW-2128\n data = pd.Series([[], [], []])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n def 
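test_nested_lists_empty_with_explicit_type(self):\n        # Editor's illustrative sketch (not part of the original suite):\n        # complementing the inference test above, an explicit type keeps\n        # empty lists typed instead of falling back to list(null).\n        data = pd.Series([[], [], []])\n        arr = pa.array(data, type=pa.list_(pa.int64()))\n        assert arr.type == pa.list_(pa.int64())\n        assert arr.to_pylist() == [[], [], []]\n\n    def 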
test_nested_list_first_empty(self):\n # ARROW-2711\n data = pd.Series([[], [\"a\"]])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.string())\n\n def test_nested_smaller_ints(self):\n # ARROW-1345, ARROW-2008, there were some type inference bugs happening\n # before\n data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])\n result = pa.array(data)\n result2 = pa.array(data.values)\n expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))\n assert result.equals(expected)\n assert result2.equals(expected)\n\n data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])\n result3 = pa.array(data3)\n expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))\n assert result3.equals(expected3)\n\n def test_infer_lists(self):\n data = OrderedDict([\n ('nan_ints', [[None, 1], [2, 3]]),\n ('ints', [[0, 1], [2, 3]]),\n ('strs', [[None, 'b'], ['c', 'd']]),\n ('nested_strs', [[[None, 'b'], ['c', 'd']], None])\n ])\n df = pd.DataFrame(data)\n\n expected_schema = pa.schema([\n pa.field('nan_ints', pa.list_(pa.int64())),\n pa.field('ints', pa.list_(pa.int64())),\n pa.field('strs', pa.list_(pa.string())),\n pa.field('nested_strs', pa.list_(pa.list_(pa.string())))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_fixed_size_list(self):\n # ARROW-7365\n fixed_ty = pa.list_(pa.int64(), list_size=4)\n variable_ty = pa.list_(pa.int64())\n\n data = [[0, 1, 2, 3], None, [4, 5, 6, 7], [8, 9, 10, 11]]\n fixed_arr = pa.array(data, type=fixed_ty)\n variable_arr = pa.array(data, type=variable_ty)\n\n result = fixed_arr.to_pandas()\n expected = variable_arr.to_pandas()\n\n for left, right in zip(result, expected):\n if left is None:\n assert right is None\n npt.assert_array_equal(left, right)\n\n def test_infer_numpy_array(self):\n data = OrderedDict([\n ('ints', [\n np.array([0, 1], dtype=np.int64),\n np.array([2, 3], dtype=np.int64)\n ])\n ])\n df = pd.DataFrame(data)\n expected_schema = pa.schema([\n pa.field('ints', pa.list_(pa.int64()))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_to_list_of_structs_pandas(self):\n ints = pa.array([1, 2, 3], pa.int32())\n strings = pa.array([['a', 'b'], ['c', 'd'], ['e', 'f']],\n pa.list_(pa.string()))\n structs = pa.StructArray.from_arrays([ints, strings], ['f1', 'f2'])\n data = pa.ListArray.from_arrays([0, 1, 3], structs)\n\n expected = pd.Series([\n [{'f1': 1, 'f2': ['a', 'b']}],\n [{'f1': 2, 'f2': ['c', 'd']},\n {'f1': 3, 'f2': ['e', 'f']}]\n ])\n\n series = pd.Series(data.to_pandas())\n tm.assert_series_equal(series, expected)\n\n @pytest.mark.parametrize('t,data,expected', [\n (\n pa.int64,\n [[1, 2], [3], None],\n [None, [3], None]\n ),\n (\n pa.string,\n [['aaa', 'bb'], ['c'], None],\n [None, ['c'], None]\n ),\n (\n pa.null,\n [[None, None], [None], None],\n [None, [None], None]\n )\n ])\n def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):\n m = np.array([True, False, True])\n\n s = pd.Series(data)\n result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))\n\n assert pa.Array.from_pandas(expected,\n type=pa.list_(t())).equals(result)\n\n def test_empty_list_roundtrip(self):\n empty_list_array = np.empty((3,), dtype=object)\n empty_list_array.fill([])\n\n df = pd.DataFrame({'a': np.array(['1', '2', '3']),\n 'b': empty_list_array})\n tbl = pa.Table.from_pandas(df)\n\n result = tbl.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_array_from_nested_arrays(self):\n df, 
schema = dataframe_with_arrays()\n for field in schema:\n arr = df[field.name].values\n expected = pa.array(list(arr), type=field.type)\n result = pa.array(arr)\n assert result.type == field.type # == list<scalar>\n assert result.equals(expected)\n\n def test_nested_large_list(self):\n s = (pa.array([[[1, 2, 3], [4]], None],\n type=pa.large_list(pa.large_list(pa.int64())))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[[1, 2, 3], [4]], None], dtype=object),\n check_names=False)\n\n def test_large_binary_list(self):\n for list_type_factory in (pa.list_, pa.large_list):\n s = (pa.array([[\"aa\", \"bb\"], None, [\"cc\"], []],\n type=list_type_factory(pa.large_binary()))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[b\"aa\", b\"bb\"], None, [b\"cc\"], []]),\n check_names=False)\n s = (pa.array([[\"aa\", \"bb\"], None, [\"cc\"], []],\n type=list_type_factory(pa.large_string()))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[\"aa\", \"bb\"], None, [\"cc\"], []]),\n check_names=False)\n\n def test_list_of_dictionary(self):\n child = pa.array([\"foo\", \"bar\", None, \"foo\"]).dictionary_encode()\n arr = pa.ListArray.from_arrays([0, 1, 3, 3, 4], child)\n\n # Expected a Series of lists\n expected = pd.Series(arr.to_pylist())\n tm.assert_series_equal(arr.to_pandas(), expected)\n\n # Same but with nulls\n arr = arr.take([0, 1, None, 3])\n expected[2] = None\n tm.assert_series_equal(arr.to_pandas(), expected)\n\n @pytest.mark.large_memory\n def test_auto_chunking_on_list_overflow(self):\n # ARROW-9976\n n = 2**21\n df = pd.DataFrame.from_dict({\n \"a\": list(np.zeros((n, 2**10), dtype='uint8')),\n \"b\": range(n)\n })\n table = pa.Table.from_pandas(df)\n\n column_a = table[0]\n assert column_a.num_chunks == 2\n assert len(column_a.chunk(0)) == 2**21 - 1\n assert len(column_a.chunk(1)) == 1\n\n def test_map_array_roundtrip(self):\n data = [[(b'a', 1), (b'b', 2)],\n [(b'c', 3)],\n [(b'd', 4), (b'e', 5), (b'f', 6)],\n [(b'g', 7)]]\n\n df = pd.DataFrame({\"map\": data})\n schema = pa.schema([(\"map\", pa.map_(pa.binary(), pa.int32()))])\n\n _check_pandas_roundtrip(df, schema=schema)\n\n def test_map_array_chunked(self):\n data1 = [[(b'a', 1), (b'b', 2)],\n [(b'c', 3)],\n [(b'd', 4), (b'e', 5), (b'f', 6)],\n [(b'g', 7)]]\n data2 = [[(k, v * 2) for k, v in row] for row in data1]\n\n arr1 = pa.array(data1, type=pa.map_(pa.binary(), pa.int32()))\n arr2 = pa.array(data2, type=pa.map_(pa.binary(), pa.int32()))\n arr = pa.chunked_array([arr1, arr2])\n\n expected = pd.Series(data1 + data2)\n actual = arr.to_pandas()\n tm.assert_series_equal(actual, expected, check_names=False)\n\n def test_map_array_with_nulls(self):\n data = [[(b'a', 1), (b'b', 2)],\n None,\n [(b'd', 4), (b'e', 5), (b'f', None)],\n [(b'g', 7)]]\n\n # None value in item array causes upcast to float\n expected = [[(k, float(v) if v is not None else None) for k, v in row]\n if row is not None else None for row in data]\n expected = pd.Series(expected)\n\n arr = pa.array(data, type=pa.map_(pa.binary(), pa.int32()))\n actual = arr.to_pandas()\n tm.assert_series_equal(actual, expected, check_names=False)\n\n def test_map_array_dictionary_encoded(self):\n offsets = pa.array([0, 3, 5])\n items = pa.array(['a', 'b', 'c', 'a', 'd']).dictionary_encode()\n keys = pa.array(list(range(len(items))))\n arr = pa.MapArray.from_arrays(offsets, keys, items)\n\n # Dictionary encoded values converted to dense\n expected = pd.Series(\n [[(0, 'a'), (1, 'b'), (2, 'c')], [(3, 'a'), (4, 'd')]])\n\n actual = arr.to_pandas()\n 
tm.assert_series_equal(actual, expected, check_names=False)\n\n\nclass TestConvertStructTypes:\n \"\"\"\n Conversion tests for struct types.\n \"\"\"\n\n def test_pandas_roundtrip(self):\n df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n\n expected_schema = pa.schema([\n ('dicts', pa.struct([('a', pa.int64()), ('b', pa.int64())])),\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n # specifying schema explicitly in from_pandas\n _check_pandas_roundtrip(\n df, schema=expected_schema, expected_schema=expected_schema)\n\n def test_to_pandas(self):\n ints = pa.array([None, 2, 3], type=pa.int64())\n strs = pa.array(['a', None, 'c'], type=pa.string())\n bools = pa.array([True, False, None], type=pa.bool_())\n arr = pa.StructArray.from_arrays(\n [ints, strs, bools],\n ['ints', 'strs', 'bools'])\n\n expected = pd.Series([\n {'ints': None, 'strs': 'a', 'bools': True},\n {'ints': 2, 'strs': None, 'bools': False},\n {'ints': 3, 'strs': 'c', 'bools': None},\n ])\n\n series = pd.Series(arr.to_pandas())\n tm.assert_series_equal(series, expected)\n\n def test_to_pandas_multiple_chunks(self):\n # ARROW-11855\n gc.collect()\n bytes_start = pa.total_allocated_bytes()\n ints1 = pa.array([1], type=pa.int64())\n ints2 = pa.array([2], type=pa.int64())\n arr1 = pa.StructArray.from_arrays([ints1], ['ints'])\n arr2 = pa.StructArray.from_arrays([ints2], ['ints'])\n arr = pa.chunked_array([arr1, arr2])\n\n expected = pd.Series([\n {'ints': 1},\n {'ints': 2}\n ])\n\n series = pd.Series(arr.to_pandas())\n tm.assert_series_equal(series, expected)\n\n del series\n del arr\n del arr1\n del arr2\n del ints1\n del ints2\n bytes_end = pa.total_allocated_bytes()\n assert bytes_end == bytes_start\n\n def test_from_numpy(self):\n dt = np.dtype([('x', np.int32),\n (('y_title', 'y'), np.bool_)])\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(42, True), (43, False)], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True},\n {'x': 43, 'y': False}]\n\n # With mask\n arr = pa.array(data, mask=np.bool_([False, True]), type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True}, None]\n\n # Trivial struct type\n dt = np.dtype([])\n ty = pa.struct([])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(), ()], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{}, {}]\n\n def test_from_numpy_nested(self):\n # Note: an object field inside a struct\n dt = np.dtype([('x', np.dtype([('xx', np.int8),\n ('yy', np.bool_)])),\n ('y', np.int16),\n ('z', np.object_)])\n # Note: itemsize is not a multiple of sizeof(object)\n assert dt.itemsize == 12\n ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),\n pa.field('yy', pa.bool_())])),\n pa.field('y', pa.int16()),\n pa.field('z', pa.string())])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([\n ((1, True), 2, 'foo'),\n ((3, False), 4, 'bar')], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [\n {'x': {'xx': 1, 'yy': True}, 'y': 2, 'z': 'foo'},\n {'x': {'xx': 3, 'yy': False}, 'y': 4, 'z': 'bar'}]\n\n @pytest.mark.slow\n @pytest.mark.large_memory\n def test_from_numpy_large(self):\n # Exercise rechunking + nulls\n target_size = 3 * 1024**3 # 4GB\n dt = np.dtype([('x', np.float64), ('y', 
'object')])\n bs = 65536 - dt.itemsize\n block = b'.' * bs\n n = target_size // (bs + dt.itemsize)\n data = np.zeros(n, dtype=dt)\n data['x'] = np.random.random_sample(n)\n data['y'] = block\n # Add implicit nulls\n data['x'][data['x'] < 0.2] = np.nan\n\n ty = pa.struct([pa.field('x', pa.float64()),\n pa.field('y', pa.binary())])\n arr = pa.array(data, type=ty, from_pandas=True)\n assert arr.num_chunks == 2\n\n def iter_chunked_array(arr):\n for chunk in arr.iterchunks():\n yield from chunk\n\n def check(arr, data, mask=None):\n assert len(arr) == len(data)\n xs = data['x']\n ys = data['y']\n for i, obj in enumerate(iter_chunked_array(arr)):\n try:\n d = obj.as_py()\n if mask is not None and mask[i]:\n assert d is None\n else:\n x = xs[i]\n if np.isnan(x):\n assert d['x'] is None\n else:\n assert d['x'] == x\n assert d['y'] == ys[i]\n except Exception:\n print(\"Failed at index\", i)\n raise\n\n check(arr, data)\n del arr\n\n # Now with explicit mask\n mask = np.random.random_sample(n) < 0.2\n arr = pa.array(data, type=ty, mask=mask, from_pandas=True)\n assert arr.num_chunks == 2\n\n check(arr, data, mask)\n del arr\n\n def test_from_numpy_bad_input(self):\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n dt = np.dtype([('x', np.int32),\n ('z', np.bool_)])\n\n data = np.array([], dtype=dt)\n with pytest.raises(ValueError,\n match=\"Missing field 'y'\"):\n pa.array(data, type=ty)\n data = np.int32([])\n with pytest.raises(TypeError,\n match=\"Expected struct array\"):\n pa.array(data, type=ty)\n\n def test_from_tuples(self):\n df = pd.DataFrame({'tuples': [(1, 2), (3, 4)]})\n expected_df = pd.DataFrame(\n {'tuples': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n\n # conversion from tuples works when specifying expected struct type\n struct_type = pa.struct([('a', pa.int64()), ('b', pa.int64())])\n\n arr = np.asarray(df['tuples'])\n _check_array_roundtrip(\n arr, expected=expected_df['tuples'], type=struct_type)\n\n expected_schema = pa.schema([('tuples', struct_type)])\n _check_pandas_roundtrip(\n df, expected=expected_df, schema=expected_schema,\n expected_schema=expected_schema)\n\n def test_struct_of_dictionary(self):\n names = ['ints', 'strs']\n children = [pa.array([456, 789, 456]).dictionary_encode(),\n pa.array([\"foo\", \"foo\", None]).dictionary_encode()]\n arr = pa.StructArray.from_arrays(children, names=names)\n\n # Expected a Series of {field name: field value} dicts\n rows_as_tuples = zip(*(child.to_pylist() for child in children))\n rows_as_dicts = [dict(zip(names, row)) for row in rows_as_tuples]\n\n expected = pd.Series(rows_as_dicts)\n tm.assert_series_equal(arr.to_pandas(), expected)\n\n # Same but with nulls\n arr = arr.take([0, None, 2])\n expected[1] = None\n tm.assert_series_equal(arr.to_pandas(), expected)\n\n\nclass TestZeroCopyConversion:\n \"\"\"\n Tests that zero-copy conversion works with some types.\n \"\"\"\n\n def test_zero_copy_success(self):\n result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, [0, 1, 2])\n\n def test_zero_copy_dictionaries(self):\n arr = pa.DictionaryArray.from_arrays(\n np.array([0, 0]),\n np.array([5]))\n\n result = arr.to_pandas(zero_copy_only=True)\n values = pd.Categorical([5, 5])\n\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n\n def test_zero_copy_timestamp(self):\n arr = np.array(['2007-07-13'], dtype='datetime64[ns]')\n result = pa.array(arr).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, arr)\n\n def 
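test_zero_copy_int64_from_numpy(self):\n        # Editor's illustrative sketch (not part of the original suite):\n        # primitive arrays without nulls built from a NumPy buffer can\n        # also round-trip under zero_copy_only=True.\n        arr = np.array([7, 8, 9], dtype='int64')\n        result = pa.array(arr).to_pandas(zero_copy_only=True)\n        npt.assert_array_equal(result, arr)\n\n    def 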
test_zero_copy_duration(self):\n arr = np.array([1], dtype='timedelta64[ns]')\n result = pa.array(arr).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, arr)\n\n def check_zero_copy_failure(self, arr):\n with pytest.raises(pa.ArrowInvalid):\n arr.to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_object_types(self):\n self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))\n\n def test_zero_copy_failure_with_int_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0, 1, None]))\n\n def test_zero_copy_failure_with_float_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))\n\n def test_zero_copy_failure_on_bool_types(self):\n self.check_zero_copy_failure(pa.array([True, False]))\n\n def test_zero_copy_failure_on_list_types(self):\n arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))\n self.check_zero_copy_failure(arr)\n\n def test_zero_copy_failure_on_timestamp_with_nulls(self):\n arr = np.array([1, None], dtype='datetime64[ns]')\n self.check_zero_copy_failure(pa.array(arr))\n\n def test_zero_copy_failure_on_duration_with_nulls(self):\n arr = np.array([1, None], dtype='timedelta64[ns]')\n self.check_zero_copy_failure(pa.array(arr))\n\n\ndef _non_threaded_conversion():\n df = _alltypes_example()\n _check_pandas_roundtrip(df, use_threads=False)\n _check_pandas_roundtrip(df, use_threads=False, as_batch=True)\n\n\ndef _threaded_conversion():\n df = _alltypes_example()\n _check_pandas_roundtrip(df, use_threads=True)\n _check_pandas_roundtrip(df, use_threads=True, as_batch=True)\n\n\nclass TestConvertMisc:\n \"\"\"\n Miscellaneous conversion tests.\n \"\"\"\n\n type_pairs = [\n (np.int8, pa.int8()),\n (np.int16, pa.int16()),\n (np.int32, pa.int32()),\n (np.int64, pa.int64()),\n (np.uint8, pa.uint8()),\n (np.uint16, pa.uint16()),\n (np.uint32, pa.uint32()),\n (np.uint64, pa.uint64()),\n (np.float16, pa.float16()),\n (np.float32, pa.float32()),\n (np.float64, pa.float64()),\n # XXX unsupported\n # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),\n (np.object_, pa.string()),\n (np.object_, pa.binary()),\n (np.object_, pa.binary(10)),\n (np.object_, pa.list_(pa.int64())),\n ]\n\n def test_all_none_objects(self):\n df = pd.DataFrame({'a': [None, None, None]})\n _check_pandas_roundtrip(df)\n\n def test_all_none_category(self):\n df = pd.DataFrame({'a': [None, None, None]})\n df['a'] = df['a'].astype('category')\n _check_pandas_roundtrip(df)\n\n def test_empty_arrays(self):\n for dtype, pa_type in self.type_pairs:\n arr = np.array([], dtype=dtype)\n _check_array_roundtrip(arr, type=pa_type)\n\n def test_non_threaded_conversion(self):\n _non_threaded_conversion()\n\n def test_threaded_conversion_multiprocess(self):\n # Parallel conversion should work from child processes too (ARROW-2963)\n pool = mp.Pool(2)\n try:\n pool.apply(_threaded_conversion)\n finally:\n pool.close()\n pool.join()\n\n def test_category(self):\n repeats = 5\n v1 = ['foo', None, 'bar', 'qux', np.nan]\n v2 = [4, 5, 6, 7, 8]\n v3 = [b'foo', None, b'bar', b'qux', np.nan]\n\n arrays = {\n 'cat_strings': pd.Categorical(v1 * repeats),\n 'cat_strings_with_na': pd.Categorical(v1 * repeats,\n categories=['foo', 'bar']),\n 'cat_ints': pd.Categorical(v2 * repeats),\n 'cat_binary': pd.Categorical(v3 * repeats),\n 'cat_strings_ordered': pd.Categorical(\n v1 * repeats, categories=['bar', 'qux', 'foo'],\n ordered=True),\n 'ints': v2 * repeats,\n 'ints2': v2 * repeats,\n 'strings': v1 * repeats,\n 'strings2': v1 * repeats,\n 'strings3': v3 * repeats}\n df = 
pd.DataFrame(arrays)\n _check_pandas_roundtrip(df)\n\n for k in arrays:\n _check_array_roundtrip(arrays[k])\n\n def test_category_implicit_from_pandas(self):\n # ARROW-3374\n def _check(v):\n arr = pa.array(v)\n result = arr.to_pandas()\n tm.assert_series_equal(pd.Series(result), pd.Series(v))\n\n arrays = [\n pd.Categorical(['a', 'b', 'c'], categories=['a', 'b']),\n pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'],\n ordered=True)\n ]\n for arr in arrays:\n _check(arr)\n\n def test_empty_category(self):\n # ARROW-2443\n df = pd.DataFrame({'cat': pd.Categorical([])})\n _check_pandas_roundtrip(df)\n\n def test_category_zero_chunks(self):\n # ARROW-5952\n for pa_type, dtype in [(pa.string(), 'object'), (pa.int64(), 'int64')]:\n a = pa.chunked_array([], pa.dictionary(pa.int8(), pa_type))\n result = a.to_pandas()\n expected = pd.Categorical([], categories=np.array([], dtype=dtype))\n tm.assert_series_equal(pd.Series(result), pd.Series(expected))\n\n table = pa.table({'a': a})\n result = table.to_pandas()\n expected = pd.DataFrame({'a': expected})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"data,error_type\",\n [\n ({\"a\": [\"a\", 1, 2.0]}, pa.ArrowTypeError),\n ({\"a\": [\"a\", 1, 2.0]}, pa.ArrowTypeError),\n ({\"a\": [1, True]}, pa.ArrowTypeError),\n ({\"a\": [True, \"a\"]}, pa.ArrowInvalid),\n ({\"a\": [1, \"a\"]}, pa.ArrowInvalid),\n ({\"a\": [1.0, \"a\"]}, pa.ArrowInvalid),\n ],\n )\n def test_mixed_types_fails(self, data, error_type):\n df = pd.DataFrame(data)\n msg = \"Conversion failed for column a with type object\"\n with pytest.raises(error_type, match=msg):\n pa.Table.from_pandas(df)\n\n def test_strided_data_import(self):\n cases = []\n\n columns = ['a', 'b', 'c']\n N, K = 100, 3\n random_numbers = np.random.randn(N, K).copy() * 100\n\n numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'f4', 'f8']\n\n for type_name in numeric_dtypes:\n cases.append(random_numbers.astype(type_name))\n\n # strings\n cases.append(np.array([random_ascii(10) for i in range(N * K)],\n dtype=object)\n .reshape(N, K).copy())\n\n # booleans\n boolean_objects = (np.array([True, False, True] * N, dtype=object)\n .reshape(N, K).copy())\n\n # add some nulls, so dtype comes back as objects\n boolean_objects[5] = None\n cases.append(boolean_objects)\n\n cases.append(np.arange(\"2016-01-01T00:00:00.001\", N * K,\n dtype='datetime64[ms]')\n .reshape(N, K).copy())\n\n strided_mask = (random_numbers > 0).astype(bool)[:, 0]\n\n for case in cases:\n df = pd.DataFrame(case, columns=columns)\n col = df['a']\n\n _check_pandas_roundtrip(df)\n _check_array_roundtrip(col)\n _check_array_roundtrip(col, mask=strided_mask)\n\n def test_all_nones(self):\n def _check_series(s):\n converted = pa.array(s)\n assert isinstance(converted, pa.NullArray)\n assert len(converted) == 3\n assert converted.null_count == 3\n for item in converted:\n assert item is pa.NA\n\n _check_series(pd.Series([None] * 3, dtype=object))\n _check_series(pd.Series([np.nan] * 3, dtype=object))\n _check_series(pd.Series([None, np.nan, None], dtype=object))\n\n def test_partial_schema(self):\n data = OrderedDict([\n ('a', [0, 1, 2, 3, 4]),\n ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),\n ('c', [-10, -5, 0, 5, 10])\n ])\n df = pd.DataFrame(data)\n\n partial_schema = pa.schema([\n pa.field('c', pa.int64()),\n pa.field('a', pa.int64())\n ])\n\n _check_pandas_roundtrip(df, schema=partial_schema,\n expected=df[['c', 'a']],\n expected_schema=partial_schema)\n\n def 
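test_partial_schema_column_order(self):\n        # Editor's illustrative sketch (not part of the original suite):\n        # as in test_partial_schema above, an explicit schema passed to\n        # from_pandas also fixes the column order of the result.\n        df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})\n        schema = pa.schema([('y', pa.int64()), ('x', pa.int64())])\n        table = pa.Table.from_pandas(df, schema=schema,\n                                     preserve_index=False)\n        assert table.column_names == ['y', 'x']\n\n    def 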
test_table_batch_empty_dataframe(self):\n        df = pd.DataFrame({})\n        _check_pandas_roundtrip(df)\n        _check_pandas_roundtrip(df, as_batch=True)\n\n        df2 = pd.DataFrame({}, index=[0, 1, 2])\n        _check_pandas_roundtrip(df2, preserve_index=True)\n        _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)\n\n    def test_convert_empty_table(self):\n        arr = pa.array([], type=pa.int64())\n        empty_objects = pd.Series(np.array([], dtype=object))\n        tm.assert_series_equal(arr.to_pandas(),\n                               pd.Series(np.array([], dtype=np.int64)))\n        arr = pa.array([], type=pa.string())\n        tm.assert_series_equal(arr.to_pandas(), empty_objects)\n        arr = pa.array([], type=pa.list_(pa.int64()))\n        tm.assert_series_equal(arr.to_pandas(), empty_objects)\n        arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))\n        tm.assert_series_equal(arr.to_pandas(), empty_objects)\n\n    def test_non_natural_stride(self):\n        \"\"\"\n        ARROW-2172: converting from a Numpy array with a stride that's\n        not a multiple of itemsize.\n        \"\"\"\n        dtype = np.dtype([('x', np.int32), ('y', np.int16)])\n        data = np.array([(42, -1), (-43, 2)], dtype=dtype)\n        assert data.strides == (6,)\n        arr = pa.array(data['x'], type=pa.int32())\n        assert arr.to_pylist() == [42, -43]\n        arr = pa.array(data['y'], type=pa.int16())\n        assert arr.to_pylist() == [-1, 2]\n\n    def test_array_from_strided_numpy_array(self):\n        # ARROW-5651\n        np_arr = np.arange(0, 10, dtype=np.float32)[1:-1:2]\n        pa_arr = pa.array(np_arr, type=pa.float64())\n        expected = pa.array([1.0, 3.0, 5.0, 7.0], type=pa.float64())\n        pa_arr.equals(expected)\n\n    def test_safe_unsafe_casts(self):\n        # ARROW-2799\n        df = pd.DataFrame({\n            'A': list('abc'),\n            'B': np.linspace(0, 1, 3)\n        })\n\n        schema = pa.schema([\n            pa.field('A', pa.string()),\n            pa.field('B', pa.int32())\n        ])\n\n        with pytest.raises(ValueError):\n            pa.Table.from_pandas(df, schema=schema)\n\n        table = pa.Table.from_pandas(df, schema=schema, safe=False)\n        assert table.column('B').type == pa.int32()\n\n    def test_error_sparse(self):\n        # ARROW-2818\n        try:\n            df = pd.DataFrame({'a': pd.arrays.SparseArray([1, np.nan, 3])})\n        except AttributeError:\n            # pandas.arrays module introduced in pandas 0.24\n            df = pd.DataFrame({'a': pd.SparseArray([1, np.nan, 3])})\n        with pytest.raises(TypeError, match=\"Sparse pandas data\"):\n            pa.Table.from_pandas(df)\n\n\ndef test_safe_cast_from_float_with_nans_to_int():\n    # TODO(kszucs): write tests for creating Date32 and Date64 arrays, see\n    # ARROW-4258 and https://github.com/apache/arrow/pull/3395\n    values = pd.Series([1, 2, None, 4])\n    arr = pa.Array.from_pandas(values, type=pa.int32(), safe=True)\n    expected = pa.array([1, 2, None, 4], type=pa.int32())\n    assert arr.equals(expected)\n\n\ndef _fully_loaded_dataframe_example():\n    index = pd.MultiIndex.from_arrays([\n        pd.date_range('2000-01-01', periods=5).repeat(2),\n        np.tile(np.array(['foo', 'bar'], dtype=object), 5)\n    ])\n\n    c1 = pd.date_range('2000-01-01', periods=10)\n    data = {\n        0: c1,\n        1: c1.tz_localize('utc'),\n        2: c1.tz_localize('US/Eastern'),\n        3: c1[::2].tz_localize('utc').repeat(2).astype('category'),\n        4: ['foo', 'bar'] * 5,\n        5: pd.Series(['foo', 'bar'] * 5).astype('category').values,\n        6: [True, False] * 5,\n        7: np.random.randn(10),\n        8: np.random.randint(0, 100, size=10),\n        9: pd.period_range('2013', periods=10, freq='M')\n    }\n\n    if Version(pd.__version__) >= Version('0.21'):\n        # There is an issue with pickling IntervalIndex in pandas 0.20.x\n        data[10] = pd.interval_range(start=1, freq=1, periods=10)\n\n    return pd.DataFrame(data, index=index)\n\n\n@pytest.mark.parametrize
('columns', ([b'foo'], ['foo']))\ndef test_roundtrip_with_bytes_unicode(columns):\n    df = pd.DataFrame(columns=columns)\n    table1 = pa.Table.from_pandas(df)\n    table2 = pa.Table.from_pandas(table1.to_pandas())\n    assert table1.equals(table2)\n    assert table1.schema.equals(table2.schema)\n    assert table1.schema.metadata == table2.schema.metadata\n\n\ndef _check_serialize_components_roundtrip(pd_obj):\n    with pytest.warns(FutureWarning):\n        ctx = pa.default_serialization_context()\n\n    with pytest.warns(FutureWarning):\n        components = ctx.serialize(pd_obj).to_components()\n    with pytest.warns(FutureWarning):\n        deserialized = ctx.deserialize_components(components)\n\n    if isinstance(pd_obj, pd.DataFrame):\n        tm.assert_frame_equal(pd_obj, deserialized)\n    else:\n        tm.assert_series_equal(pd_obj, deserialized)\n\n\n@pytest.mark.skipif(\n    Version('1.16.0') <= Version(np.__version__) < Version('1.16.1'),\n    reason='Until numpy/numpy#12745 is resolved')\ndef test_serialize_deserialize_pandas():\n    # ARROW-1784, serialize and deserialize DataFrame by decomposing\n    # BlockManager\n    df = _fully_loaded_dataframe_example()\n    _check_serialize_components_roundtrip(df)\n\n\ndef test_serialize_deserialize_empty_pandas():\n    # ARROW-7996, serialize and deserialize empty pandas objects\n    df = pd.DataFrame({'col1': [], 'col2': [], 'col3': []})\n    _check_serialize_components_roundtrip(df)\n\n    series = pd.Series([], dtype=np.float32, name='col')\n    _check_serialize_components_roundtrip(series)\n\n\ndef _pytime_from_micros(val):\n    microseconds = val % 1000000\n    val //= 1000000\n    seconds = val % 60\n    val //= 60\n    minutes = val % 60\n    hours = val // 60\n    return time(hours, minutes, seconds, microseconds)\n\n\ndef _pytime_to_micros(pytime):\n    return (pytime.hour * 3600000000 +\n            pytime.minute * 60000000 +\n            pytime.second * 1000000 +\n            pytime.microsecond)\n\n\ndef test_convert_unsupported_type_error_message():\n    # ARROW-1454\n\n    # custom python objects\n    class A:\n        pass\n\n    df = pd.DataFrame({'a': [A(), A()]})\n\n    msg = 'Conversion failed for column a with type object'\n    with pytest.raises(ValueError, match=msg):\n        pa.Table.from_pandas(df)\n\n    # period unsupported for pandas <= 0.25\n    if Version(pd.__version__) <= Version('0.25'):\n        df = pd.DataFrame({\n            'a': pd.period_range('2000-01-01', periods=20),\n        })\n\n        msg = 'Conversion failed for column a with type (period|object)'\n        with pytest.raises((TypeError, ValueError), match=msg):\n            pa.Table.from_pandas(df)\n\n\n# ----------------------------------------------------------------------\n# Hypothesis tests\n\n\n@h.given(past.arrays(past.pandas_compatible_types))\ndef test_array_to_pandas_roundtrip(arr):\n    s = arr.to_pandas()\n    restored = pa.array(s, type=arr.type, from_pandas=True)\n    assert restored.equals(arr)\n\n\n# ----------------------------------------------------------------------\n# Test object deduplication in to_pandas\n\n\ndef _generate_dedup_example(nunique, repeats):\n    unique_values = [rands(10) for i in range(nunique)]\n    return unique_values * repeats\n\n\ndef _assert_nunique(obj, expected):\n    assert len({id(x) for x in obj}) == expected\n\n\ndef test_to_pandas_deduplicate_strings_array_types():\n    nunique = 100\n    repeats = 10\n    values = _generate_dedup_example(nunique, repeats)\n\n    for arr in [pa.array(values, type=pa.binary()),\n                pa.array(values, type=pa.utf8()),\n                pa.chunked_array([values, values])]:\n        _assert_nunique(arr.to_pandas(), nunique)\n        _assert_nunique(arr.to_pandas(deduplicate_objects=False), len(arr))\n\n\ndef 
test_to_pandas_deduplicate_strings_table_types():\n nunique = 100\n repeats = 10\n values = _generate_dedup_example(nunique, repeats)\n\n arr = pa.array(values)\n rb = pa.RecordBatch.from_arrays([arr], ['foo'])\n tbl = pa.Table.from_batches([rb])\n\n for obj in [rb, tbl]:\n _assert_nunique(obj.to_pandas()['foo'], nunique)\n _assert_nunique(obj.to_pandas(deduplicate_objects=False)['foo'],\n len(obj))\n\n\ndef test_to_pandas_deduplicate_integers_as_objects():\n nunique = 100\n repeats = 10\n\n # Python automatically interns smaller integers\n unique_values = list(np.random.randint(10000000, 1000000000, size=nunique))\n unique_values[nunique // 2] = None\n\n arr = pa.array(unique_values * repeats)\n\n _assert_nunique(arr.to_pandas(integer_object_nulls=True), nunique)\n _assert_nunique(arr.to_pandas(integer_object_nulls=True,\n deduplicate_objects=False),\n # Account for None\n (nunique - 1) * repeats + 1)\n\n\ndef test_to_pandas_deduplicate_date_time():\n nunique = 100\n repeats = 10\n\n unique_values = list(range(nunique))\n\n cases = [\n # raw type, array type, to_pandas options\n ('int32', 'date32', {'date_as_object': True}),\n ('int64', 'date64', {'date_as_object': True}),\n ('int32', 'time32[ms]', {}),\n ('int64', 'time64[us]', {})\n ]\n\n for raw_type, array_type, pandas_options in cases:\n raw_arr = pa.array(unique_values * repeats, type=raw_type)\n casted_arr = raw_arr.cast(array_type)\n\n _assert_nunique(casted_arr.to_pandas(**pandas_options),\n nunique)\n _assert_nunique(casted_arr.to_pandas(deduplicate_objects=False,\n **pandas_options),\n len(casted_arr))\n\n\n# ---------------------------------------------------------------------\n\ndef test_table_from_pandas_checks_field_nullability():\n # ARROW-2136\n df = pd.DataFrame({'a': [1.2, 2.1, 3.1],\n 'b': [np.nan, 'string', 'foo']})\n schema = pa.schema([pa.field('a', pa.float64(), nullable=False),\n pa.field('b', pa.utf8(), nullable=False)])\n\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df, schema=schema)\n\n\ndef test_table_from_pandas_keeps_column_order_of_dataframe():\n df1 = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n df2 = df1[['floats', 'partition', 'arrays']]\n\n schema1 = pa.schema([\n ('partition', pa.int64()),\n ('arrays', pa.list_(pa.int64())),\n ('floats', pa.float64()),\n ])\n schema2 = pa.schema([\n ('floats', pa.float64()),\n ('partition', pa.int64()),\n ('arrays', pa.list_(pa.int64()))\n ])\n\n table1 = pa.Table.from_pandas(df1, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, preserve_index=False)\n\n assert table1.schema.equals(schema1)\n assert table2.schema.equals(schema2)\n\n\ndef test_table_from_pandas_keeps_column_order_of_schema():\n # ARROW-3766\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n\n schema = pa.schema([\n ('floats', pa.float64()),\n ('arrays', pa.list_(pa.int32())),\n ('partition', pa.int32())\n ])\n\n df1 = df[df.partition == 0]\n df2 = df[df.partition == 1][['floats', 'partition', 'arrays']]\n\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert table1.schema.equals(schema)\n assert table1.schema.equals(table2.schema)\n\n\ndef test_table_from_pandas_columns_argument_only_does_filtering():\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n 
('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n\n columns1 = ['arrays', 'floats', 'partition']\n schema1 = pa.schema([\n ('arrays', pa.list_(pa.int64())),\n ('floats', pa.float64()),\n ('partition', pa.int64())\n ])\n\n columns2 = ['floats', 'partition']\n schema2 = pa.schema([\n ('floats', pa.float64()),\n ('partition', pa.int64())\n ])\n\n table1 = pa.Table.from_pandas(df, columns=columns1, preserve_index=False)\n table2 = pa.Table.from_pandas(df, columns=columns2, preserve_index=False)\n\n assert table1.schema.equals(schema1)\n assert table2.schema.equals(schema2)\n\n\ndef test_table_from_pandas_columns_and_schema_are_mutually_exclusive():\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n schema = pa.schema([\n ('partition', pa.int32()),\n ('arrays', pa.list_(pa.int32())),\n ('floats', pa.float64()),\n ])\n columns = ['arrays', 'floats']\n\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df, schema=schema, columns=columns)\n\n\ndef test_table_from_pandas_keeps_schema_nullability():\n # ARROW-5169\n df = pd.DataFrame({'a': [1, 2, 3, 4]})\n\n schema = pa.schema([\n pa.field('a', pa.int64(), nullable=False),\n ])\n\n table = pa.Table.from_pandas(df)\n assert table.schema.field('a').nullable is True\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.field('a').nullable is False\n\n\ndef test_table_from_pandas_schema_index_columns():\n # ARROW-5220\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})\n\n schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ('index', pa.int32()),\n ])\n\n # schema includes index with name not in dataframe\n with pytest.raises(KeyError, match=\"name 'index' present in the\"):\n pa.Table.from_pandas(df, schema=schema)\n\n df.index.name = 'index'\n\n # schema includes correct index name -> roundtrip works\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema includes correct index name but preserve_index=False\n with pytest.raises(ValueError, match=\"'preserve_index=False' was\"):\n pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # in case of preserve_index=None -> RangeIndex serialized as metadata\n # clashes with the index in the schema\n with pytest.raises(ValueError, match=\"name 'index' is present in the \"\n \"schema, but it is a RangeIndex\"):\n pa.Table.from_pandas(df, schema=schema, preserve_index=None)\n\n df.index = pd.Index([0, 1, 2], name='index')\n\n # for non-RangeIndex, both preserve_index=None and True work\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema has different order (index column not at the end)\n schema = pa.schema([\n ('index', pa.int32()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema does not include the index -> index is not included as column\n # even though preserve_index=True/None\n schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n expected = df.copy()\n expected = expected.reset_index(drop=True)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema, 
expected=expected)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema, expected=expected)\n\n # dataframe with a MultiIndex\n df.index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],\n names=['level1', 'level2'])\n schema = pa.schema([\n ('level1', pa.string()),\n ('level2', pa.int64()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n\n # only one of the levels of the MultiIndex is included\n schema = pa.schema([\n ('level2', pa.int64()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n expected = df.copy()\n expected = expected.reset_index('level1', drop=True)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema, expected=expected)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema, expected=expected)\n\n\ndef test_table_from_pandas_schema_index_columns__unnamed_index():\n # ARROW-6999 - unnamed indices in specified schema\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})\n\n expected_schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ('__index_level_0__', pa.int64()),\n ])\n\n schema = pa.Schema.from_pandas(df, preserve_index=True)\n table = pa.Table.from_pandas(df, preserve_index=True, schema=schema)\n assert table.schema.remove_metadata().equals(expected_schema)\n\n # non-RangeIndex (preserved by default)\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]}, index=[0, 1, 2])\n schema = pa.Schema.from_pandas(df)\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.remove_metadata().equals(expected_schema)\n\n\ndef test_table_from_pandas_schema_with_custom_metadata():\n # ARROW-7087 - metadata disappear from pandas\n df = pd.DataFrame()\n schema = pa.Schema.from_pandas(df).with_metadata({'meta': 'True'})\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.metadata.get(b'meta') == b'True'\n\n\ndef test_table_from_pandas_schema_field_order_metadat():\n # ARROW-10532\n # ensure that a different field order in specified schema doesn't\n # mangle metadata\n df = pd.DataFrame({\n \"datetime\": pd.date_range(\"2020-01-01T00:00:00Z\", freq=\"H\", periods=2),\n \"float\": np.random.randn(2)\n })\n\n schema = pa.schema([\n pa.field(\"float\", pa.float32(), nullable=True),\n pa.field(\"datetime\", pa.timestamp(\"s\", tz=\"UTC\"), nullable=False)\n ])\n\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.equals(schema)\n metadata_float = table.schema.pandas_metadata[\"columns\"][0]\n assert metadata_float[\"name\"] == \"float\"\n assert metadata_float[\"metadata\"] is None\n metadata_datetime = table.schema.pandas_metadata[\"columns\"][1]\n assert metadata_datetime[\"name\"] == \"datetime\"\n assert metadata_datetime[\"metadata\"] == {'timezone': 'UTC'}\n\n result = table.to_pandas()\n expected = df[[\"float\", \"datetime\"]].astype({\"float\": \"float32\"})\n tm.assert_frame_equal(result, expected)\n\n\n# ----------------------------------------------------------------------\n# RecordBatch, Table\n\n\ndef test_recordbatch_from_to_pandas():\n data = pd.DataFrame({\n 'c1': np.array([1, 2, 3, 4, 5], dtype='int64'),\n 'c2': np.array([1, 2, 3, 4, 5], dtype='uint32'),\n 'c3': np.random.randn(5),\n 'c4': ['foo', 'bar', None, 'baz', 'qux'],\n 'c5': [False, True, False, True, False]\n })\n\n batch = 
pa.RecordBatch.from_pandas(data)\n    result = batch.to_pandas()\n    tm.assert_frame_equal(data, result)\n\n\ndef test_recordbatchlist_to_pandas():\n    data1 = pd.DataFrame({\n        'c1': np.array([1, 1, 2], dtype='uint32'),\n        'c2': np.array([1.0, 2.0, 3.0], dtype='float64'),\n        'c3': [True, None, False],\n        'c4': ['foo', 'bar', None]\n    })\n\n    data2 = pd.DataFrame({\n        'c1': np.array([3, 5], dtype='uint32'),\n        'c2': np.array([4.0, 5.0], dtype='float64'),\n        'c3': [True, True],\n        'c4': ['baz', 'qux']\n    })\n\n    batch1 = pa.RecordBatch.from_pandas(data1)\n    batch2 = pa.RecordBatch.from_pandas(data2)\n\n    table = pa.Table.from_batches([batch1, batch2])\n    result = table.to_pandas()\n    data = pd.concat([data1, data2]).reset_index(drop=True)\n    tm.assert_frame_equal(data, result)\n\n\ndef test_recordbatch_table_pass_name_to_pandas():\n    rb = pa.record_batch([pa.array([1, 2, 3, 4])], names=['a0'])\n    t = pa.table([pa.array([1, 2, 3, 4])], names=['a0'])\n    assert rb[0].to_pandas().name == 'a0'\n    assert t[0].to_pandas().name == 'a0'\n\n\n# ----------------------------------------------------------------------\n# Metadata serialization\n\n\n@pytest.mark.parametrize(\n    ('type', 'expected'),\n    [\n        (pa.null(), 'empty'),\n        (pa.bool_(), 'bool'),\n        (pa.int8(), 'int8'),\n        (pa.int16(), 'int16'),\n        (pa.int32(), 'int32'),\n        (pa.int64(), 'int64'),\n        (pa.uint8(), 'uint8'),\n        (pa.uint16(), 'uint16'),\n        (pa.uint32(), 'uint32'),\n        (pa.uint64(), 'uint64'),\n        (pa.float16(), 'float16'),\n        (pa.float32(), 'float32'),\n        (pa.float64(), 'float64'),\n        (pa.date32(), 'date'),\n        (pa.date64(), 'date'),\n        (pa.binary(), 'bytes'),\n        (pa.binary(length=4), 'bytes'),\n        (pa.string(), 'unicode'),\n        (pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'),\n        (pa.decimal128(18, 3), 'decimal'),\n        (pa.timestamp('ms'), 'datetime'),\n        (pa.timestamp('us', 'UTC'), 'datetimetz'),\n        (pa.time32('s'), 'time'),\n        (pa.time64('us'), 'time')\n    ]\n)\ndef test_logical_type(type, expected):\n    assert get_logical_type(type) == expected\n\n\n# ----------------------------------------------------------------------\n# to_pandas uses MemoryPool\n\ndef test_array_uses_memory_pool():\n    # ARROW-6570\n    N = 10000\n    arr = pa.array(np.arange(N, dtype=np.int64),\n                   mask=np.random.randint(0, 2, size=N).astype(np.bool_))\n\n    # In the case the gc is caught loading\n    gc.collect()\n\n    prior_allocation = pa.total_allocated_bytes()\n\n    x = arr.to_pandas()\n    assert pa.total_allocated_bytes() == (prior_allocation + N * 8)\n    x = None  # noqa\n    gc.collect()\n\n    assert pa.total_allocated_bytes() == prior_allocation\n\n    # zero copy does not allocate memory\n    arr = pa.array(np.arange(N, dtype=np.int64))\n\n    prior_allocation = pa.total_allocated_bytes()\n    x = arr.to_pandas()  # noqa\n    assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_singleton_blocks_zero_copy():\n    # Part of ARROW-3789\n    t = pa.table([pa.array(np.arange(1000, dtype=np.int64))], ['f0'])\n\n    # Zero copy if split_blocks=True\n    _check_to_pandas_memory_unchanged(t, split_blocks=True)\n\n    prior_allocation = pa.total_allocated_bytes()\n    result = t.to_pandas()\n    assert result['f0'].values.flags.writeable\n    assert pa.total_allocated_bytes() > prior_allocation\n\n\ndef _check_to_pandas_memory_unchanged(obj, **kwargs):\n    prior_allocation = pa.total_allocated_bytes()\n    x = obj.to_pandas(**kwargs)  # noqa\n\n    # Memory allocation unchanged -- either zero copy or self-destructing\n    assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_to_pandas_split_blocks():\n    # ARROW-3789\n    t = pa.table([\n        pa.array([1, 2, 3, 4, 5], type='i1'),\n        
pa.array([1, 2, 3, 4, 5], type='i4'),\n pa.array([1, 2, 3, 4, 5], type='i8'),\n pa.array([1, 2, 3, 4, 5], type='f4'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n ], ['f{}'.format(i) for i in range(8)])\n\n _check_blocks_created(t, 8)\n _check_to_pandas_memory_unchanged(t, split_blocks=True)\n\n\ndef _check_blocks_created(t, number):\n x = t.to_pandas(split_blocks=True)\n assert len(x._data.blocks) == number\n\n\ndef test_to_pandas_self_destruct():\n K = 50\n\n def _make_table():\n return pa.table([\n # Slice to force a copy\n pa.array(np.random.randn(10000)[::2])\n for i in range(K)\n ], ['f{}'.format(i) for i in range(K)])\n\n t = _make_table()\n _check_to_pandas_memory_unchanged(t, split_blocks=True, self_destruct=True)\n\n # Check non-split-block behavior\n t = _make_table()\n _check_to_pandas_memory_unchanged(t, self_destruct=True)\n\n\ndef test_table_uses_memory_pool():\n N = 10000\n arr = pa.array(np.arange(N, dtype=np.int64))\n t = pa.table([arr, arr, arr], ['f0', 'f1', 'f2'])\n\n prior_allocation = pa.total_allocated_bytes()\n x = t.to_pandas()\n\n assert pa.total_allocated_bytes() == (prior_allocation + 3 * N * 8)\n\n # Check successful garbage collection\n x = None # noqa\n gc.collect()\n assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_object_leak_in_numpy_array():\n # ARROW-6876\n arr = pa.array([{'a': 1}])\n np_arr = arr.to_pandas()\n assert np_arr.dtype == np.dtype('object')\n obj = np_arr[0]\n refcount = sys.getrefcount(obj)\n assert sys.getrefcount(obj) == refcount\n del np_arr\n assert sys.getrefcount(obj) == refcount - 1\n\n\ndef test_object_leak_in_dataframe():\n # ARROW-6876\n arr = pa.array([{'a': 1}])\n table = pa.table([arr], ['f0'])\n col = table.to_pandas()['f0']\n assert col.dtype == np.dtype('object')\n obj = col[0]\n refcount = sys.getrefcount(obj)\n assert sys.getrefcount(obj) == refcount\n del col\n assert sys.getrefcount(obj) == refcount - 1\n\n\n# ----------------------------------------------------------------------\n# Some nested array tests array tests\n\n\ndef test_array_from_py_float32():\n data = [[1.2, 3.4], [9.0, 42.0]]\n\n t = pa.float32()\n\n arr1 = pa.array(data[0], type=t)\n arr2 = pa.array(data, type=pa.list_(t))\n\n expected1 = np.array(data[0], dtype=np.float32)\n expected2 = pd.Series([np.array(data[0], dtype=np.float32),\n np.array(data[1], dtype=np.float32)])\n\n assert arr1.type == t\n assert arr1.equals(pa.array(expected1))\n assert arr2.equals(pa.array(expected2))\n\n\n# ----------------------------------------------------------------------\n# Timestamp tests\n\n\ndef test_cast_timestamp_unit():\n # ARROW-1680\n val = datetime.now()\n s = pd.Series([val])\n s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York')\n\n us_with_tz = pa.timestamp('us', tz='America/New_York')\n\n arr = pa.Array.from_pandas(s_nyc, type=us_with_tz)\n\n # ARROW-1906\n assert arr.type == us_with_tz\n\n arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us'))\n\n assert arr[0].as_py() == s_nyc[0].to_pydatetime()\n assert arr2[0].as_py() == s[0].to_pydatetime()\n\n # Disallow truncation\n arr = pa.array([123123], type='int64').cast(pa.timestamp('ms'))\n expected = pa.array([123], type='int64').cast(pa.timestamp('s'))\n\n # sanity check that the cast worked right\n assert arr.type == pa.timestamp('ms')\n\n target = pa.timestamp('s')\n with pytest.raises(ValueError):\n arr.cast(target)\n\n result = arr.cast(target, 
safe=False)\n assert result.equals(expected)\n\n # ARROW-1949\n series = pd.Series([pd.Timestamp(1), pd.Timestamp(10), pd.Timestamp(1000)])\n expected = pa.array([0, 0, 1], type=pa.timestamp('us'))\n\n with pytest.raises(ValueError):\n pa.array(series, type=pa.timestamp('us'))\n\n with pytest.raises(ValueError):\n pa.Array.from_pandas(series, type=pa.timestamp('us'))\n\n result = pa.Array.from_pandas(series, type=pa.timestamp('us'), safe=False)\n assert result.equals(expected)\n\n result = pa.array(series, type=pa.timestamp('us'), safe=False)\n assert result.equals(expected)\n\n\ndef test_nested_with_timestamp_tz_round_trip():\n ts = pd.Timestamp.now()\n ts_dt = ts.to_pydatetime()\n arr = pa.array([ts_dt], type=pa.timestamp('us', tz='America/New_York'))\n struct = pa.StructArray.from_arrays([arr, arr], ['start', 'stop'])\n\n result = struct.to_pandas()\n restored = pa.array(result)\n assert restored.equals(struct)\n\n\ndef test_nested_with_timestamp_tz():\n # ARROW-7723\n ts = pd.Timestamp.now()\n ts_dt = ts.to_pydatetime()\n\n # XXX: Ensure that this data does not get promoted to nanoseconds (and thus\n # integers) to preserve behavior in 0.15.1\n for unit in ['s', 'ms', 'us']:\n if unit in ['s', 'ms']:\n # This is used for verifying timezone conversion to micros are not\n # important\n def truncate(x): return x.replace(microsecond=0)\n else:\n def truncate(x): return x\n arr = pa.array([ts], type=pa.timestamp(unit))\n arr2 = pa.array([ts], type=pa.timestamp(unit, tz='America/New_York'))\n\n arr3 = pa.StructArray.from_arrays([arr, arr], ['start', 'stop'])\n arr4 = pa.StructArray.from_arrays([arr2, arr2], ['start', 'stop'])\n\n result = arr3.to_pandas()\n assert isinstance(result[0]['start'], datetime)\n assert result[0]['start'].tzinfo is None\n assert isinstance(result[0]['stop'], datetime)\n assert result[0]['stop'].tzinfo is None\n\n result = arr4.to_pandas()\n assert isinstance(result[0]['start'], datetime)\n assert result[0]['start'].tzinfo is not None\n utc_dt = result[0]['start'].astimezone(timezone.utc)\n assert truncate(utc_dt).replace(tzinfo=None) == truncate(ts_dt)\n assert isinstance(result[0]['stop'], datetime)\n assert result[0]['stop'].tzinfo is not None\n\n # same conversion for table\n result = pa.table({'a': arr3}).to_pandas()\n assert isinstance(result['a'][0]['start'], datetime)\n assert result['a'][0]['start'].tzinfo is None\n assert isinstance(result['a'][0]['stop'], datetime)\n assert result['a'][0]['stop'].tzinfo is None\n\n result = pa.table({'a': arr4}).to_pandas()\n assert isinstance(result['a'][0]['start'], datetime)\n assert result['a'][0]['start'].tzinfo is not None\n assert isinstance(result['a'][0]['stop'], datetime)\n assert result['a'][0]['stop'].tzinfo is not None\n\n\n# ----------------------------------------------------------------------\n# DictionaryArray tests\n\n\ndef test_dictionary_with_pandas():\n src_indices = np.repeat([0, 1, 2], 2)\n dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)\n mask = np.array([False, False, True, False, False, False])\n\n for index_type in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32',\n 'uint64', 'int64']:\n indices = src_indices.astype(index_type)\n d1 = pa.DictionaryArray.from_arrays(indices, dictionary)\n d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)\n\n if index_type[0] == 'u':\n # TODO: unsigned dictionary indices to pandas\n with pytest.raises(TypeError):\n d1.to_pandas()\n continue\n\n pandas1 = d1.to_pandas()\n ex_pandas1 = pd.Categorical.from_codes(indices, 
categories=dictionary)\n\n tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1))\n\n pandas2 = d2.to_pandas()\n assert pandas2.isnull().sum() == 1\n\n # Unsigned integers converted to signed\n signed_indices = indices\n if index_type[0] == 'u':\n signed_indices = indices.astype(index_type[1:])\n ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1,\n signed_indices),\n categories=dictionary)\n\n tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2))\n\n\ndef random_strings(n, item_size, pct_null=0, dictionary=None):\n if dictionary is not None:\n result = dictionary[np.random.randint(0, len(dictionary), size=n)]\n else:\n result = np.array([random_ascii(item_size) for i in range(n)],\n dtype=object)\n\n if pct_null > 0:\n result[np.random.rand(n) < pct_null] = None\n\n return result\n\n\ndef test_variable_dictionary_to_pandas():\n np.random.seed(12345)\n\n d1 = pa.array(random_strings(100, 32), type='string')\n d2 = pa.array(random_strings(100, 16), type='string')\n d3 = pa.array(random_strings(10000, 10), type='string')\n\n a1 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d1), size=1000, dtype='i4'),\n d1\n )\n a2 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d2), size=1000, dtype='i4'),\n d2\n )\n\n # With some nulls\n a3 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d3), size=1000, dtype='i4'), d3)\n\n i4 = pa.array(\n np.random.randint(0, len(d3), size=1000, dtype='i4'),\n mask=np.random.rand(1000) < 0.1\n )\n a4 = pa.DictionaryArray.from_arrays(i4, d3)\n\n expected_dict = pa.concat_arrays([d1, d2, d3])\n\n a = pa.chunked_array([a1, a2, a3, a4])\n a_dense = pa.chunked_array([a1.cast('string'),\n a2.cast('string'),\n a3.cast('string'),\n a4.cast('string')])\n\n result = a.to_pandas()\n result_dense = a_dense.to_pandas()\n\n assert (result.cat.categories == expected_dict.to_pandas()).all()\n\n expected_dense = result.astype('str')\n expected_dense[result_dense.isnull()] = None\n tm.assert_series_equal(result_dense, expected_dense)\n\n\ndef test_dictionary_encoded_nested_to_pandas():\n # ARROW-6899\n child = pa.array(['a', 'a', 'a', 'b', 'b']).dictionary_encode()\n\n arr = pa.ListArray.from_arrays([0, 3, 5], child)\n\n result = arr.to_pandas()\n expected = pd.Series([np.array(['a', 'a', 'a'], dtype=object),\n np.array(['b', 'b'], dtype=object)])\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_dictionary_from_pandas():\n cat = pd.Categorical(['a', 'b', 'a'])\n expected_type = pa.dictionary(pa.int8(), pa.string())\n\n result = pa.array(cat)\n assert result.to_pylist() == ['a', 'b', 'a']\n assert result.type.equals(expected_type)\n\n # with missing values in categorical\n cat = pd.Categorical(['a', 'b', None, 'a'])\n\n result = pa.array(cat)\n assert result.to_pylist() == ['a', 'b', None, 'a']\n assert result.type.equals(expected_type)\n\n # with additional mask\n result = pa.array(cat, mask=np.array([False, False, False, True]))\n assert result.to_pylist() == ['a', 'b', None, None]\n assert result.type.equals(expected_type)\n\n\ndef test_dictionary_from_pandas_specified_type():\n # ARROW-7168 - ensure specified type is always respected\n\n # the same as cat = pd.Categorical(['a', 'b']) but explicit about dtypes\n cat = pd.Categorical.from_codes(\n np.array([0, 1], dtype='int8'), np.array(['a', 'b'], dtype=object))\n\n # different index type -> allow this\n # (the type of the 'codes' in pandas is not part of the data type)\n typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string())\n result = 
pa.array(cat, type=typ)\n assert result.type.equals(typ)\n assert result.to_pylist() == ['a', 'b']\n\n # mismatching values type -> raise error\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64())\n with pytest.raises(pa.ArrowInvalid):\n result = pa.array(cat, type=typ)\n\n # mismatching order -> raise error (for now a deprecation warning)\n typ = pa.dictionary(\n index_type=pa.int8(), value_type=pa.string(), ordered=True)\n with pytest.warns(FutureWarning, match=\"The 'ordered' flag of the passed\"):\n result = pa.array(cat, type=typ)\n assert result.to_pylist() == ['a', 'b']\n\n # with mask\n typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string())\n result = pa.array(cat, type=typ, mask=np.array([False, True]))\n assert result.type.equals(typ)\n assert result.to_pylist() == ['a', None]\n\n # empty categorical -> be flexible in values type to allow\n cat = pd.Categorical([])\n\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.string())\n result = pa.array(cat, type=typ)\n assert result.type.equals(typ)\n assert result.to_pylist() == []\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64())\n result = pa.array(cat, type=typ)\n assert result.type.equals(typ)\n assert result.to_pylist() == []\n\n # passing non-dictionary type\n cat = pd.Categorical(['a', 'b'])\n result = pa.array(cat, type=pa.string())\n expected = pa.array(['a', 'b'], type=pa.string())\n assert result.equals(expected)\n assert result.to_pylist() == ['a', 'b']\n\n\n# ----------------------------------------------------------------------\n# Array protocol in pandas conversions tests\n\n\ndef test_array_protocol():\n if Version(pd.__version__) < Version('0.24.0'):\n pytest.skip('IntegerArray only introduced in 0.24')\n\n df = pd.DataFrame({'a': pd.Series([1, 2, None], dtype='Int64')})\n\n if Version(pd.__version__) < Version('0.26.0.dev'):\n # with pandas<=0.25, trying to convert nullable integer errors\n with pytest.raises(TypeError):\n pa.table(df)\n else:\n # __arrow_array__ added to pandas IntegerArray in 0.26.0.dev\n\n # default conversion\n result = pa.table(df)\n expected = pa.array([1, 2, None], pa.int64())\n assert result[0].chunk(0).equals(expected)\n\n # with specifying schema\n schema = pa.schema([('a', pa.float64())])\n result = pa.table(df, schema=schema)\n expected2 = pa.array([1, 2, None], pa.float64())\n assert result[0].chunk(0).equals(expected2)\n\n # pass Series to pa.array\n result = pa.array(df['a'])\n assert result.equals(expected)\n result = pa.array(df['a'], type=pa.float64())\n assert result.equals(expected2)\n\n # pass actual ExtensionArray to pa.array\n result = pa.array(df['a'].values)\n assert result.equals(expected)\n result = pa.array(df['a'].values, type=pa.float64())\n assert result.equals(expected2)\n\n\nclass DummyExtensionType(pa.PyExtensionType):\n\n def __init__(self):\n pa.PyExtensionType.__init__(self, pa.int64())\n\n def __reduce__(self):\n return DummyExtensionType, ()\n\n\ndef PandasArray__arrow_array__(self, type=None):\n # hardcode dummy return regardless of self - we only want to check that\n # this method is correctly called\n storage = pa.array([1, 2, 3], type=pa.int64())\n return pa.ExtensionArray.from_storage(DummyExtensionType(), storage)\n\n\ndef test_array_protocol_pandas_extension_types(monkeypatch):\n # ARROW-7022 - ensure protocol works for Period / Interval extension dtypes\n\n if Version(pd.__version__) < Version('0.24.0'):\n pytest.skip('Period/IntervalArray only introduced in 0.24')\n\n storage = pa.array([1, 2, 3], 
type=pa.int64())\n expected = pa.ExtensionArray.from_storage(DummyExtensionType(), storage)\n\n monkeypatch.setattr(pd.arrays.PeriodArray, \"__arrow_array__\",\n PandasArray__arrow_array__, raising=False)\n monkeypatch.setattr(pd.arrays.IntervalArray, \"__arrow_array__\",\n PandasArray__arrow_array__, raising=False)\n for arr in [pd.period_range(\"2012-01-01\", periods=3, freq=\"D\").array,\n pd.interval_range(1, 4).array]:\n result = pa.array(arr)\n assert result.equals(expected)\n result = pa.array(pd.Series(arr))\n assert result.equals(expected)\n result = pa.array(pd.Index(arr))\n assert result.equals(expected)\n result = pa.table(pd.DataFrame({'a': arr})).column('a').chunk(0)\n assert result.equals(expected)\n\n\n# ----------------------------------------------------------------------\n# Pandas ExtensionArray support\n\n\ndef _Int64Dtype__from_arrow__(self, array):\n # for test only deal with single chunk for now\n # TODO: do we require handling of chunked arrays in the protocol?\n if isinstance(array, pa.Array):\n arr = array\n else:\n # ChunkedArray - here only deal with a single chunk for the test\n arr = array.chunk(0)\n buflist = arr.buffers()\n data = np.frombuffer(buflist[-1], dtype='int64')[\n arr.offset:arr.offset + len(arr)]\n bitmask = buflist[0]\n if bitmask is not None:\n mask = pa.BooleanArray.from_buffers(\n pa.bool_(), len(arr), [None, bitmask])\n mask = np.asarray(mask)\n else:\n mask = np.ones(len(arr), dtype=bool)\n int_arr = pd.arrays.IntegerArray(data.copy(), ~mask, copy=False)\n return int_arr\n\n\ndef test_convert_to_extension_array(monkeypatch):\n if Version(pd.__version__) < Version(\"0.26.0.dev\"):\n pytest.skip(\"Conversion from IntegerArray to arrow not yet supported\")\n\n import pandas.core.internals as _int\n\n # table converted from dataframe with extension types (so pandas_metadata\n # has this information)\n df = pd.DataFrame(\n {'a': [1, 2, 3], 'b': pd.array([2, 3, 4], dtype='Int64'),\n 'c': [4, 5, 6]})\n table = pa.table(df)\n\n # Int64Dtype is recognized -> convert to extension block by default\n # for a proper roundtrip\n result = table.to_pandas()\n assert not isinstance(result._data.blocks[0], _int.ExtensionBlock)\n assert result._data.blocks[0].values.dtype == np.dtype(\"int64\")\n assert isinstance(result._data.blocks[1], _int.ExtensionBlock)\n tm.assert_frame_equal(result, df)\n\n # test with missing values\n df2 = pd.DataFrame({'a': pd.array([1, 2, None], dtype='Int64')})\n table2 = pa.table(df2)\n result = table2.to_pandas()\n assert isinstance(result._data.blocks[0], _int.ExtensionBlock)\n tm.assert_frame_equal(result, df2)\n\n # monkeypatch pandas Int64Dtype to *not* have the protocol method\n if Version(pd.__version__) < Version(\"1.3.0.dev\"):\n monkeypatch.delattr(\n pd.core.arrays.integer._IntegerDtype, \"__from_arrow__\")\n else:\n monkeypatch.delattr(\n pd.core.arrays.integer.NumericDtype, \"__from_arrow__\")\n # Int64Dtype has no __from_arrow__ -> use normal conversion\n result = table.to_pandas()\n assert len(result._data.blocks) == 1\n assert not isinstance(result._data.blocks[0], _int.ExtensionBlock)\n\n\nclass MyCustomIntegerType(pa.PyExtensionType):\n\n def __init__(self):\n pa.PyExtensionType.__init__(self, pa.int64())\n\n def __reduce__(self):\n return MyCustomIntegerType, ()\n\n def to_pandas_dtype(self):\n return pd.Int64Dtype()\n\n\ndef test_conversion_extensiontype_to_extensionarray(monkeypatch):\n # converting extension type to linked pandas ExtensionDtype/Array\n import pandas.core.internals as _int\n\n if 
Version(pd.__version__) < Version(\"0.24.0\"):\n pytest.skip(\"ExtensionDtype introduced in pandas 0.24\")\n\n storage = pa.array([1, 2, 3, 4], pa.int64())\n arr = pa.ExtensionArray.from_storage(MyCustomIntegerType(), storage)\n table = pa.table({'a': arr})\n\n if Version(pd.__version__) < Version(\"0.26.0.dev\"):\n # ensure pandas Int64Dtype has the protocol method (for older pandas)\n monkeypatch.setattr(\n pd.Int64Dtype, '__from_arrow__', _Int64Dtype__from_arrow__,\n raising=False)\n\n # extension type points to Int64Dtype, which knows how to create a\n # pandas ExtensionArray\n result = arr.to_pandas()\n assert isinstance(result._data.blocks[0], _int.ExtensionBlock)\n expected = pd.Series([1, 2, 3, 4], dtype='Int64')\n tm.assert_series_equal(result, expected)\n\n result = table.to_pandas()\n assert isinstance(result._data.blocks[0], _int.ExtensionBlock)\n expected = pd.DataFrame({'a': pd.array([1, 2, 3, 4], dtype='Int64')})\n tm.assert_frame_equal(result, expected)\n\n # monkeypatch pandas Int64Dtype to *not* have the protocol method\n # (remove the version added above and the actual version for recent pandas)\n if Version(pd.__version__) < Version(\"0.26.0.dev\"):\n monkeypatch.delattr(pd.Int64Dtype, \"__from_arrow__\")\n elif Version(pd.__version__) < Version(\"1.3.0.dev\"):\n monkeypatch.delattr(\n pd.core.arrays.integer._IntegerDtype, \"__from_arrow__\")\n else:\n monkeypatch.delattr(\n pd.core.arrays.integer.NumericDtype, \"__from_arrow__\")\n\n result = arr.to_pandas()\n assert not isinstance(result._data.blocks[0], _int.ExtensionBlock)\n expected = pd.Series([1, 2, 3, 4])\n tm.assert_series_equal(result, expected)\n\n with pytest.raises(ValueError):\n table.to_pandas()\n\n\ndef test_to_pandas_extension_dtypes_mapping():\n if Version(pd.__version__) < Version(\"0.26.0.dev\"):\n pytest.skip(\"Conversion to pandas IntegerArray not yet supported\")\n\n table = pa.table({'a': pa.array([1, 2, 3], pa.int64())})\n\n # default use numpy dtype\n result = table.to_pandas()\n assert result['a'].dtype == np.dtype('int64')\n\n # specify to override the default\n result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get)\n assert isinstance(result['a'].dtype, pd.Int64Dtype)\n\n # types that return None in function get normal conversion\n table = pa.table({'a': pa.array([1, 2, 3], pa.int32())})\n result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get)\n assert result['a'].dtype == np.dtype('int32')\n\n # `types_mapper` overrules the pandas metadata\n table = pa.table(pd.DataFrame({'a': pd.array([1, 2, 3], dtype=\"Int64\")}))\n result = table.to_pandas()\n assert isinstance(result['a'].dtype, pd.Int64Dtype)\n result = table.to_pandas(\n types_mapper={pa.int64(): pd.PeriodDtype('D')}.get)\n assert isinstance(result['a'].dtype, pd.PeriodDtype)\n\n\ndef test_array_to_pandas():\n if Version(pd.__version__) < Version(\"1.1\"):\n pytest.skip(\"ExtensionDtype to_pandas method missing\")\n\n for arr in [pd.period_range(\"2012-01-01\", periods=3, freq=\"D\").array,\n pd.interval_range(1, 4).array]:\n result = pa.array(arr).to_pandas()\n expected = pd.Series(arr)\n tm.assert_series_equal(result, expected)\n\n # TODO implement proper conversion for chunked array\n # result = pa.table({\"col\": arr})[\"col\"].to_pandas()\n # expected = pd.Series(arr, name=\"col\")\n # tm.assert_series_equal(result, expected)\n\n\ndef test_roundtrip_empty_table_with_extension_dtype_index():\n if Version(pd.__version__) < Version(\"1.0.0\"):\n pytest.skip(\"ExtensionDtype to_pandas method 
missing\")\n\n df = pd.DataFrame(index=pd.interval_range(start=0, end=3))\n table = pa.table(df)\n table.to_pandas().index == pd.Index([{'left': 0, 'right': 1},\n {'left': 1, 'right': 2},\n {'left': 2, 'right': 3}],\n dtype='object')\n\n\n# ----------------------------------------------------------------------\n# Legacy metadata compatibility tests\n\n\ndef test_metadata_compat_range_index_pre_0_12():\n # Forward compatibility for metadata created from pandas.RangeIndex\n # prior to pyarrow 0.13.0\n a_values = ['foo', 'bar', None, 'baz']\n b_values = ['a', 'a', 'b', 'b']\n a_arrow = pa.array(a_values, type='utf8')\n b_arrow = pa.array(b_values, type='utf8')\n\n rng_index_arrow = pa.array([0, 2, 4, 6], type='int64')\n\n gen_name_0 = '__index_level_0__'\n gen_name_1 = '__index_level_1__'\n\n # Case 1: named RangeIndex\n e1 = pd.DataFrame({\n 'a': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n t1 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['a', 'qux'])\n t1 = t1.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': ['qux'],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': 'qux',\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r1 = t1.to_pandas()\n tm.assert_frame_equal(r1, e1)\n\n # Case 2: named RangeIndex, but conflicts with an actual column\n e2 = pd.DataFrame({\n 'qux': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n t2 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['qux', gen_name_0])\n t2 = t2.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r2 = t2.to_pandas()\n tm.assert_frame_equal(r2, e2)\n\n # Case 3: unnamed RangeIndex\n e3 = pd.DataFrame({\n 'a': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name=None))\n t3 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['a', gen_name_0])\n t3 = t3.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r3 = t3.to_pandas()\n tm.assert_frame_equal(r3, e3)\n\n # Case 4: MultiIndex with named RangeIndex\n e4 = pd.DataFrame({\n 'a': a_values\n }, index=[pd.RangeIndex(0, 8, step=2, name='qux'), b_values])\n t4 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],\n names=['a', 'qux', gen_name_1])\n t4 = t4.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': ['qux', gen_name_1],\n 'column_indexes': [{'name': 
None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': 'qux',\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_1,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r4 = t4.to_pandas()\n tm.assert_frame_equal(r4, e4)\n\n # Case 4: MultiIndex with unnamed RangeIndex\n e5 = pd.DataFrame({\n 'a': a_values\n }, index=[pd.RangeIndex(0, 8, step=2, name=None), b_values])\n t5 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],\n names=['a', gen_name_0, gen_name_1])\n t5 = t5.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0, gen_name_1],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_1,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r5 = t5.to_pandas()\n tm.assert_frame_equal(r5, e5)\n\n\ndef test_metadata_compat_missing_field_name():\n # Combination of missing field name but with index column as metadata.\n # This combo occurs in the latest versions of fastparquet (0.3.2), but not\n # in pyarrow itself (since field_name was added in 0.8, index as metadata\n # only added later)\n\n a_values = [1, 2, 3, 4]\n b_values = ['a', 'b', 'c', 'd']\n a_arrow = pa.array(a_values, type='int64')\n b_arrow = pa.array(b_values, type='utf8')\n\n expected = pd.DataFrame({\n 'a': a_values,\n 'b': b_values,\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n table = pa.table({'a': a_arrow, 'b': b_arrow})\n\n # metadata generated by fastparquet 0.3.2 with missing field_names\n table = table.replace_schema_metadata({\n b'pandas': json.dumps({\n 'column_indexes': [\n {'field_name': None,\n 'metadata': None,\n 'name': None,\n 'numpy_type': 'object',\n 'pandas_type': 'mixed-integer'}\n ],\n 'columns': [\n {'metadata': None,\n 'name': 'a',\n 'numpy_type': 'int64',\n 'pandas_type': 'int64'},\n {'metadata': None,\n 'name': 'b',\n 'numpy_type': 'object',\n 'pandas_type': 'unicode'}\n ],\n 'index_columns': [\n {'kind': 'range',\n 'name': 'qux',\n 'start': 0,\n 'step': 2,\n 'stop': 8}\n ],\n 'pandas_version': '0.25.0'}\n\n )})\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_metadata_index_name_not_json_serializable():\n name = np.int64(6) # not json serializable by default\n table = pa.table(pd.DataFrame(index=pd.RangeIndex(0, 4, name=name)))\n metadata = table.schema.pandas_metadata\n assert metadata['index_columns'][0]['name'] == '6'\n\n\ndef test_metadata_index_name_is_json_serializable():\n name = 6 # json serializable by default\n table = pa.table(pd.DataFrame(index=pd.RangeIndex(0, 4, name=name)))\n metadata = table.schema.pandas_metadata\n assert metadata['index_columns'][0]['name'] == 6\n\n\ndef make_df_with_timestamps():\n # Some of the milliseconds timestamps deliberately don't fit in the range\n # that is possible with nanosecond timestamps.\n df 
= pd.DataFrame({\n        'dateTimeMs': [\n            np.datetime64('0001-01-01 00:00', 'ms'),\n            np.datetime64('2012-05-02 12:35', 'ms'),\n            np.datetime64('2012-05-03 15:42', 'ms'),\n            np.datetime64('3000-05-03 15:42', 'ms'),\n        ],\n        'dateTimeNs': [\n            np.datetime64('1991-01-01 00:00', 'ns'),\n            np.datetime64('2012-05-02 12:35', 'ns'),\n            np.datetime64('2012-05-03 15:42', 'ns'),\n            np.datetime64('2050-05-03 15:42', 'ns'),\n        ],\n    })\n    # Not part of what we're testing, just ensuring that the inputs are what we\n    # expect.\n    assert (df.dateTimeMs.dtype, df.dateTimeNs.dtype) == (\n        # O == object, M8[ns] == timestamp64[ns]\n        np.dtype(\"O\"), np.dtype(\"M8[ns]\")\n    )\n    return df\n\n\n@pytest.mark.parquet\ndef test_timestamp_as_object_parquet(tempdir):\n    # Timestamps can be stored as Parquet and reloaded into Pandas with no loss\n    # of information if the timestamp_as_object option is True.\n    df = make_df_with_timestamps()\n    table = pa.Table.from_pandas(df)\n    filename = tempdir / \"timestamps_from_pandas.parquet\"\n    pq.write_table(table, filename, version=\"2.0\")\n    result = pq.read_table(filename)\n    df2 = result.to_pandas(timestamp_as_object=True)\n    tm.assert_frame_equal(df, df2)\n\n\ndef test_timestamp_as_object_out_of_range():\n    # Out of range timestamps can be converted Arrow and reloaded into Pandas\n    # with no loss of information if the timestamp_as_object option is True.\n    df = make_df_with_timestamps()\n    table = pa.Table.from_pandas(df)\n    df2 = table.to_pandas(timestamp_as_object=True)\n    tm.assert_frame_equal(df, df2)\n\n\n@pytest.mark.parametrize(\"resolution\", [\"s\", \"ms\", \"us\"])\n@pytest.mark.parametrize(\"tz\", [None, \"America/New_York\"])\n# One datetime outside nanosecond range, one inside nanosecond range:\n@pytest.mark.parametrize(\"dt\", [datetime(1553, 1, 1), datetime(2020, 1, 1)])\ndef test_timestamp_as_object_non_nanosecond(resolution, tz, dt):\n    # Timestamps can be converted Arrow and reloaded into Pandas with no loss\n    # of information if the timestamp_as_object option is True.\n    arr = pa.array([dt], type=pa.timestamp(resolution, tz=tz))\n    table = pa.table({'a': arr})\n\n    for result in [\n            arr.to_pandas(timestamp_as_object=True),\n            table.to_pandas(timestamp_as_object=True)['a']\n    ]:\n        assert result.dtype == object\n        assert isinstance(result[0], datetime)\n        if tz:\n            assert result[0].tzinfo is not None\n            expected = result[0].tzinfo.fromutc(dt)\n        else:\n            assert result[0].tzinfo is None\n            expected = dt\n        assert result[0] == expected\n\n\ndef test_threaded_pandas_import():\n    invoke_script(\"pandas_threaded_import.py\")\n"
] |
[
[
"pandas.to_datetime",
"pandas.testing.assert_series_equal",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"pandas.RangeIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.random_sample",
"pandas.testing.assert_frame_equal",
"numpy.random.randn",
"numpy.iinfo",
"numpy.where",
"numpy.bool_",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.tseries.offsets.DateOffset",
"numpy.frombuffer",
"pandas.PeriodDtype",
"numpy.repeat",
"numpy.zeros",
"pandas.to_numeric",
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.interval_range",
"numpy.isnan",
"pandas.Categorical",
"pandas.array",
"numpy.int64",
"numpy.random.rand",
"pandas.date_range",
"pandas.DataFrame.from_dict",
"numpy.array",
"pandas.SparseArray",
"pandas.isnull",
"numpy.random.seed",
"pandas.period_range",
"pandas.arrays.SparseArray",
"numpy.int32",
"pandas.MultiIndex.from_arrays",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.datetime64",
"pandas.Timestamp.now",
"numpy.float64",
"numpy.ma.masked_array",
"pandas.Int64Dtype",
"pandas.Timestamp",
"numpy.empty"
]
] |
Tillsten/pyqtgraph
|
[
"0045863165fe526988c58cf4f8232ae2d261a5ee"
] |
[
"pyqtgraph/graphicsItems/ScatterPlotItem.py"
] |
[
"from ..Qt import QtGui, QtCore, USE_PYSIDE\nfrom ..Point import Point\nfrom .. import functions as fn\nfrom .GraphicsItem import GraphicsItem\nfrom .GraphicsObject import GraphicsObject\nfrom itertools import starmap, repeat\ntry:\n from itertools import imap\nexcept ImportError:\n imap = map\nimport numpy as np\nimport weakref\nfrom .. import getConfigOption\nfrom .. import debug as debug\nfrom ..pgcollections import OrderedDict\nfrom .. import debug\n\n__all__ = ['ScatterPlotItem', 'SpotItem']\n\n\n## Build all symbol paths\nSymbols = OrderedDict([(name, QtGui.QPainterPath()) for name in ['o', 's', 't', 'd', '+', 'x']])\nSymbols['o'].addEllipse(QtCore.QRectF(-0.5, -0.5, 1, 1))\nSymbols['s'].addRect(QtCore.QRectF(-0.5, -0.5, 1, 1))\ncoords = {\n 't': [(-0.5, -0.5), (0, 0.5), (0.5, -0.5)],\n 'd': [(0., -0.5), (-0.4, 0.), (0, 0.5), (0.4, 0)],\n '+': [\n (-0.5, -0.05), (-0.5, 0.05), (-0.05, 0.05), (-0.05, 0.5),\n (0.05, 0.5), (0.05, 0.05), (0.5, 0.05), (0.5, -0.05), \n (0.05, -0.05), (0.05, -0.5), (-0.05, -0.5), (-0.05, -0.05)\n ],\n}\nfor k, c in coords.items():\n Symbols[k].moveTo(*c[0])\n for x,y in c[1:]:\n Symbols[k].lineTo(x, y)\n Symbols[k].closeSubpath()\ntr = QtGui.QTransform()\ntr.rotate(45)\nSymbols['x'] = tr.map(Symbols['+'])\n\n \ndef drawSymbol(painter, symbol, size, pen, brush):\n if symbol is None:\n return\n painter.scale(size, size)\n painter.setPen(pen)\n painter.setBrush(brush)\n if isinstance(symbol, basestring):\n symbol = Symbols[symbol]\n if np.isscalar(symbol):\n symbol = list(Symbols.values())[symbol % len(Symbols)]\n painter.drawPath(symbol)\n\n \ndef renderSymbol(symbol, size, pen, brush, device=None):\n \"\"\"\n Render a symbol specification to QImage.\n Symbol may be either a QPainterPath or one of the keys in the Symbols dict.\n If *device* is None, a new QPixmap will be returned. Otherwise,\n the symbol will be rendered into the device specified (See QPainter documentation \n for more information).\n \"\"\"\n ## Render a spot with the given parameters to a pixmap\n penPxWidth = max(np.ceil(pen.widthF()), 1)\n if device is None:\n device = QtGui.QImage(int(size+penPxWidth), int(size+penPxWidth), QtGui.QImage.Format_ARGB32)\n device.fill(0)\n p = QtGui.QPainter(device)\n p.setRenderHint(p.Antialiasing)\n p.translate(device.width()*0.5, device.height()*0.5)\n drawSymbol(p, symbol, size, pen, brush)\n p.end()\n return device\n\ndef makeSymbolPixmap(size, pen, brush, symbol):\n ## deprecated\n img = renderSymbol(symbol, size, pen, brush)\n return QtGui.QPixmap(img)\n \nclass SymbolAtlas(object):\n \"\"\"\n Used to efficiently construct a single QPixmap containing all rendered symbols\n for a ScatterPlotItem. This is required for fragment rendering.\n \n Use example:\n atlas = SymbolAtlas()\n sc1 = atlas.getSymbolCoords('o', 5, QPen(..), QBrush(..))\n sc2 = atlas.getSymbolCoords('t', 10, QPen(..), QBrush(..))\n pm = atlas.getAtlas()\n \n \"\"\"\n def __init__(self):\n # symbol key : QRect(...) 
coordinates where symbol can be found in atlas.\n # note that the coordinate list will always be the same list object as \n # long as the symbol is in the atlas, but the coordinates may\n # change if the atlas is rebuilt.\n # weak value; if all external refs to this list disappear, \n # the symbol will be forgotten.\n self.symbolMap = weakref.WeakValueDictionary()\n \n self.atlasData = None # numpy array of atlas image\n self.atlas = None # atlas as QPixmap\n self.atlasValid = False\n self.max_width=0\n \n def getSymbolCoords(self, opts):\n \"\"\"\n Given a list of spot records, return an object representing the coordinates of that symbol within the atlas\n \"\"\"\n sourceRect = np.empty(len(opts), dtype=object)\n keyi = None\n sourceRecti = None\n for i, rec in enumerate(opts):\n key = (rec[3], rec[2], id(rec[4]), id(rec[5])) # TODO: use string indexes?\n if key == keyi:\n sourceRect[i] = sourceRecti\n else:\n try:\n sourceRect[i] = self.symbolMap[key]\n except KeyError:\n newRectSrc = QtCore.QRectF()\n newRectSrc.pen = rec['pen']\n newRectSrc.brush = rec['brush']\n self.symbolMap[key] = newRectSrc\n self.atlasValid = False\n sourceRect[i] = newRectSrc\n keyi = key\n sourceRecti = newRectSrc\n return sourceRect\n \n def buildAtlas(self):\n # get rendered array for all symbols, keep track of avg/max width\n rendered = {}\n avgWidth = 0.0\n maxWidth = 0\n images = []\n for key, sourceRect in self.symbolMap.items():\n if sourceRect.width() == 0:\n img = renderSymbol(key[0], key[1], sourceRect.pen, sourceRect.brush)\n images.append(img) ## we only need this to prevent the images being garbage collected immediately\n arr = fn.imageToArray(img, copy=False, transpose=False)\n else:\n (y,x,h,w) = sourceRect.getRect()\n arr = self.atlasData[x:x+w, y:y+w]\n rendered[key] = arr\n w = arr.shape[0]\n avgWidth += w\n maxWidth = max(maxWidth, w)\n \n nSymbols = len(rendered)\n if nSymbols > 0:\n avgWidth /= nSymbols\n width = max(maxWidth, avgWidth * (nSymbols**0.5))\n else:\n avgWidth = 0\n width = 0\n \n # sort symbols by height\n symbols = sorted(rendered.keys(), key=lambda x: rendered[x].shape[1], reverse=True)\n \n self.atlasRows = []\n\n x = width\n y = 0\n rowheight = 0\n for key in symbols:\n arr = rendered[key]\n w,h = arr.shape[:2]\n if x+w > width:\n y += rowheight\n x = 0\n rowheight = h\n self.atlasRows.append([y, rowheight, 0])\n self.symbolMap[key].setRect(y, x, h, w)\n x += w\n self.atlasRows[-1][2] = x\n height = y + rowheight\n\n self.atlasData = np.zeros((width, height, 4), dtype=np.ubyte)\n for key in symbols:\n y, x, h, w = self.symbolMap[key].getRect()\n self.atlasData[x:x+w, y:y+h] = rendered[key]\n self.atlas = None\n self.atlasValid = True\n self.max_width = maxWidth\n \n def getAtlas(self):\n if not self.atlasValid:\n self.buildAtlas()\n if self.atlas is None:\n if len(self.atlasData) == 0:\n return QtGui.QPixmap(0,0)\n img = fn.makeQImage(self.atlasData, copy=False, transpose=False)\n self.atlas = QtGui.QPixmap(img)\n return self.atlas\n \n \n \n \nclass ScatterPlotItem(GraphicsObject):\n \"\"\"\n Displays a set of x/y points. Instances of this class are created\n automatically as part of PlotDataItem; these rarely need to be instantiated\n directly.\n \n The size, shape, pen, and fill brush may be set for each point individually \n or for all points. \n \n \n ======================== ===============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data being plotted has changed\n sigClicked(self, points) Emitted when the curve is clicked. 
Sends a list\n of all the points under the mouse pointer.\n ======================== ===============================================\n \n \"\"\"\n #sigPointClicked = QtCore.Signal(object, object)\n sigClicked = QtCore.Signal(object, object) ## self, points\n sigPlotChanged = QtCore.Signal(object)\n def __init__(self, *args, **kargs):\n \"\"\"\n Accepts the same arguments as setData()\n \"\"\"\n profiler = debug.Profiler()\n GraphicsObject.__init__(self)\n \n self.picture = None # QPicture used for rendering when pxmode==False\n self.fragmentAtlas = SymbolAtlas()\n \n self.data = np.empty(0, dtype=[('x', float), ('y', float), ('size', float), ('symbol', object), ('pen', object), ('brush', object), ('data', object), ('item', object), ('sourceRect', object), ('targetRect', object), ('width', float)])\n self.bounds = [None, None] ## caches data bounds\n self._maxSpotWidth = 0 ## maximum size of the scale-variant portion of all spots\n self._maxSpotPxWidth = 0 ## maximum size of the scale-invariant portion of all spots\n self.opts = {\n 'pxMode': True, \n 'useCache': True, ## If useCache is False, symbols are re-drawn on every paint. \n 'antialias': getConfigOption('antialias'),\n 'name': None,\n } \n \n self.setPen(fn.mkPen(getConfigOption('foreground')), update=False)\n self.setBrush(fn.mkBrush(100,100,150), update=False)\n self.setSymbol('o', update=False)\n self.setSize(7, update=False)\n profiler()\n self.setData(*args, **kargs)\n profiler('setData')\n\n #self.setCacheMode(self.DeviceCoordinateCache)\n \n def setData(self, *args, **kargs):\n \"\"\"\n **Ordered Arguments:**\n \n * If there is only one unnamed argument, it will be interpreted like the 'spots' argument.\n * If there are two unnamed arguments, they will be interpreted as sequences of x and y values.\n \n ====================== ===============================================================================================\n **Keyword Arguments:**\n *spots* Optional list of dicts. Each dict specifies parameters for a single spot:\n {'pos': (x,y), 'size', 'pen', 'brush', 'symbol'}. This is just an alternate method\n of passing in data for the corresponding arguments.\n *x*,*y* 1D arrays of x,y values.\n *pos* 2D structure of x,y pairs (such as Nx2 array or list of tuples)\n *pxMode* If True, spots are always the same size regardless of scaling, and size is given in px.\n Otherwise, size is in scene coordinates and the spots scale with the view.\n Default is True\n *symbol* can be one (or a list) of:\n * 'o' circle (default)\n * 's' square\n * 't' triangle\n * 'd' diamond\n * '+' plus\n * any QPainterPath to specify custom symbol shapes. To properly obey the position and size,\n custom symbols should be centered at (0,0) and width and height of 1.0. Note that it is also\n possible to 'install' custom shapes by setting ScatterPlotItem.Symbols[key] = shape.\n *pen* The pen (or list of pens) to use for drawing spot outlines.\n *brush* The brush (or list of brushes) to use for filling spots.\n *size* The size (or list of sizes) of spots. If *pxMode* is True, this value is in pixels. Otherwise,\n it is in the item's local coordinate system.\n *data* a list of python objects used to uniquely identify each spot.\n *identical* *Deprecated*. This functionality is handled automatically now.\n *antialias* Whether to draw symbols with antialiasing. Note that if pxMode is True, symbols are \n always rendered with antialiasing (since the rendered symbols can be cached, this \n incurs very little performance cost)\n *name* The name of this item. 
Names are used for automatically\n generating LegendItem entries and by some exporters.\n ====================== ===============================================================================================\n \"\"\"\n oldData = self.data ## this causes cached pixmaps to be preserved while new data is registered.\n self.clear() ## clear out all old data\n self.addPoints(*args, **kargs)\n\n def addPoints(self, *args, **kargs):\n \"\"\"\n Add new points to the scatter plot. \n Arguments are the same as setData()\n \"\"\"\n \n ## deal with non-keyword arguments\n if len(args) == 1:\n kargs['spots'] = args[0]\n elif len(args) == 2:\n kargs['x'] = args[0]\n kargs['y'] = args[1]\n elif len(args) > 2:\n raise Exception('Only accepts up to two non-keyword arguments.')\n \n ## convert 'pos' argument to 'x' and 'y'\n if 'pos' in kargs:\n pos = kargs['pos']\n if isinstance(pos, np.ndarray):\n kargs['x'] = pos[:,0]\n kargs['y'] = pos[:,1]\n else:\n x = []\n y = []\n for p in pos:\n if isinstance(p, QtCore.QPointF):\n x.append(p.x())\n y.append(p.y())\n else:\n x.append(p[0])\n y.append(p[1])\n kargs['x'] = x\n kargs['y'] = y\n \n ## determine how many spots we have\n if 'spots' in kargs:\n numPts = len(kargs['spots'])\n elif 'y' in kargs and kargs['y'] is not None:\n numPts = len(kargs['y'])\n else:\n kargs['x'] = []\n kargs['y'] = []\n numPts = 0\n \n ## Extend record array\n oldData = self.data\n self.data = np.empty(len(oldData)+numPts, dtype=self.data.dtype)\n ## note that np.empty initializes object fields to None and string fields to ''\n \n self.data[:len(oldData)] = oldData\n #for i in range(len(oldData)):\n #oldData[i]['item']._data = self.data[i] ## Make sure items have proper reference to new array\n \n newData = self.data[len(oldData):]\n newData['size'] = -1 ## indicates to use default size\n \n if 'spots' in kargs:\n spots = kargs['spots']\n for i in range(len(spots)):\n spot = spots[i]\n for k in spot:\n #if k == 'pen':\n #newData[k] = fn.mkPen(spot[k])\n #elif k == 'brush':\n #newData[k] = fn.mkBrush(spot[k])\n if k == 'pos':\n pos = spot[k]\n if isinstance(pos, QtCore.QPointF):\n x,y = pos.x(), pos.y()\n else:\n x,y = pos[0], pos[1]\n newData[i]['x'] = x\n newData[i]['y'] = y\n elif k in ['x', 'y', 'size', 'symbol', 'pen', 'brush', 'data']:\n newData[i][k] = spot[k]\n #elif k == 'data':\n #self.pointData[i] = spot[k]\n else:\n raise Exception(\"Unknown spot parameter: %s\" % k)\n elif 'y' in kargs:\n newData['x'] = kargs['x']\n newData['y'] = kargs['y']\n \n if 'pxMode' in kargs:\n self.setPxMode(kargs['pxMode'])\n if 'antialias' in kargs:\n self.opts['antialias'] = kargs['antialias']\n \n ## Set any extra parameters provided in keyword arguments\n for k in ['pen', 'brush', 'symbol', 'size']:\n if k in kargs:\n setMethod = getattr(self, 'set' + k[0].upper() + k[1:])\n setMethod(kargs[k], update=False, dataSet=newData, mask=kargs.get('mask', None))\n \n if 'data' in kargs:\n self.setPointData(kargs['data'], dataSet=newData)\n \n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n self.bounds = [None, None]\n self.invalidate()\n self.updateSpots(newData)\n self.sigPlotChanged.emit(self)\n \n def invalidate(self):\n ## clear any cached drawing state\n self.picture = None\n self.update()\n \n def getData(self):\n return self.data['x'], self.data['y'] \n \n def setPoints(self, *args, **kargs):\n ##Deprecated; use setData\n return self.setData(*args, **kargs)\n \n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return 
interface in ints\n \n def name(self):\n return self.opts.get('name', None)\n \n def setPen(self, *args, **kargs):\n \"\"\"Set the pen(s) used to draw the outline around each spot. \n If a list or array is provided, then the pen for each spot will be set separately.\n Otherwise, the arguments are passed to pg.mkPen and used as the default pen for \n all spots which do not have a pen explicitly set.\"\"\"\n update = kargs.pop('update', True)\n dataSet = kargs.pop('dataSet', self.data)\n \n if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):\n pens = args[0]\n if kargs.get('mask', None) is not None: ## 'mask' may be absent when called directly\n pens = pens[kargs['mask']]\n if len(pens) != len(dataSet):\n raise Exception(\"Number of pens does not match number of points (%d != %d)\" % (len(pens), len(dataSet)))\n dataSet['pen'] = pens\n else:\n self.opts['pen'] = fn.mkPen(*args, **kargs)\n \n dataSet['sourceRect'] = None\n if update:\n self.updateSpots(dataSet)\n \n def setBrush(self, *args, **kargs):\n \"\"\"Set the brush(es) used to fill the interior of each spot. \n If a list or array is provided, then the brush for each spot will be set separately.\n Otherwise, the arguments are passed to pg.mkBrush and used as the default brush for \n all spots which do not have a brush explicitly set.\"\"\"\n update = kargs.pop('update', True)\n dataSet = kargs.pop('dataSet', self.data)\n \n if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):\n brushes = args[0]\n if kargs.get('mask', None) is not None: ## 'mask' may be absent when called directly\n brushes = brushes[kargs['mask']]\n if len(brushes) != len(dataSet):\n raise Exception(\"Number of brushes does not match number of points (%d != %d)\" % (len(brushes), len(dataSet)))\n #for i in xrange(len(brushes)):\n #self.data[i]['brush'] = fn.mkBrush(brushes[i], **kargs)\n dataSet['brush'] = brushes\n else:\n self.opts['brush'] = fn.mkBrush(*args, **kargs)\n #self._spotPixmap = None\n \n dataSet['sourceRect'] = None\n if update:\n self.updateSpots(dataSet)\n\n def setSymbol(self, symbol, update=True, dataSet=None, mask=None):\n \"\"\"Set the symbol(s) used to draw each spot. \n If a list or array is provided, then the symbol for each spot will be set separately.\n Otherwise, the argument will be used as the default symbol for \n all spots which do not have a symbol explicitly set.\"\"\"\n if dataSet is None:\n dataSet = self.data\n \n if isinstance(symbol, np.ndarray) or isinstance(symbol, list):\n symbols = symbol\n if mask is not None:\n symbols = symbols[mask]\n if len(symbols) != len(dataSet):\n raise Exception(\"Number of symbols does not match number of points (%d != %d)\" % (len(symbols), len(dataSet)))\n dataSet['symbol'] = symbols\n else:\n self.opts['symbol'] = symbol\n self._spotPixmap = None\n \n dataSet['sourceRect'] = None\n if update:\n self.updateSpots(dataSet)\n \n def setSize(self, size, update=True, dataSet=None, mask=None):\n \"\"\"Set the size(s) used to draw each spot. 
\n If a list or array is provided, then the size for each spot will be set separately.\n Otherwise, the argument will be used as the default size for \n all spots which do not have a size explicitly set.\"\"\"\n if dataSet is None:\n dataSet = self.data\n \n if isinstance(size, np.ndarray) or isinstance(size, list):\n sizes = size\n if mask is not None:\n sizes = sizes[mask]\n if len(sizes) != len(dataSet):\n raise Exception(\"Number of sizes does not match number of points (%d != %d)\" % (len(sizes), len(dataSet)))\n dataSet['size'] = sizes\n else:\n self.opts['size'] = size\n self._spotPixmap = None\n \n dataSet['sourceRect'] = None\n if update:\n self.updateSpots(dataSet)\n \n def setPointData(self, data, dataSet=None, mask=None):\n if dataSet is None:\n dataSet = self.data\n \n if isinstance(data, np.ndarray) or isinstance(data, list):\n if mask is not None:\n data = data[mask]\n if len(data) != len(dataSet):\n raise Exception(\"Length of meta data does not match number of points (%d != %d)\" % (len(data), len(dataSet)))\n \n ## Bug: If data is a numpy record array, then items from that array must be copied to dataSet one at a time.\n ## (otherwise they are converted to tuples and thus lose their field names.\n if isinstance(data, np.ndarray) and (data.dtype.fields is not None)and len(data.dtype.fields) > 1:\n for i, rec in enumerate(data):\n dataSet['data'][i] = rec\n else:\n dataSet['data'] = data\n \n def setPxMode(self, mode):\n if self.opts['pxMode'] == mode:\n return\n \n self.opts['pxMode'] = mode\n self.invalidate()\n \n def updateSpots(self, dataSet=None):\n if dataSet is None:\n dataSet = self.data\n\n invalidate = False\n if self.opts['pxMode']:\n mask = np.equal(dataSet['sourceRect'], None)\n if np.any(mask):\n invalidate = True\n opts = self.getSpotOpts(dataSet[mask])\n sourceRect = self.fragmentAtlas.getSymbolCoords(opts)\n dataSet['sourceRect'][mask] = sourceRect\n \n self.fragmentAtlas.getAtlas() # generate atlas so source widths are available.\n \n dataSet['width'] = np.array(list(imap(QtCore.QRectF.width, dataSet['sourceRect'])))/2\n dataSet['targetRect'] = None\n self._maxSpotPxWidth = self.fragmentAtlas.max_width\n else:\n self._maxSpotWidth = 0\n self._maxSpotPxWidth = 0\n self.measureSpotSizes(dataSet)\n\n if invalidate:\n self.invalidate()\n\n def getSpotOpts(self, recs, scale=1.0):\n if recs.ndim == 0:\n rec = recs\n symbol = rec['symbol']\n if symbol is None:\n symbol = self.opts['symbol']\n size = rec['size']\n if size < 0:\n size = self.opts['size']\n pen = rec['pen']\n if pen is None:\n pen = self.opts['pen']\n brush = rec['brush']\n if brush is None:\n brush = self.opts['brush']\n return (symbol, size*scale, fn.mkPen(pen), fn.mkBrush(brush))\n else:\n recs = recs.copy()\n recs['symbol'][np.equal(recs['symbol'], None)] = self.opts['symbol']\n recs['size'][np.equal(recs['size'], -1)] = self.opts['size']\n recs['size'] *= scale\n recs['pen'][np.equal(recs['pen'], None)] = fn.mkPen(self.opts['pen'])\n recs['brush'][np.equal(recs['brush'], None)] = fn.mkBrush(self.opts['brush'])\n return recs\n \n \n \n def measureSpotSizes(self, dataSet):\n for rec in dataSet:\n ## keep track of the maximum spot size and pixel size\n symbol, size, pen, brush = self.getSpotOpts(rec)\n width = 0\n pxWidth = 0\n if self.opts['pxMode']:\n pxWidth = size + pen.widthF()\n else:\n width = size\n if pen.isCosmetic():\n pxWidth += pen.widthF()\n else:\n width += pen.widthF()\n self._maxSpotWidth = max(self._maxSpotWidth, width)\n self._maxSpotPxWidth = max(self._maxSpotPxWidth, 
pxWidth)\n self.bounds = [None, None]\n \n \n def clear(self):\n \"\"\"Remove all spots from the scatter plot\"\"\"\n #self.clearItems()\n self.data = np.empty(0, dtype=self.data.dtype)\n self.bounds = [None, None]\n self.invalidate()\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None:\n return self.bounds[ax]\n \n #self.prepareGeometryChange()\n if self.data is None or len(self.data) == 0:\n return (None, None)\n \n if ax == 0:\n d = self.data['x']\n d2 = self.data['y']\n elif ax == 1:\n d = self.data['y']\n d2 = self.data['x']\n \n if orthoRange is not None:\n mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])\n d = d[mask]\n d2 = d2[mask]\n \n if frac >= 1.0:\n self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth*0.7072, np.nanmax(d) + self._maxSpotWidth*0.7072)\n return self.bounds[ax]\n elif frac <= 0.0:\n raise Exception(\"Value for parameter 'frac' must be > 0. (got %s)\" % str(frac))\n else:\n mask = np.isfinite(d)\n d = d[mask]\n return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])\n\n def pixelPadding(self):\n return self._maxSpotPxWidth*0.7072\n\n def boundingRect(self):\n (xmn, xmx) = self.dataBounds(ax=0)\n (ymn, ymx) = self.dataBounds(ax=1)\n if xmn is None or xmx is None:\n xmn = 0\n xmx = 0\n if ymn is None or ymx is None:\n ymn = 0\n ymx = 0\n \n px = py = 0.0\n pxPad = self.pixelPadding()\n if pxPad > 0:\n # determine length of pixel in local x, y directions \n px, py = self.pixelVectors()\n px = 0 if px is None else px.length() \n py = 0 if py is None else py.length()\n \n # return bounds expanded by pixel size\n px *= pxPad\n py *= pxPad\n return QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn)\n\n def viewTransformChanged(self):\n self.prepareGeometryChange()\n GraphicsObject.viewTransformChanged(self)\n self.bounds = [None, None]\n self.data['targetRect'] = None\n\n def setExportMode(self, *args, **kwds):\n GraphicsObject.setExportMode(self, *args, **kwds)\n self.invalidate()\n\n\n def mapPointsToDevice(self, pts):\n # Map point locations to device \n tr = self.deviceTransform()\n if tr is None:\n return None\n\n #pts = np.empty((2,len(self.data['x'])))\n #pts[0] = self.data['x']\n #pts[1] = self.data['y']\n pts = fn.transformCoordinates(tr, pts)\n pts -= self.data['width']\n pts = np.clip(pts, -2**30, 2**30) ## prevent Qt segmentation fault.\n \n return pts\n\n def getViewMask(self, pts):\n # Return bool mask indicating all points that are within viewbox\n # pts is expressed in *device coordiantes*\n vb = self.getViewBox()\n if vb is None:\n return None\n viewBounds = vb.mapRectToDevice(vb.boundingRect())\n w = self.data['width']\n mask = ((pts[0] + w > viewBounds.left()) &\n (pts[0] - w < viewBounds.right()) &\n (pts[1] + w > viewBounds.top()) &\n (pts[1] - w < viewBounds.bottom())) ## remove out of view points \n return mask\n \n \n @debug.warnOnException ## raising an exception here causes crash\n def paint(self, p, *args):\n\n #p.setPen(fn.mkPen('r'))\n #p.drawRect(self.boundingRect())\n \n if self._exportOpts is not False:\n aa = self._exportOpts.get('antialias', True)\n scale = self._exportOpts.get('resolutionScale', 1.0) ## exporting to image; pixel resolution may have changed\n else:\n aa = self.opts['antialias']\n scale = 1.0\n \n if self.opts['pxMode'] is True:\n p.resetTransform()\n \n # Map point coordinates to device\n pts = np.vstack([self.data['x'], self.data['y']])\n pts = self.mapPointsToDevice(pts)\n if pts is None:\n return\n \n # Cull points that 
are outside view\n viewMask = self.getViewMask(pts)\n #pts = pts[:,mask]\n #data = self.data[mask]\n \n if self.opts['useCache'] and self._exportOpts is False:\n # Draw symbols from pre-rendered atlas\n atlas = self.fragmentAtlas.getAtlas()\n \n # Update targetRects if necessary\n updateMask = viewMask & np.equal(self.data['targetRect'], None)\n if np.any(updateMask):\n updatePts = pts[:,updateMask]\n width = self.data[updateMask]['width']*2\n self.data['targetRect'][updateMask] = list(imap(QtCore.QRectF, updatePts[0,:], updatePts[1,:], width, width))\n \n data = self.data[viewMask]\n if USE_PYSIDE:\n list(imap(p.drawPixmap, data['targetRect'], repeat(atlas), data['sourceRect']))\n else:\n p.drawPixmapFragments(data['targetRect'].tolist(), data['sourceRect'].tolist(), atlas)\n else:\n # render each symbol individually\n p.setRenderHint(p.Antialiasing, aa)\n\n data = self.data[viewMask]\n pts = pts[:,viewMask]\n for i, rec in enumerate(data):\n p.resetTransform()\n p.translate(pts[0,i] + rec['width'], pts[1,i] + rec['width'])\n drawSymbol(p, *self.getSpotOpts(rec, scale))\n else:\n if self.picture is None:\n self.picture = QtGui.QPicture()\n p2 = QtGui.QPainter(self.picture)\n for rec in self.data:\n if scale != 1.0:\n rec = rec.copy()\n rec['size'] *= scale\n p2.resetTransform()\n p2.translate(rec['x'], rec['y'])\n drawSymbol(p2, *self.getSpotOpts(rec, scale))\n p2.end()\n \n p.setRenderHint(p.Antialiasing, aa)\n self.picture.play(p)\n \n def points(self):\n for rec in self.data:\n if rec['item'] is None:\n rec['item'] = SpotItem(rec, self)\n return self.data['item']\n \n def pointsAt(self, pos):\n x = pos.x()\n y = pos.y()\n pw = self.pixelWidth()\n ph = self.pixelHeight()\n pts = []\n for s in self.points():\n sp = s.pos()\n ss = s.size()\n sx = sp.x()\n sy = sp.y()\n s2x = s2y = ss * 0.5\n if self.opts['pxMode']:\n s2x *= pw\n s2y *= ph\n if x > sx-s2x and x < sx+s2x and y > sy-s2y and y < sy+s2y:\n pts.append(s)\n #print \"HIT:\", x, y, sx, sy, s2x, s2y\n #else:\n #print \"No hit:\", (x, y), (sx, sy)\n #print \" \", (sx-s2x, sy-s2y), (sx+s2x, sy+s2y)\n #pts.sort(lambda a,b: cmp(b.zValue(), a.zValue()))\n return pts[::-1]\n \n\n def mouseClickEvent(self, ev):\n if ev.button() == QtCore.Qt.LeftButton:\n pts = self.pointsAt(ev.pos())\n if len(pts) > 0:\n self.ptsClicked = pts\n self.sigClicked.emit(self, self.ptsClicked)\n ev.accept()\n else:\n #print \"no spots\"\n ev.ignore()\n else:\n ev.ignore()\n\n\nclass SpotItem(object):\n \"\"\"\n Class referring to individual spots in a scatter plot.\n These can be retrieved by calling ScatterPlotItem.points() or \n by connecting to the ScatterPlotItem's click signals.\n \"\"\"\n\n def __init__(self, data, plot):\n #GraphicsItem.__init__(self, register=False)\n self._data = data\n self._plot = plot\n #self.setParentItem(plot)\n #self.setPos(QtCore.QPointF(data['x'], data['y']))\n #self.updateItem()\n \n def data(self):\n \"\"\"Return the user data associated with this spot.\"\"\"\n return self._data['data']\n \n def size(self):\n \"\"\"Return the size of this spot. \n If the spot has no explicit size set, then return the ScatterPlotItem's default size instead.\"\"\"\n if self._data['size'] == -1:\n return self._plot.opts['size']\n else:\n return self._data['size']\n \n def pos(self):\n return Point(self._data['x'], self._data['y'])\n \n def viewPos(self):\n return self._plot.mapToView(self.pos())\n \n def setSize(self, size):\n \"\"\"Set the size of this spot. 
\n If the size is set to -1, then the ScatterPlotItem's default size \n will be used instead.\"\"\"\n self._data['size'] = size\n self.updateItem()\n \n def symbol(self):\n \"\"\"Return the symbol of this spot. \n If the spot has no explicit symbol set, then return the ScatterPlotItem's default symbol instead.\n \"\"\"\n symbol = self._data['symbol']\n if symbol is None:\n symbol = self._plot.opts['symbol']\n try:\n n = int(symbol)\n symbol = list(Symbols.keys())[n % len(Symbols)]\n except:\n pass\n return symbol\n \n def setSymbol(self, symbol):\n \"\"\"Set the symbol for this spot.\n If the symbol is set to '', then the ScatterPlotItem's default symbol will be used instead.\"\"\"\n self._data['symbol'] = symbol\n self.updateItem()\n\n def pen(self):\n pen = self._data['pen']\n if pen is None:\n pen = self._plot.opts['pen']\n return fn.mkPen(pen)\n \n def setPen(self, *args, **kargs):\n \"\"\"Set the outline pen for this spot\"\"\"\n pen = fn.mkPen(*args, **kargs)\n self._data['pen'] = pen\n self.updateItem()\n \n def resetPen(self):\n \"\"\"Remove the pen set for this spot; the scatter plot's default pen will be used instead.\"\"\"\n self._data['pen'] = None ## Note this is NOT the same as calling setPen(None)\n self.updateItem()\n \n def brush(self):\n brush = self._data['brush']\n if brush is None:\n brush = self._plot.opts['brush']\n return fn.mkBrush(brush)\n \n def setBrush(self, *args, **kargs):\n \"\"\"Set the fill brush for this spot\"\"\"\n brush = fn.mkBrush(*args, **kargs)\n self._data['brush'] = brush\n self.updateItem()\n \n def resetBrush(self):\n \"\"\"Remove the brush set for this spot; the scatter plot's default brush will be used instead.\"\"\"\n self._data['brush'] = None ## Note this is NOT the same as calling setBrush(None)\n self.updateItem()\n \n def setData(self, data):\n \"\"\"Set the user-data associated with this spot\"\"\"\n self._data['data'] = data\n\n def updateItem(self):\n self._data['sourceRect'] = None\n self._plot.updateSpots(self._data.reshape(1))\n self._plot.invalidate()\n\n#class PixmapSpotItem(SpotItem, QtGui.QGraphicsPixmapItem):\n #def __init__(self, data, plot):\n #QtGui.QGraphicsPixmapItem.__init__(self)\n #self.setFlags(self.flags() | self.ItemIgnoresTransformations)\n #SpotItem.__init__(self, data, plot)\n \n #def setPixmap(self, pixmap):\n #QtGui.QGraphicsPixmapItem.setPixmap(self, pixmap)\n #self.setOffset(-pixmap.width()/2.+0.5, -pixmap.height()/2.)\n \n #def updateItem(self):\n #symbolOpts = (self._data['pen'], self._data['brush'], self._data['size'], self._data['symbol'])\n \n ### If all symbol options are default, use default pixmap\n #if symbolOpts == (None, None, -1, ''):\n #pixmap = self._plot.defaultSpotPixmap()\n #else:\n #pixmap = makeSymbolPixmap(size=self.size(), pen=self.pen(), brush=self.brush(), symbol=self.symbol())\n #self.setPixmap(pixmap)\n\n\n#class PathSpotItem(SpotItem, QtGui.QGraphicsPathItem):\n #def __init__(self, data, plot):\n #QtGui.QGraphicsPathItem.__init__(self)\n #SpotItem.__init__(self, data, plot)\n\n #def updateItem(self):\n #QtGui.QGraphicsPathItem.setPath(self, Symbols[self.symbol()])\n #QtGui.QGraphicsPathItem.setPen(self, self.pen())\n #QtGui.QGraphicsPathItem.setBrush(self, self.brush())\n #size = self.size()\n #self.resetTransform()\n #self.scale(size, size)\n"
] |
[
[
"numpy.nanmax",
"numpy.isfinite",
"numpy.clip",
"numpy.vstack",
"numpy.nanmin",
"numpy.percentile",
"numpy.equal",
"numpy.isscalar",
"numpy.any",
"numpy.zeros",
"numpy.empty"
]
] |
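A minimal usage sketch for the ScatterPlotItem API documented in the record above. The keyword arguments mirror the setData() docstring and the sigClicked(self, points) signal; the random data, the pg.plot() window, and the pg.exec() event-loop call are illustrative assumptions rather than part of the record.

    # Hedged sketch, assuming a standard pyqtgraph install.
    import numpy as np
    import pyqtgraph as pg

    pg.mkQApp()                       # a Qt application must exist before creating widgets
    plot = pg.plot()                  # convenience window wrapping a PlotItem

    scatter = pg.ScatterPlotItem(
        x=np.random.normal(size=100),
        y=np.random.normal(size=100),
        size=10, symbol='o',          # pxMode=True by default, so size is in pixels
        pen=pg.mkPen('w'), brush=pg.mkBrush(100, 100, 150))
    scatter.sigClicked.connect(lambda item, points: print(len(points), "point(s) clicked"))
    plot.addItem(scatter)

    pg.exec()                         # pyqtgraph >= 0.12; older releases run the Qt app's exec_() instead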
melhabr/edge-model-converter
|
[
"26d967d29413d2e8f4bacc8ab2e5809a1289eae0"
] |
[
"converter_util.py"
] |
[
"import queue\n\n\n# Returns a GraphCharacteristics object given a tensorflow graphdef, which has the following properties:\n# nodes_by_name: A dictionary that maps node names to node objects in the graph\n# node_outputs_by_name: A dictionary that maps node names to their respective output node objects\n# input_node_names: List of likely input node names\n# input_names: List of likely input nodes\n# output_node_names: List of likely output node names\n# output_nodes: List of likely output nodes\nclass GraphCharacteristics:\n\n def __init__(self, graph_def):\n self.nodes_by_name = {}\n self.node_outputs_by_name = {}\n\n # Build node-name graph\n for node in graph_def.node:\n self.nodes_by_name[node.name] = node\n self.node_outputs_by_name[node.name] = []\n\n # Build name-output graph\n for node in graph_def.node:\n for input_node_name in node.input:\n input_node_proper = input_node_name.split(':')[0].lstrip('^')\n self.node_outputs_by_name[input_node_proper].append(node)\n\n # Ascertain input nodes\n self.input_node_names = [node.name for node in graph_def.node if node.op == 'Placeholder']\n self.input_nodes = [self.nodes_by_name[node_name] for node_name in self.input_node_names]\n print(\"Input names: \", self.input_node_names)\n\n # Ascertain output nodes\n self.output_node_names = [node for node in self.node_outputs_by_name.keys() if\n self.node_outputs_by_name[node] == []]\n self.output_nodes = [self.nodes_by_name[node_name] for node_name in self.output_node_names]\n print(\"Output names: \", self.output_node_names)\n\n # Returns all nodes that are inputs of (but not in) a particular subgraph\n def get_subgraph_inputs(self, subgraph_names):\n\n if type(subgraph_names) is str:\n subgraph_names = [subgraph_names]\n\n nodes = []\n\n for node in self.nodes_by_name.values():\n if not any(x in node.name for x in subgraph_names):\n continue\n for input_node in node.input:\n input_node_proper = input_node.split(':')[0].lstrip('^')\n if not any(x in input_node_proper for x in subgraph_names) and self.nodes_by_name[\n input_node_proper] not in nodes:\n nodes.append(self.nodes_by_name[input_node_proper])\n\n return nodes\n\n\n# Determines the input dimensions, by whichever means necessary\n# TODO: Automatically determine input dimensions for trickier graphs\ndef get_input_dims(args, input_node):\n proposed_dims = args.input_dims\n node_dims = [input_node.attr['shape'].shape.dim[i].size for i in\n range(len(input_node.attr['shape'].shape.dim))]\n\n if proposed_dims is not None:\n\n if len(proposed_dims) != len(node_dims):\n raise ValueError(\"Number of provided input dimensions does not match number of existing dimensions. \"\n \"Given: {}. Existing: {} \"\n .format(len(proposed_dims), len(node_dims)))\n\n for idx, dim in enumerate(node_dims):\n if dim != -1 and dim != proposed_dims[idx]:\n raise ValueError(\n \"Provided input dimension {} contradicts existing input dimension. Given: {}. 
Existing: {}\"\n .format(idx, proposed_dims[idx], dim))\n\n return proposed_dims\n\n unknown_dims = node_dims.count(-1)\n\n if unknown_dims == 0:\n return node_dims\n\n node_dims_print = [(dim if dim != -1 else '?') for dim in node_dims]\n while True:\n proposed_dims = input(\"Existing input structure is {}, enter {} missing dimensions:\\n\"\n .format(node_dims_print, unknown_dims)).split()\n if len(proposed_dims) != unknown_dims:\n print(\"Incorrect number of dimensions\")\n else:\n break\n\n proposed_iter = iter(proposed_dims)\n node_dims = [(dim if dim != -1 else int(next(proposed_iter))) for dim in node_dims]\n print(\"Using input dimensions \", node_dims)\n return node_dims\n\n\n# Performs a depth-first-search across a tensorflow graph for a node with a name containing the target string\n# starting at the node \"start\"\n# note this search from moves outputs to inputs\n# target can be either a string or list of strings\n# returns a result node if one is found, otherwise returns None\n# Will not search more nodes than DFS_SEARCH_LIMIT\ndef BFS(graph, start, target, graph_chars=None, search_limit=50):\n if graph_chars is None:\n graph_chars = GraphCharacteristics(graph)\n\n if type(target) == str:\n target = [target]\n\n target_node = None\n\n q = queue.Queue()\n searchCount = 0\n q.put(start)\n while q is not None and not q.empty():\n node = q.get()\n for node_name in node.input:\n if any(x in node_name for x in target):\n target_node = graph_chars.nodes_by_name[node_name]\n q = None\n break\n q.put(graph_chars.nodes_by_name[node_name.split(':')[0].lstrip('^')])\n\n if searchCount > search_limit:\n break\n\n searchCount += 1\n\n return target_node\n\n\n# returns number of classes in SSD classification, where\n# graph is a graphDef\n# graph_chars is the graph characteristics object, if you've already computed it\ndef get_num_classes(graph, graph_chars=None):\n import tensorflow as tf\n\n initial_node_names = [\"Postprocessor\", \"PostProcess\"]\n class_node_names = [\"ClassPredictor\", \"TFLite_Detection_PostProcess:1\", \"Reshape_5\"]\n\n if graph_chars is None:\n graph_chars = GraphCharacteristics(graph)\n\n search_nodes = graph_chars.get_subgraph_inputs(initial_node_names)\n\n class_node = None\n for node in search_nodes:\n if BFS(graph, node, class_node_names, graph_chars=graph_chars) is not None:\n class_node = node\n break\n if class_node is None:\n print(\"Error: Could not find a class output node for # class determination\")\n exit(1)\n\n # TODO: Remove tensorflow dependency, if possible\n class_tensor = tf.graph_util.import_graph_def(graph, return_elements=[class_node.name + \":0\"])\n\n return int(class_tensor[0].shape[-1])\n\n\n# Returns the NMS input order, which are the order of [loc_data, conf_data, priorbox_data]\n# Where the order is the input order to the postprocessor subgraph with prefix postprocessor_prefix\ndef get_NMS_input_order(graph, postprocessor_prefix, graph_chars=None):\n\n loc_targets = [\"BoxEncodingPredictor\", \"refined_locations\"]\n conf_targets = [\"ClassPredictor\", \"class_predictions\"]\n priorbox_targets = [\"GridAnchor\", \"TensorArrayStack_4\"]\n all_targets = loc_targets + conf_targets + priorbox_targets\n\n if graph_chars is None:\n graph_chars = GraphCharacteristics(graph)\n\n order = [-1, -1, -1]\n\n input_nodes = graph_chars.get_subgraph_inputs(postprocessor_prefix)\n\n # Trim irrelevant inputs\n relevant_input_nodes = []\n for input_node in input_nodes:\n if BFS(graph, input_node, all_targets, graph_chars=graph_chars) \\\n is not None and 
input_node not in relevant_input_nodes:\n relevant_input_nodes.append(input_node)\n\n if len(relevant_input_nodes) != 3:\n print(\"NMS input order error: {} relevant input nodes, should be 3\".format(len(relevant_input_nodes)))\n\n # Find locations\n for idx, node in enumerate(relevant_input_nodes):\n if BFS(graph, node, loc_targets, graph_chars=graph_chars) is not None:\n order[0] = idx\n break\n if order[0] == -1:\n print(\"NMS input order error: Could not find the locations input\")\n\n # Find confidences\n for idx, node in enumerate(relevant_input_nodes):\n if BFS(graph, node, conf_targets, graph_chars=graph_chars) is not None:\n order[1] = idx\n break\n if order[1] == -1:\n print(\"NMS input order error: Could not find the classes input\")\n\n # Find priorboxes\n for idx, node in enumerate(relevant_input_nodes):\n if BFS(graph, node, priorbox_targets, graph_chars=graph_chars) is not None:\n order[2] = idx\n break\n if order[2] == -1:\n print(\"NMS input order error: Could not find the priorboxes input\")\n\n return order\n"
] |
[
[
"tensorflow.graph_util.import_graph_def"
]
] |
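A hedged sketch of how the converter_util helpers in the record above might be driven end to end. The frozen-graph filename is a hypothetical placeholder and the GraphDef loading follows the standard TF1 frozen-graph pattern; only GraphCharacteristics, get_num_classes, and get_NMS_input_order come from the record itself.

    # Hedged sketch: inspect a frozen SSD detection graph with the helpers above.
    import tensorflow as tf
    from converter_util import GraphCharacteristics, get_num_classes, get_NMS_input_order

    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile("frozen_inference_graph.pb", "rb") as f:  # hypothetical path
        graph_def.ParseFromString(f.read())

    chars = GraphCharacteristics(graph_def)  # prints the likely input/output node names
    print("classes:", get_num_classes(graph_def, graph_chars=chars))
    print("NMS order:", get_NMS_input_order(graph_def, "Postprocessor", graph_chars=chars))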
hshaban/epathermostat_nw
|
[
"6fec9402484e1ef7e4e59e2c679d9a8efee99ad6"
] |
[
"tests/test_eeweather_wrapper.py"
] |
[
"import pytest\nimport pandas as pd\nfrom thermostat_nw.eeweather_wrapper import get_indexed_temperatures_eeweather\nfrom .fixtures.single_stage import thermostat_type_1\n\n\ndef test_get_indexed_temperatures_eeweather_empty_index():\n empty_index = pd.DataFrame()\n results = get_indexed_temperatures_eeweather(\"720648\", empty_index)\n assert results.empty is True\n\n\ndef test_get_index_temperatures_eeweather():\n begin_timestamp = pd.Timestamp(\"2011-01-01 00:00:00\")\n periods = 8766\n hourly_index = pd.date_range(begin_timestamp, periods=periods, freq=\"H\")\n results = get_indexed_temperatures_eeweather(\"720648\", hourly_index)\n assert results.shape == (8766,)\n"
] |
[
[
"pandas.Timestamp",
"pandas.DataFrame",
"pandas.date_range"
]
] |
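For context, a small sketch of the wrapper exercised by the tests in the record above. The station ID "720648" and the hourly index mirror the test fixtures; fetching real readings assumes network access to the underlying eeweather data source.

    # Hedged sketch: one temperature reading per entry of the supplied index.
    import pandas as pd
    from thermostat_nw.eeweather_wrapper import get_indexed_temperatures_eeweather

    hourly_index = pd.date_range("2011-01-01", periods=24, freq="H")
    temps = get_indexed_temperatures_eeweather("720648", hourly_index)
    print(temps.head())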
grantseiter/OECD-Corporate-Tax-Burden-App
|
[
"bdf4029af7ee393814de72bb983bda37fe6c9d78"
] |
[
"app.py"
] |
[
"import os\nimport pandas as pd\nimport numpy as np\nimport plotly.io as pio\nimport plotly.graph_objects as go\nimport dash\nfrom dash import dcc\nfrom dash import html\nfrom dash import dash_table\nfrom dash.dependencies import Input, Output\nimport base64\n\nimage_filename = \"assets/aei_logo.png\"\nencoded_image = base64.b64encode(open(image_filename, \"rb\").read())\n\npio.templates.default = \"plotly_white\"\n\nAPP_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# Data\noutput = pd.read_csv(\"data/output.csv\")\n\n# Code\ndef make_bar_figure(rate, ratetitle, ratelabel, stat_marker):\n \"\"\"\n Function creates bar chart for section one.\n \"\"\"\n data = output[0:39]\n data = data.sort_values(by=[rate], ascending=True).reset_index(drop=True)\n oecd_avg = np.average(data[rate], weights=data[\"weight\"])\n btm = data.name[0]\n mid = data.name[12]\n top = data.name[38]\n usloc = int(data[data[\"name\"] == \"United States (Current Law)\"].index[0])\n ushloc = int(data[data[\"name\"] == \"United States (House)\"].index[0])\n usbloc = int(data[data[\"name\"] == \"United States (Biden)\"].index[0])\n\n colors = [\"#008CCC\"] * 100\n colors[usloc] = \"#00D56F\"\n colors[ushloc] = \"#FFB400\"\n colors[usbloc] = \"#FF8100\"\n stat_colors = [\"#67C5F0\"] * 100\n stat_colors[usloc] = \"#00D56F\"\n stat_colors[ushloc] = \"#FFB400\"\n stat_colors[usbloc] = \"#FF8100\"\n\n bar_figure = go.Figure(\n data=go.Bar(\n x=data[\"name\"],\n y=data[rate],\n marker_color=colors,\n name=ratelabel,\n )\n )\n\n if stat_marker:\n bar_figure.add_trace(\n go.Scatter(\n x=data[\"name\"],\n y=data[\"statutory_tax_rate\"],\n mode=\"markers\",\n marker_color=stat_colors,\n name=\"Statutory Rate\",\n )\n )\n\n bar_figure.add_trace(\n go.Scatter(\n x=[btm, mid, top],\n y=[oecd_avg, oecd_avg, oecd_avg],\n mode=\"lines+text\",\n name=ratelabel,\n text=[\"\", \"OECD Average \" + ratelabel, \"\"],\n textposition=\"top center\",\n textfont=dict(color=\"#FF5C68\"),\n line=dict(\n color=\"#FF5C68\",\n dash=\"dash\",\n ),\n hovertemplate=\"(OECD Average, %{y})\",\n hoverlabel=dict(bgcolor=\"#FF5C68\"),\n )\n )\n\n layout = go.Layout(\n showlegend=False,\n title=ratetitle\n + \" in the OECD, Current Law and Proposals \"\n + \"<br><sup><i>Hover over data to view more information.</i></sup>\",\n yaxis=dict(\n gridcolor=\"#F2F2F2\",\n tickformat=\".1%\",\n ),\n paper_bgcolor=\"#FFFFFF\",\n plot_bgcolor=\"#FFFFFF\",\n )\n\n bar_figure.update_layout(layout)\n\n return bar_figure\n\n\ndef make_country_figure(country1, country2, measure, measurename, measuretitle):\n \"\"\"\n Function creates scatter chart for section two.\n \"\"\"\n df = output[0:40]\n data1 = (df.loc[(df[\"country\"] == country1)]).reset_index(drop=True)\n data2 = (df.loc[(df[\"country\"] == country2)]).reset_index(drop=True)\n\n data1_assets = data1[\n [\n \"country\",\n \"name\",\n measure + \"_land\",\n measure + \"_inventory\",\n measure + \"_ip\",\n measure + \"_buildings\",\n measure + \"_machines\",\n ]\n ]\n data2_assets = data2[\n [\n \"country\",\n \"name\",\n measure + \"_land\",\n measure + \"_inventory\",\n measure + \"_ip\",\n measure + \"_buildings\",\n measure + \"_machines\",\n ]\n ]\n data1_assets = data1_assets.rename(\n columns={\n measure + \"_machines\": \"Machines\",\n measure + \"_buildings\": \"Buildings\",\n measure + \"_ip\": \"Intellectual Property\",\n measure + \"_land\": \"Land\",\n measure + \"_inventory\": \"Inventory\",\n }\n )\n data2_assets = data2_assets.rename(\n columns={\n measure + \"_machines\": \"Machines\",\n measure + 
\"_buildings\": \"Buildings\",\n measure + \"_ip\": \"Intellectual Property\",\n measure + \"_land\": \"Land\",\n measure + \"_inventory\": \"Inventory\",\n }\n )\n data1_assets = pd.melt(data1_assets, id_vars=[\"country\", \"name\"])\n data2_assets = pd.melt(data2_assets, id_vars=[\"country\", \"name\"])\n\n def make_fig(country1, country2, measure, measurename, measuretitle):\n \"\"\"\n creates the Plotly traces\n \"\"\"\n assets_trace1 = go.Scatter(\n x=data1_assets[\"value\"],\n y=data1_assets[\"variable\"],\n marker=dict(\n size=20,\n color=\"#008CCC\",\n ),\n mode=\"markers\",\n name=data1_assets[\"name\"][0],\n marker_symbol=\"circle\",\n )\n assets_trace2 = go.Scatter(\n x=data2_assets[\"value\"],\n y=data2_assets[\"variable\"],\n marker=dict(\n size=20,\n color=\"#FFB400\",\n ),\n mode=\"markers\",\n name=data2_assets[\"name\"][0],\n marker_symbol=\"circle\",\n )\n\n layout = go.Layout(\n title=\"<i>\"\n + data1_assets[\"name\"][0]\n + \" vs. \"\n + data2_assets[\"name\"][0]\n + \",</i>\"\n + \" \"\n + measuretitle\n + \" by Asset and Form of Financing\"\n + \"<br><sup><i>Hover over data to view more information. Toggle legend items to show or hide elements.</i></sup>\",\n xaxis=dict(\n tickformat=\".1%\",\n gridcolor=\"#F2F2F2\",\n zeroline=False,\n ),\n yaxis=dict(gridcolor=\"#8E919A\", linecolor=\"#F2F2F2\", type=\"category\"),\n paper_bgcolor=\"#F2F2F2\",\n plot_bgcolor=\"#F2F2F2\",\n height=400,\n )\n\n fig = go.Figure(data=[assets_trace1, assets_trace2], layout=layout)\n return fig\n\n country_figure = make_fig(\n country1,\n country2,\n measure,\n measurename,\n measuretitle,\n )\n\n return country_figure\n\n\ndef make_financing_figure(rate, ratetitle, ratelabel_bar, ratelabel_point):\n \"\"\"\n Function creates bar chart for section three.\n \"\"\"\n data = output[0:39]\n data = data.sort_values(by=[rate + \"_debt_bias\"], ascending=True).reset_index(\n drop=True\n )\n oecd_avg = np.average(data[rate + \"_debt_bias\"], weights=data[\"weight\"])\n btm = data.name[0]\n mid = data.name[12]\n top = data.name[38]\n usloc = int(data[data[\"name\"] == \"United States (Current Law)\"].index[0])\n ushloc = int(data[data[\"name\"] == \"United States (House)\"].index[0])\n usbloc = int(data[data[\"name\"] == \"United States (Biden)\"].index[0])\n\n colors = [\"#008CCC\"] * 100\n colors[usloc] = \"#00D56F\"\n colors[ushloc] = \"#FFB400\"\n colors[usbloc] = \"#FF8100\"\n stat_colors = [\"#8E919A\"] * 100\n stat_colors[usloc] = \"#00D56F\"\n stat_colors[ushloc] = \"#FFB400\"\n stat_colors[usbloc] = \"#FF8100\"\n\n fig_bar = go.Bar(\n x=data[\"name\"],\n y=data[rate + \"_debt_bias\"],\n marker_color=colors,\n name=ratelabel_bar,\n )\n fig_equity = go.Scatter(\n x=data[\"name\"],\n y=data[rate + \"_equity_overall\"],\n mode=\"markers\",\n marker_symbol=\"circle\",\n marker_size=8,\n marker_color=stat_colors,\n marker_line_color=\"#8E919A\",\n marker_line_width=2,\n name=ratelabel_point + \" on Equity <br>Financed Investment\",\n )\n fig_debt = go.Scatter(\n x=data[\"name\"],\n y=data[rate + \"_debt_overall\"],\n mode=\"markers\",\n marker_symbol=\"square-open\",\n marker_size=8,\n marker_color=stat_colors,\n marker_line_width=2,\n name=ratelabel_point + \" on Debt <br>Financed Investment\",\n )\n fig_oecd = go.Scatter(\n x=[btm, mid, top],\n y=[oecd_avg, oecd_avg, oecd_avg],\n mode=\"lines+text\",\n name=\"OECD Average<br>\" + ratelabel_bar,\n text=[\"\", \"OECD Average \" + ratelabel_bar, \"\"],\n textposition=\"top center\",\n textfont=dict(color=\"#FB0023\"),\n line=dict(\n 
color=\"#FB0023\",\n dash=\"dash\",\n ),\n hovertemplate=\"(OECD Average, %{y})\",\n hoverlabel=dict(bgcolor=\"#FB0023\"),\n )\n layout = go.Layout(\n title=ratelabel_bar\n + \", Measured by \"\n + ratetitle\n + \" in the OECD, Current Law and Proposals\"\n + \"<br><sup><i>Hover over data to view more information. Toggle legend items to show or hide elements.</i></sup>\",\n yaxis=dict(\n gridcolor=\"#F2F2F2\",\n tickformat=\".1%\",\n zerolinecolor=\"#F2F2F2\",\n ),\n paper_bgcolor=\"#FFFFFF\",\n plot_bgcolor=\"#FFFFFF\",\n height=600,\n )\n financing_figure = go.Figure(\n data=[fig_equity, fig_debt, fig_bar, fig_oecd], layout=layout\n )\n return financing_figure\n\n\ndef make_alternative_figure(rate, ratetitle, ratelabel, alternative, axisrange):\n \"\"\"\n Function creates bar chart for section four.\n \"\"\"\n data = output[0:39]\n data_alt = output[40:]\n data = data.sort_values(by=[rate], ascending=True).reset_index(drop=True)\n oecd_avg = np.average(data[rate], weights=data[\"weight\"])\n btm = data.name[0]\n mid = data.name[12]\n top = data.name[38]\n\n usloc = int(data[data[\"name\"] == \"United States (Current Law)\"].index[0])\n ushloc = int(data[data[\"name\"] == \"United States (House)\"].index[0])\n usbloc = int(data[data[\"name\"] == \"United States (Biden)\"].index[0])\n\n hoverlabel = \"\"\n\n if alternative != \"CL\":\n if alternative == \"BONUS\":\n cl_alt = data_alt[data_alt[\"country\"] == \"USA_1\"]\n cl_alt = cl_alt.set_index([pd.Index([usloc])])\n data.loc[cl_alt.index] = np.nan\n data = data.combine_first(cl_alt)\n h_alt = data_alt[data_alt[\"country\"] == \"USA_H1\"]\n h_alt = h_alt.set_index([pd.Index([ushloc])])\n data.loc[h_alt.index] = np.nan\n data = data.combine_first(h_alt)\n b_alt = data_alt[data_alt[\"country\"] == \"USA_B1\"]\n b_alt = b_alt.set_index([pd.Index([usbloc])])\n data.loc[b_alt.index] = np.nan\n data = data.combine_first(b_alt)\n hoverlabel = \"100% Bonus Depreciation\"\n if alternative == \"RND\":\n cl_alt = data_alt[data_alt[\"country\"] == \"USA_2\"]\n cl_alt = cl_alt.set_index([pd.Index([usloc])])\n data.loc[cl_alt.index] = np.nan\n data = data.combine_first(cl_alt)\n h_alt = data_alt[data_alt[\"country\"] == \"USA_H2\"]\n h_alt = h_alt.set_index([pd.Index([ushloc])])\n data.loc[h_alt.index] = np.nan\n data = data.combine_first(h_alt)\n b_alt = data_alt[data_alt[\"country\"] == \"USA_B2\"]\n b_alt = b_alt.set_index([pd.Index([usbloc])])\n data.loc[b_alt.index] = np.nan\n data = data.combine_first(b_alt)\n hoverlabel = \"100% Bonus Depreciation<br>and R&D Expensing\"\n if alternative == \"EBITDA\":\n cl_alt = data_alt[data_alt[\"country\"] == \"USA_3\"]\n cl_alt = cl_alt.set_index([pd.Index([usloc])])\n data.loc[cl_alt.index] = np.nan\n data = data.combine_first(cl_alt)\n h_alt = data_alt[data_alt[\"country\"] == \"USA_H3\"]\n h_alt = h_alt.set_index([pd.Index([ushloc])])\n data.loc[h_alt.index] = np.nan\n data = data.combine_first(h_alt)\n b_alt = data_alt[data_alt[\"country\"] == \"USA_B3\"]\n b_alt = b_alt.set_index([pd.Index([usbloc])])\n data.loc[b_alt.index] = np.nan\n data = data.combine_first(b_alt)\n hoverlabel = \"100% Bonus Depreciation,<br>R&D Expensing,<br>and 30% EBITDA Limitation\"\n if alternative == \"FDII\":\n cl_alt = data_alt[data_alt[\"country\"] == \"USA_4\"]\n cl_alt = cl_alt.set_index([pd.Index([usloc])])\n data.loc[cl_alt.index] = np.nan\n data = data.combine_first(cl_alt)\n h_alt = data_alt[data_alt[\"country\"] == \"USA_H4\"]\n h_alt = h_alt.set_index([pd.Index([ushloc])])\n data.loc[h_alt.index] = np.nan\n data = 
data.combine_first(h_alt)\n b_alt = data_alt[data_alt[\"country\"] == \"USA_B4\"]\n b_alt = b_alt.set_index([pd.Index([usbloc])])\n data.loc[b_alt.index] = np.nan\n data = data.combine_first(b_alt)\n hoverlabel = \"100% Bonus Depreciation,<br>R&D Expensing,<br>30% EBITDA Limitation,<br>and FDII\"\n\n data = data.sort_values(by=[rate], ascending=True).reset_index(drop=True)\n btm = data.name[0]\n mid = data.name[12]\n top = data.name[38]\n\n usloc = int(data[data[\"name\"] == \"United States (Current Law)\"].index[0])\n ushloc = int(data[data[\"name\"] == \"United States (House)\"].index[0])\n usbloc = int(data[data[\"name\"] == \"United States (Biden)\"].index[0])\n\n colors = [\"#008CCC\"] * 100\n colors[usloc] = \"#00D56F\"\n colors[ushloc] = \"#FFB400\"\n colors[usbloc] = \"#FF8100\"\n stat_colors = [\"#67C5F0\"] * 100\n stat_colors[usloc] = \"#00D56F\"\n stat_colors[ushloc] = \"#FFB400\"\n stat_colors[usbloc] = \"#FF8100\"\n\n alternative_figure = go.Figure(\n data=go.Bar(\n x=data[\"name\"],\n y=data[rate],\n marker_color=colors,\n name=ratelabel,\n )\n )\n\n alternative_figure.add_trace(\n go.Scatter(\n x=[btm, mid, top],\n y=[oecd_avg, oecd_avg, oecd_avg],\n mode=\"lines+text\",\n name=ratelabel,\n text=[\"\", \"OECD Average \" + ratelabel, \"\"],\n textposition=\"top center\",\n textfont=dict(color=\"#FF5C68\"),\n line=dict(\n color=\"#FF5C68\",\n dash=\"dash\",\n ),\n hovertemplate=\"(OECD Average, %{y})\",\n hoverlabel=dict(bgcolor=\"#FF5C68\"),\n )\n )\n\n if alternative != \"CL\":\n alternative_figure.add_trace(\n go.Scatter(\n x=[\n \"United States (Current Law)\",\n \"United States (House)\",\n \"United States (Biden)\",\n ],\n y=[\n data[rate][usloc] + 0.015,\n data[rate][ushloc] + 0.015,\n data[rate][usbloc] + 0.015,\n ],\n mode=\"markers\",\n marker_symbol=\"asterisk\",\n marker_size=8,\n marker_line_color=[\"#00D56F\", \"#FFB400\", \"#FF8100\"],\n marker_line_width=1,\n name=\"Alternative Policy\",\n hovertemplate=\"<b>This Estimate Includes:</b><br>\" + hoverlabel,\n )\n )\n\n layout = go.Layout(\n showlegend=False,\n title=ratetitle\n + \" in the OECD, Current Law, Proposals, and Alternative Policies\"\n + \"<br><sup><i>Hover over data to view more information.</i></sup>\",\n yaxis=dict(\n gridcolor=\"#8E919A\",\n zerolinecolor=\"#8E919A\",\n tickformat=\".1%\",\n range=axisrange,\n ),\n paper_bgcolor=\"#F2F2F2\",\n plot_bgcolor=\"#F2F2F2\",\n height=500,\n )\n\n alternative_figure.update_layout(layout)\n\n return alternative_figure\n\n\n# Initialize App\napp = dash.Dash(\n __name__,\n url_base_pathname=os.environ.get(\"URL_BASE_PATHNAME\", \"/\"),\n)\n\n# Create App Layout\napp.layout = html.Div(\n [\n # HEADER\n html.Div(\n [\n html.Img(\n src=\"data:image/png;base64,{}\".format(encoded_image.decode()),\n height=80,\n )\n ]\n ),\n dcc.Markdown(\n \"\"\"\n ## The Tax Burden on Corporations and Proposals to Reform the US Tax System\n \"\"\"\n \"\"\"\n *Modeling by <a href=\"https://www.aei.org/profile/kyle-pomerleau/\" children=\"Kyle Pomerleau\" style=\"color:#4f5866;text-decoration:none\" target=\"blank\" />. Dashboard development by <a href=\"https://grantseiter.com/\" children=\"Grant M. Seiter\" style=\"color:#4f5866;text-decoration:none\" target=\"blank\" />.*\n \"\"\",\n style={\"max-width\": \"1000px\"},\n dangerously_allow_html=True,\n ),\n html.Div(\n [\n dcc.Markdown(\n \"\"\"\n The Biden Administration and Democratic lawmakers in Congress are now considering proposals to raise the tax burden on corporations in the United States. 
Their proposals would increase the corporate income tax rate from 21 percent to a rate between 25 percent and 28 percent. In addition, they have proposed reforming the tax treatment of foreign profits of US multinational corporations and repealing or reforming FDII. Their goals are to increase federal revenue, increase the tax burden on capital income, and reduce profit shifting by US multinational corporations. \n \n This dashboard compares the tax burden on corporations in the United States under current law to the corporate tax burdens of 36 member nations of the Organisation for Economic Co-operation and Development (OECD). It also considers two leading proposals to reform US corporate income taxation and several alternative changes to policy.\n \"\"\",\n style={\"text-align\": \"justify\"},\n ),\n ],\n className=\"twelve columns\",\n ),\n # COMPARING TAX RATES (SECTION ONE)\n html.Div(\n [\n html.H6(\n \"Comparing Effective Corporate Tax Rates in OECD Nations\",\n style={\n \"margin-top\": \"0\",\n \"font-weight\": \"bold\",\n \"text-align\": \"justify\",\n },\n ),\n dcc.Markdown(\n \"\"\"\n Corporate tax systems are complex and vary significantly throughout the OECD. There is no single measure of the corporate tax burden that captures every aspect of a corporate income tax. This analysis focuses on three measures: the combined statutory corporate income tax rate, the marginal effective corporate tax rate (METR), and the average effective corporate tax rate (AETR). Each measure represents a different component of a corporation’s tax burden and can be used to evaluate how a corporate income tax may distort behavior.\n \"\"\",\n className=\"twelve columns\",\n style={\"text-align\": \"justify\"},\n dangerously_allow_html=True,\n ),\n html.Label(\n \"Toggle the tabs below to view estimates of each measure.\",\n className=\"twelve columns\",\n style={\n \"font-style\": \"italic\",\n \"font-size\": \"90%\",\n \"margin-bottom\": \"10px\",\n },\n ),\n dcc.Markdown(\n \"\"\" \n **The Statutory Corporate Income Tax Rate** is the rate at which each dollar of corporate taxable income is taxed. Statutory corporate tax rates in the OECD include both central (federal) corporate rates and sub-central (state and local) tax rates. The statutory corporate tax rate impacts the incentive to locate profits in a given jurisdiction.\n\n **The Marginal Effective Tax Rate (METR)** measures the tax burden on marginal investment for an investment that breaks even in present value. The METR incorporates the statutory tax rate, deductions and credits that corporations receive for new investments, special lower tax rates for certain types of income, and deductions for financing costs (interest payments or equity payments). The METR measures the impact a corporate tax has on the level of investment in a country.\n \n **The Average Effective Tax Rate (AETR)** measures the tax burden on new investments that earn above-normal returns or economic rents. Like the METR, the AETR considers both the statutory corporate tax rate, deductions, credits, and other special provisions that a tax system may provide. This rate can affect the decision to locate investment in different jurisdictions. 
\n \"\"\",\n className=\"results_container three columns\",\n style={\n \"text-align\": \"justify\",\n \"margin-bottom\": \"10px\",\n \"font-size\": \"90%\",\n },\n dangerously_allow_html=True,\n ),\n html.Div(\n [\n dcc.Tabs(\n id=\"bar_figure_tabs\",\n value=\"stat_tab\",\n children=[\n dcc.Tab(\n label=\"Statutory Rate\",\n value=\"stat_tab\",\n className=\"custom_tab\",\n selected_className=\"custom_tab_selected\",\n ),\n dcc.Tab(\n label=\"Marginal Rate (METR)\",\n value=\"metr_tab\",\n className=\"custom-tab\",\n selected_className=\"custom_tab_selected\",\n ),\n dcc.Tab(\n label=\"Average Rate (AETR)\",\n value=\"aetr_tab\",\n className=\"custom-tab\",\n selected_className=\"custom_tab_selected\",\n ),\n ],\n )\n ],\n className=\"custom_tabs_container eight columns\",\n ),\n html.Div(\n [\n dcc.Graph(id=\"bar_figure\"),\n ],\n className=\"eight columns\",\n ),\n dcc.Markdown(\n id=\"analysis_text\",\n className=\"eight columns\",\n style={\"text-align\": \"justify\"},\n dangerously_allow_html=True,\n ),\n html.P(\n \"Source: Author's calculations.\",\n className=\"control_label twelve columns\",\n style={\n \"text-align\": \"right\",\n \"font-style\": \"italic\",\n \"font-size\": \"80%\",\n },\n ),\n # ASSET BIAS (SECTION TWO)\n html.Div(\n [\n dcc.Markdown(\n \"\"\"\n ** Effective Tax Rates by Asset Type **\n\n Effective tax rates in the OECD vary significantly by type of asset. Some countries provide accelerated depreciation for certain assets. Effective tax rates also can be impacted by special lower tax rates on certain assets. For example, several countries provide special lower tax rates on intellectual property (IP) products through patent boxes. The United States provides a lower tax rate on imputed returns to IP through FDII.\n\n Under current law, the US marginal effective tax rate on buildings (19 percent), inventories (25.8 percent), and land (17.5 percent) are all higher than the OECD averages. The US METR on IP (13.8 percent) is significantly higher than the OECD average, under the current-law specification, which assumes that the requirement to amortize research and development expenses (slated to take effect in 2022) is in place. That requirement is unique to the United States. Proposals in the United States to increase the corporate tax burden would raise the METR on all assets.\n\n The US average effective tax rate under current law on each asset is roughly in line with the OECD average except for intellectual property. The higher-than-average AETR on IP reflects the amortization of research and development costs under current law. IP still faces a slightly lower AETR than other assets in the United States, however, due to FDII. Under either proposal to increase the US corporate tax burden, AETRs on all assets would be the highest or close to the highest in the OECD. 
\n \"\"\",\n className=\"twelve columns\",\n style={\"text-align\": \"justify\"},\n dangerously_allow_html=True,\n ),\n html.Label(\n \"Select an effective tax rate to display in the figure below.\",\n style={\"font-style\": \"italic\", \"font-size\": \"90%\"},\n className=\"twelve columns\",\n ),\n dcc.Dropdown(\n id=\"country_drop_rate\",\n options=[\n {\n \"label\": \"Marginal Effective Tax Rate (METR)\",\n \"value\": \"metr\",\n },\n {\n \"label\": \"Average Effective Tax Rate (AETR)\",\n \"value\": \"aetr\",\n },\n ],\n value=\"metr\",\n clearable=False,\n searchable=False,\n className=\"twelve columns\",\n style={\n \"justify-content\": \"center\",\n \"margin-bottom\": \"5px\",\n },\n ),\n html.Label(\n \"Select two different countries to compare.\",\n style={\"font-style\": \"italic\", \"font-size\": \"90%\"},\n className=\"twelve columns\",\n ),\n dcc.Dropdown(\n id=\"country_drop_value1\",\n options=[\n {\"label\": \"Australia\", \"value\": \"AUS\"},\n {\"label\": \"Austria\", \"value\": \"AUT\"},\n {\"label\": \"Belgium\", \"value\": \"BEL\"},\n {\"label\": \"Canada\", \"value\": \"CAN\"},\n {\"label\": \"Chile\", \"value\": \"CHL\"},\n {\"label\": \"Colombia\", \"value\": \"COL\"},\n {\"label\": \"Czech Republic\", \"value\": \"CZE\"},\n {\"label\": \"Denmark\", \"value\": \"DNK\"},\n {\"label\": \"Estonia\", \"value\": \"EST\"},\n {\"label\": \"Finland\", \"value\": \"FIN\"},\n {\"label\": \"France\", \"value\": \"FRA\"},\n {\"label\": \"Germany\", \"value\": \"DEU\"},\n {\"label\": \"Greece\", \"value\": \"GRC\"},\n {\"label\": \"Hungary\", \"value\": \"HUN\"},\n {\"label\": \"Iceland\", \"value\": \"ISL\"},\n {\"label\": \"Ireland\", \"value\": \"IRL\"},\n {\"label\": \"Israel\", \"value\": \"ISR\"},\n {\"label\": \"Italy\", \"value\": \"ITA\"},\n {\"label\": \"Japan\", \"value\": \"JPN\"},\n {\"label\": \"Korea\", \"value\": \"KOR\"},\n {\"label\": \"Latvia\", \"value\": \"LVA\"},\n {\"label\": \"Lithuania\", \"value\": \"LTU\"},\n {\"label\": \"Luxembourg\", \"value\": \"LUX\"},\n {\"label\": \"Mexico\", \"value\": \"MEX\"},\n {\"label\": \"Netherlands\", \"value\": \"NLD\"},\n {\"label\": \"New Zealand\", \"value\": \"NZL\"},\n {\"label\": \"Norway\", \"value\": \"NOR\"},\n {\"label\": \"OECD Average\", \"value\": \"OECD\"},\n {\"label\": \"Poland\", \"value\": \"POL\"},\n {\"label\": \"Portugal\", \"value\": \"PRT\"},\n {\"label\": \"Slovakia\", \"value\": \"SVK\"},\n {\"label\": \"Slovenia\", \"value\": \"SVN\"},\n {\"label\": \"Spain\", \"value\": \"ESP\"},\n {\"label\": \"Sweden\", \"value\": \"SWE\"},\n {\"label\": \"Switzerland\", \"value\": \"CHE\"},\n {\"label\": \"Turkey\", \"value\": \"TUR\"},\n {\"label\": \"United Kingdom\", \"value\": \"GBR\"},\n {\n \"label\": \"United States (Current Law)\",\n \"value\": \"USA\",\n },\n {\"label\": \"United States (House)\", \"value\": \"USA_H\"},\n {\"label\": \"United States (Biden)\", \"value\": \"USA_B\"},\n ],\n multi=False,\n clearable=False,\n searchable=True,\n value=\"USA\",\n className=\"six columns\",\n style={\n \"justify-content\": \"center\",\n },\n ),\n dcc.Dropdown(\n id=\"country_drop_value2\",\n options=[\n {\"label\": \"Australia\", \"value\": \"AUS\"},\n {\"label\": \"Austria\", \"value\": \"AUT\"},\n {\"label\": \"Belgium\", \"value\": \"BEL\"},\n {\"label\": \"Canada\", \"value\": \"CAN\"},\n {\"label\": \"Chile\", \"value\": \"CHL\"},\n {\"label\": \"Colombia\", \"value\": \"COL\"},\n {\"label\": \"Czech Republic\", \"value\": \"CZE\"},\n {\"label\": \"Denmark\", \"value\": \"DNK\"},\n {\"label\": 
\"Estonia\", \"value\": \"EST\"},\n {\"label\": \"Finland\", \"value\": \"FIN\"},\n {\"label\": \"France\", \"value\": \"FRA\"},\n {\"label\": \"Germany\", \"value\": \"DEU\"},\n {\"label\": \"Greece\", \"value\": \"GRC\"},\n {\"label\": \"Hungary\", \"value\": \"HUN\"},\n {\"label\": \"Iceland\", \"value\": \"ISL\"},\n {\"label\": \"Ireland\", \"value\": \"IRL\"},\n {\"label\": \"Israel\", \"value\": \"ISR\"},\n {\"label\": \"Italy\", \"value\": \"ITA\"},\n {\"label\": \"Japan\", \"value\": \"JPN\"},\n {\"label\": \"Korea\", \"value\": \"KOR\"},\n {\"label\": \"Latvia\", \"value\": \"LVA\"},\n {\"label\": \"Lithuania\", \"value\": \"LTU\"},\n {\"label\": \"Luxembourg\", \"value\": \"LUX\"},\n {\"label\": \"Mexico\", \"value\": \"MEX\"},\n {\"label\": \"Netherlands\", \"value\": \"NLD\"},\n {\"label\": \"New Zealand\", \"value\": \"NZL\"},\n {\"label\": \"Norway\", \"value\": \"NOR\"},\n {\"label\": \"OECD Average\", \"value\": \"OECD\"},\n {\"label\": \"Poland\", \"value\": \"POL\"},\n {\"label\": \"Portugal\", \"value\": \"PRT\"},\n {\"label\": \"Slovakia\", \"value\": \"SVK\"},\n {\"label\": \"Slovenia\", \"value\": \"SVN\"},\n {\"label\": \"Spain\", \"value\": \"ESP\"},\n {\"label\": \"Sweden\", \"value\": \"SWE\"},\n {\"label\": \"Switzerland\", \"value\": \"CHE\"},\n {\"label\": \"Turkey\", \"value\": \"TUR\"},\n {\"label\": \"United Kingdom\", \"value\": \"GBR\"},\n {\n \"label\": \"United States (Current Law)\",\n \"value\": \"USA\",\n },\n {\"label\": \"United States (House)\", \"value\": \"USA_H\"},\n {\"label\": \"United States (Biden)\", \"value\": \"USA_B\"},\n ],\n multi=False,\n clearable=False,\n searchable=True,\n value=\"OECD\",\n className=\"six columns\",\n style={\n \"justify-content\": \"center\",\n },\n ),\n html.Div(\n [dcc.Graph(id=\"country_figure\")],\n className=\"twelve columns\",\n style={\n \"justify-content\": \"center\",\n },\n ),\n html.P(\n \"Source: Author's calculations.\",\n className=\"control_label twelve columns\",\n style={\n \"text-align\": \"right\",\n \"font-style\": \"italic\",\n \"font-size\": \"80%\",\n },\n ),\n ],\n className=\"effect_container twelve columns\",\n ),\n # FINANCING BIAS (SECTION THREE)\n html.Div(\n [\n dcc.Markdown(\n \"\"\"\n ** Effective Tax Rates by Source of Financing **\n\n Corporations can finance new projects through equity by using either retained earnings or issuing new shares. Alternatively, corporations can finance new investments with debt by issuing bonds. Equity payments made to shareholders (dividends) are not deductible against taxable income, while interest on debt is deductible against taxable income. Debt-financed investment is therefore tax-preferred. Some countries have policies that offset the traditional bias in favor of debt, such as allowances for corporate equity, limitations on interest expense, or cash-flow taxes (which mostly avoid the debt-equity bias by disallowing interest deductions).\n\n Under current law, the US corporate tax creates a 29-percentage point bias in favor of debt-financed investment as measured by METRs (7.1 percentage points as measured by AETRs). This is slightly below the OECD average of 32 percent (7.3 percent for AETRs) and is in line with most countries. However, the Biden and House Ways and Means proposals would increase the bias in favor of debt-financed investment by increasing the value of the interest deduction (because the deductions would be claimed at the new higher corporate tax rates) and raising the tax burden on equity-financed investment. 
Under the House Ways and Means proposal, the bias in favor of debt (35.0 percent for METRs and 8.6 percent for AETRs) would be slightly higher than the OECD average (32 percent and 7.3 percent). Under Biden’s proposal, the bias in favor of debt (36.9 percent and 9 percent) would also be slightly above the OECD average.\n \"\"\",\n className=\"twelve columns\",\n style={\"text-align\": \"justify\"},\n dangerously_allow_html=True,\n ),\n html.Label(\n \"Select a different effective tax rate to display in the figure below.\",\n style={\"font-style\": \"italic\", \"font-size\": \"90%\"},\n className=\"twelve columns\",\n ),\n dcc.Dropdown(\n id=\"financing_drop_rate\",\n options=[\n {\n \"label\": \"Marginal Effective Tax Rate (METR)\",\n \"value\": \"metr\",\n },\n {\n \"label\": \"Average Effective Tax Rate (AETR)\",\n \"value\": \"aetr\",\n },\n ],\n value=\"metr\",\n clearable=False,\n searchable=False,\n className=\"twelve columns\",\n style={\n \"justify-content\": \"center\",\n \"margin-bottom\": \"5px\",\n },\n ),\n html.Div(\n [dcc.Graph(id=\"financing_figure\")],\n className=\"twelve columns\",\n style={\n \"justify-content\": \"center\",\n },\n ),\n html.P(\n \"Source: Author's calculations.\",\n className=\"control_label twelve columns\",\n style={\n \"text-align\": \"right\",\n \"font-style\": \"italic\",\n \"font-size\": \"80%\",\n },\n ),\n ],\n className=\"effect2_container twelve columns\",\n ),\n # ALTERNATIVE POLICIES (SECTION FOUR)\n html.Div(\n [\n dcc.Markdown(\n \"\"\"\n ** Impact of Alternative Policies on Effective Tax Rates in the United States **\n\n The effective tax rate estimates for the OECD countries, above, reflect current law and exclude temporary policies scheduled to change over the next decade. For the United States, the estimates exclude 100 percent bonus depreciation, which is scheduled to phase out over five years starting in 2023 and include amortization of research and development costs and tighter limitations on net interest expenses, which are scheduled to change in 2022. Likewise, the deduction for FDII is set to 21.875 percent, which is its scheduled value in 2026. Extending temporary proposals and maintaining FDII at current policy levels would have a significant impact on the tax burden on investment in the United States. The interactive figure below allows users to consider the impact of maintaining these alternative policies on effective tax rates in the United States.\n \"\"\",\n className=\"twelve columns\",\n style={\"text-align\": \"justify\"},\n dangerously_allow_html=True,\n ),\n html.Label(\n \"Use the buttons and toggle the tabs to update US effective tax rates in the figure below. 
The impact of each policy includes the sum of the previous policies.\",\n style={\n \"font-style\": \"italic\",\n \"font-size\": \"90%\",\n \"margin-bottom\": \"10px\",\n },\n className=\"twelve columns\",\n ),\n html.Div(\n [\n dcc.RadioItems(\n id=\"alternative_radio_value\",\n options=[\n {\"label\": \"Current Law\", \"value\": \"CL\"},\n {\n \"label\": \"(1) Maintain 100% Bonus Depreciation\",\n \"value\": \"BONUS\",\n },\n {\n \"label\": \"(2) Maintain R&D Expensing\",\n \"value\": \"RND\",\n },\n {\n \"label\": \"(3) Maintain 30% EBITDA Limitation\",\n \"value\": \"EBITDA\",\n },\n {\"label\": \"(4) Maintain FDII\", \"value\": \"FDII\"},\n ],\n value=\"CL\",\n labelStyle={\n \"width\": \"160%\",\n \"display\": \"inline-block\",\n },\n ),\n dcc.Markdown(\n id=\"alternative_text\",\n style={\n \"text-align\": \"justify\",\n \"margin-top\": \"20px\",\n \"margin-bottom\": \"20px\",\n },\n dangerously_allow_html=True,\n ),\n ],\n className=\"three columns\",\n ),\n html.Div(\n [\n dcc.Tabs(\n id=\"alternative_figure_tabs\",\n value=\"metr_tab\",\n children=[\n dcc.Tab(\n label=\"Marginal Rate (METR)\",\n value=\"metr_tab\",\n className=\"custom-tab\",\n selected_className=\"custom_tab2_selected\",\n ),\n dcc.Tab(\n label=\"Average Rate (AETR)\",\n value=\"aetr_tab\",\n className=\"custom-tab\",\n selected_className=\"custom_tab2_selected\",\n ),\n ],\n ),\n ],\n className=\"custom_tabs_container eight columns\",\n ),\n html.Div(\n [\n dcc.Graph(id=\"alternative_figure\"),\n ],\n className=\"eight columns\",\n ),\n html.P(\n \"Source: Author's calculations.\",\n className=\"control_label twelve columns\",\n style={\n \"text-align\": \"right\",\n \"font-style\": \"italic\",\n \"font-size\": \"80%\",\n },\n ),\n ],\n className=\"effect_container twelve columns\",\n ),\n ],\n className=\"description_container twelve columns\",\n ),\n # FOOTER\n html.Hr(),\n html.Div(\n [\n dcc.Markdown(\n \"\"\"\n **Notes:** This dashboard is an extension of research presented in <a href=\"https://www.aei.org/research-products/report/the-tax-burden-on-corporations-a-comparison-of-organisation-for-economic-co-operation-and-development-countries-and-proposals-to-reform-the-us-tax-system/\" children=\"The Tax Burden on Corporations: A Comparison of Organisation for Economic Co-operation and Development Countries and Proposals to Reform the US Tax System\" style=\"color:#008CCC;font-style:italic\" target=\"blank\" /> (Pomerleau, 2021). The code that powers this data visualization can be found\n <a href=\"https://github.com/grantseiter/OECD-Corporate-Tax-Burden-App\" children=\"here\" style=\"color:#008CCC\" target=\"blank\" />.\n Feedback or questions? Contact us <a href=\"mailto:[email protected]\" children=\"here\" style=\"color:#008CCC\" />.\n\n Effective tax rates on corporate investment were estimated using a framework developed by Devereux and Griffith (1999) and by generally following the method outlined in Spengel et. Al. (2019). These rates are forward-looking and measure the tax burden that a corporation expects to pay on new domestic investment in each jurisdiction. The parameters used to estimate effective tax rates reflect current law in each country and are set to their long-run values. As such, this analysis ignores several temporary changes made to corporate taxes in response to the COVID-19 pandemic. 
The full methodology is detailed in *The Tax Burden on Corporations: A Comparison of Organisation for Economic Co-operation and Development Countries and Proposals to Reform the US Tax System* (Pomerleau, 2021). \n \n For more details on additional corporate tax reforms put forth by the Biden Administration and lawmakers in congress, see <a href=\"https://www.aei.org/research-products/report/bidens-reforms-to-the-tax-treatment-of-us-multinational-corporations-the-knowns-and-unknowns/\" children=\"Biden’s Reforms to the Tax Treatment of US Multinational Corporations: The Knowns and Unknowns\" style=\"color:#008CCC;font-style:italic\" target=\"blank\" /> (Pomerleau, 2021).\n \"\"\",\n dangerously_allow_html=True,\n style={\n \"font-size\": \"90%\",\n \"margin-bottom\": \"10px\",\n },\n ),\n html.Div(\n [\n html.Button(\n \"Download Data as CSV\",\n id=\"btn_csv\",\n style={\n \"font-size\": \"90%\",\n \"margin-bottom\": \"20px\",\n },\n ),\n dcc.Download(id=\"download-dataframe-csv\"),\n ]\n ),\n ],\n className=\"footer twelve columns\",\n ),\n ]\n)\n\n# Callbacks\[email protected](\n Output(\"bar_figure\", \"figure\"),\n Input(\"bar_figure_tabs\", \"value\"),\n)\ndef update(bar_figure_tabs):\n if bar_figure_tabs == \"stat_tab\":\n rate = \"statutory_tax_rate\"\n ratetitle = \"Statutory Corporate Tax Rates\"\n ratelabel = \"Statutory Rate\"\n stat_marker = False\n if bar_figure_tabs == \"metr_tab\":\n rate = \"metr_overall\"\n ratetitle = \"Marginal Effective Corporate Tax Rates (METRs)\"\n ratelabel = \"METR\"\n stat_marker = True\n if bar_figure_tabs == \"aetr_tab\":\n rate = \"aetr_overall\"\n ratetitle = \"Average Effective Corporate Tax Rates (AETRs)\"\n ratelabel = \"AETR\"\n stat_marker = True\n\n bar_figure = make_bar_figure(rate, ratetitle, ratelabel, stat_marker)\n\n return bar_figure\n\n\[email protected](\n Output(\"analysis_text\", \"children\"),\n Input(\"bar_figure_tabs\", \"value\"),\n)\ndef update(bar_figure_tabs):\n if bar_figure_tabs == \"stat_tab\":\n text = \"\"\"\n If the US federal corporate income tax rate is increased to 28 percent, as proposed in Biden’s proposal, the United States would have the second-highest combined statutory corporate tax rate in the OECD at 32.3 percent (only lower than Colombia). The House Ways and Means proposal, which would raise the federal tax rate to 26.5 percent, would increase the United States’ combined statutory corporate tax rate to 30.9 percent, which would be among the highest, but still below Colombia and Portugal. \n \"\"\"\n if bar_figure_tabs == \"metr_tab\":\n text = \"\"\"\n The proposals to raise the corporate tax burden in the United States would increase the tax burden on new corporate investment in the United States to one of the highest in the OECD. Under the Biden proposal, the METR would be 23.7 percent, which would be the second-highest in the OECD (only lower than Colombia). The House Ways and Means proposal would increase the METR to 22.4 percent, which would only be lower than Japan (22.9 percent) and Colombia (23.9 percent).\n \"\"\"\n if bar_figure_tabs == \"aetr_tab\":\n text = \"\"\"\n The Biden Administration proposal would raise the AETR to 29.5 percent. This would be the second-highest among all OECD nations (only lower than Colombia) and 6.6 percentage points above the OECD average. The House Ways and Means proposal would raise the US AETR to 28 percent. 
This would also result in the second-highest AETR among OECD nations.\n \"\"\"\n return text\n\n\[email protected](\n Output(\"country_figure\", \"figure\"),\n Input(\"country_drop_rate\", \"value\"),\n Input(\"country_drop_value1\", \"value\"),\n Input(\"country_drop_value2\", \"value\"),\n)\ndef update(country_drop_rate, country_drop_value1, country_drop_value2):\n if country_drop_rate == \"metr\":\n country1 = country_drop_value1\n country2 = country_drop_value2\n measure = country_drop_rate\n measurename = \"METR\"\n measuretitle = \"METRs\"\n if country_drop_rate == \"aetr\":\n country1 = country_drop_value1\n country2 = country_drop_value2\n measure = country_drop_rate\n measurename = \"AETR\"\n measuretitle = \"AETRs\"\n country_figure = make_country_figure(\n country1, country2, measure, measurename, measuretitle\n )\n\n return country_figure\n\n\[email protected](\n Output(\"financing_figure\", \"figure\"),\n Input(\"financing_drop_rate\", \"value\"),\n)\ndef update(financing_drop_rate):\n if financing_drop_rate == \"metr\":\n rate = financing_drop_rate\n ratetitle = \"METRs\"\n ratelabel_bar = \"Debt-Equity Bias\"\n ratelabel_point = \"METR\"\n if financing_drop_rate == \"aetr\":\n rate = financing_drop_rate\n ratetitle = \"AETRs\"\n ratelabel_bar = \"Debt-Equity Bias\"\n ratelabel_point = \"AETR\"\n financing_figure = make_financing_figure(\n rate, ratetitle, ratelabel_bar, ratelabel_point\n )\n\n return financing_figure\n\n\[email protected](\n Output(\"alternative_figure\", \"figure\"),\n Input(\"alternative_radio_value\", \"value\"),\n Input(\"alternative_figure_tabs\", \"value\"),\n)\ndef update(alternative_radio_value, alternative_figure_tabs):\n if alternative_figure_tabs == \"metr_tab\":\n rate = \"metr_overall\"\n ratetitle = \"METRs\"\n ratelabel = \"METR\"\n alternative = alternative_radio_value\n axisrange = [-0.20, 0.20]\n if alternative_figure_tabs == \"aetr_tab\":\n rate = \"aetr_overall\"\n ratetitle = \"AETRs\"\n ratelabel = \"AETR\"\n alternative = alternative_radio_value\n axisrange = [0.00, 0.31]\n\n alternative_figure = make_alternative_figure(\n rate, ratetitle, ratelabel, alternative, axisrange\n )\n\n return alternative_figure\n\n\[email protected](\n Output(\"alternative_text\", \"children\"),\n Input(\"alternative_radio_value\", \"value\"),\n Input(\"alternative_figure_tabs\", \"value\"),\n)\ndef update(alternative_radio_value, alternative_figure_tabs):\n if alternative_figure_tabs == \"metr_tab\":\n if alternative_radio_value == \"CL\":\n text = \"\"\"\n Under current law, the tax treatment of certain capital expenses, research and development, interest expense, and intellectual property are scheduled to change over the next few years. These changes contribute to the United States' relatively high effective tax rate on new investment. \n \"\"\"\n if alternative_radio_value == \"BONUS\":\n text = \"\"\"\n Maintaining 100 percent bonus depreciation would have a large impact on the METR on new investment in the United States. 100 percent bonus depreciation would reduce the METR on investment by 5.3 percentage points under current law, 6.2 percentage points under the House Ways and Means proposal, and 6.5 percentage points under Biden’s proposal. \n \"\"\"\n if alternative_radio_value == \"RND\":\n text = \"\"\"\n Maintaining expensing of research and development costs would reduce the METR on new investment in the United States. 
However, the impact would be slightly smaller than that of bonus depreciation (0.9 percentage points under current law, 1.1 under the House Ways and Means proposal, and 1.3 under Biden's proposal).\n \"\"\"\n if alternative_radio_value == \"EBITDA\":\n text = \"\"\"\n Canceling the switch from 30 percent of EBITDA to 30 percent of earnings before interest and taxes (EBIT) for the net interest deduction would reduce the METR on new investment by roughly the same extent as maintaining expensing for research and development costs.\n \"\"\"\n if alternative_radio_value == \"FDII\":\n text = \"\"\"\n Maintaining current policy FDII would have a negligible impact on the marginal tax rate on new investment. If research and development is already expensed, the METR on new investment is already zero. Reducing the rate has no impact on the incentive to invest in research and development. \n \"\"\"\n if alternative_figure_tabs == \"aetr_tab\":\n if alternative_radio_value == \"CL\":\n text = \"\"\"\n Under current law, the tax treatment of certain capital expenses, research and development, interest expense, and intellectual property are scheduled to change over the next few years. These changes contribute to the United States' relatively high effective tax rate on new investment. \n \"\"\"\n if alternative_radio_value == \"BONUS\":\n text = \"\"\"\n Maintaining 100 percent bonus depreciation would have a smaller impact on the AETR on new investment compared to its impact on the METR. 100 percent bonus depreciation would reduce the AETR on investment by 1.4 percentage points under current law and 1.7 percentage points under the House Ways and Means and Biden proposals. \n \"\"\"\n if alternative_radio_value == \"RND\":\n text = \"\"\"\n Maintaining expensing of research and development costs would have a smaller impact on the AETR on new investment compared to its impact on the METR. The policy would reduce the AETR on investment by 0.2 percentage points under current law and 0.3 percentage points under the House Ways and Means and Biden proposals.\n \"\"\"\n if alternative_radio_value == \"EBITDA\":\n text = \"\"\"\n Canceling the switch from 30 percent of EBITDA to 30 percent of earnings before interest and taxes (EBIT) for the net interest deduction would have a smaller impact on the AETR on new investment compared to its impact on the METR. It would reduce the AETR on new investment by roughly the same extent as maintaining expensing for research and development costs.\n \"\"\"\n if alternative_radio_value == \"FDII\":\n text = \"\"\"\n Maintaining current policy FDII would reduce the AETR more than it would reduce the METR on new investment because the FDII deduction reduces the effective statutory tax rate on IP income. The policy would reduce the AETR on investment by 0.1 percentage points under current law and the House Ways and Means proposal and 0.3 percentage points under the Biden proposal.\n \"\"\"\n return text\n\n\[email protected](\n Output(\"download-dataframe-csv\", \"data\"),\n Input(\"btn_csv\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef func(n_clicks):\n return dcc.send_data_frame(output[0:39].to_csv, \"OECD-Effective-Tax-Rates.csv\")\n\n\n# Endcode\nserver = app.server\n# Turn debug=False for production\nif __name__ == \"__main__\":\n app.run_server(debug=False, use_reloader=True)\n"
] |
[
[
"pandas.Index",
"pandas.read_csv",
"pandas.melt",
"numpy.average"
]
] |
felixhjh/Serving
|
[
"3979a174f1a5c905b11545dbeb9db27b5a83243b"
] |
[
"examples/Pipeline/PaddleOCR/ocr/web_service.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom paddle_serving_server.web_service import WebService, Op\nimport logging\nimport numpy as np\nimport cv2\nimport base64\nfrom paddle_serving_app.reader import OCRReader\nfrom paddle_serving_app.reader import Sequential, ResizeByFactor\nfrom paddle_serving_app.reader import Div, Normalize, Transpose\nfrom paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes\n\n_LOGGER = logging.getLogger()\nclass PreDetOp(Op):\n def init_op(self):\n self.det_preprocess = Sequential([\n ResizeByFactor(32, 960), Div(255),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(\n (2, 0, 1))\n ])\n\n def postprocess(self, input_dicts, fetch_dict, data_id, log_id):\n (_, input_dict), = input_dicts.items()\n imgs = []\n for key in input_dict.keys():\n data = base64.b64decode(input_dict[key].encode('utf8'))\n self.raw_im = data\n data = np.frombuffer(data, np.uint8)\n self.im = cv2.imdecode(data, cv2.IMREAD_COLOR)\n self.ori_h, self.ori_w, _ = self.im.shape\n det_img = self.det_preprocess(self.im)\n _, self.new_h, self.new_w = det_img.shape\n imgs.append(det_img[np.newaxis, :].copy())\n return {\"image\": np.concatenate(imgs, axis=0), \n \"ori_h\": self.ori_h, \n \"ori_w\": self.ori_w, \n \"new_h\": self.new_h, \n \"new_w\": self.new_w, \n \"raw_im\": self.raw_im}, None, \"\"\n\n\nclass DetOp(Op):\n def init_op(self):\n self.filter_func = FilterBoxes(10, 10)\n self.post_func = DBPostProcess({\n \"thresh\": 0.3,\n \"box_thresh\": 0.5,\n \"max_candidates\": 1000,\n \"unclip_ratio\": 1.5,\n \"min_size\": 3\n })\n\n \"\"\" \n when opening tensorrt(configure in config.yml) and each time the input shape \n for inferring is different, using this method for configuring tensorrt \n dynamic shape to infer in each op model\n \"\"\"\n def set_dynamic_shape_info(self):\n min_input_shape = {\n \"x\": [1, 3, 50, 50],\n \"conv2d_182.tmp_0\": [1, 1, 20, 20],\n \"nearest_interp_v2_2.tmp_0\": [1, 1, 20, 20],\n \"nearest_interp_v2_3.tmp_0\": [1, 1, 20, 20],\n \"nearest_interp_v2_4.tmp_0\": [1, 1, 20, 20],\n \"nearest_interp_v2_5.tmp_0\": [1, 1, 20, 20]\n }\n max_input_shape = {\n \"x\": [1, 3, 1536, 1536],\n \"conv2d_182.tmp_0\": [20, 200, 960, 960],\n \"nearest_interp_v2_2.tmp_0\": [20, 200, 960, 960],\n \"nearest_interp_v2_3.tmp_0\": [20, 200, 960, 960],\n \"nearest_interp_v2_4.tmp_0\": [20, 200, 960, 960],\n \"nearest_interp_v2_5.tmp_0\": [20, 200, 960, 960],\n }\n opt_input_shape = {\n \"x\": [1, 3, 960, 960],\n \"conv2d_182.tmp_0\": [3, 96, 240, 240],\n \"nearest_interp_v2_2.tmp_0\": [3, 96, 240, 240],\n \"nearest_interp_v2_3.tmp_0\": [3, 24, 240, 240],\n \"nearest_interp_v2_4.tmp_0\": [3, 24, 240, 240],\n \"nearest_interp_v2_5.tmp_0\": [3, 24, 240, 240],\n }\n self.dynamic_shape_info = {\n \"min_input_shape\": min_input_shape,\n \"max_input_shape\": max_input_shape,\n \"opt_input_shape\": opt_input_shape,\n } \n\n def preprocess(self, input_dicts, data_id, 
log_id):\n (_, input_dict), = input_dicts.items()\n self.ori_h = input_dict[\"ori_h\"]\n self.ori_w = input_dict[\"ori_w\"]\n self.new_h = input_dict[\"new_h\"]\n self.new_w = input_dict[\"new_w\"]\n self.raw_im = input_dict[\"raw_im\"]\n return {\"x\": input_dict[\"image\"]}, False, None, \"\"\n\n def postprocess(self, input_dicts, fetch_dict, data_id, log_id):\n det_out = fetch_dict[\"save_infer_model/scale_0.tmp_1\"]\n ratio_list = [\n float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w\n ]\n dt_boxes_list = self.post_func(det_out, [ratio_list])\n dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])\n out_dict = {\"dt_boxes\": dt_boxes, \"image\": self.raw_im}\n return out_dict, None, \"\"\n\nclass PreRecOp(Op):\n def init_op(self):\n self.ocr_reader = OCRReader()\n self.get_rotate_crop_image = GetRotateCropImage()\n self.sorted_boxes = SortedBoxes()\n\n def postprocess(self, input_dicts,fetch_dict, data_id, log_id):\n (_, input_dict), = input_dicts.items()\n raw_im = input_dict[\"image\"]\n data = np.frombuffer(raw_im, np.uint8)\n im = cv2.imdecode(data, cv2.IMREAD_COLOR)\n dt_boxes = input_dict[\"dt_boxes\"]\n dt_boxes = self.sorted_boxes(dt_boxes)\n feed_list = []\n img_list = []\n max_wh_ratio = 0\n\n ## One batch, the type of feed_data is dict.\n \"\"\" \n for i, dtbox in enumerate(dt_boxes):\n boximg = self.get_rotate_crop_image(im, dt_boxes[i])\n img_list.append(boximg)\n h, w = boximg.shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n _, w, h = self.ocr_reader.resize_norm_img(img_list[0],\n max_wh_ratio).shape\n imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')\n for id, img in enumerate(img_list):\n norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)\n imgs[id] = norm_img\n feed = {\"image\": imgs.copy()}\n\n \"\"\"\n\n ## Many mini-batchs, the type of feed_data is list.\n max_batch_size = len(dt_boxes)\n\n # If max_batch_size is 0, skipping predict stage\n if max_batch_size == 0:\n return {}, True, None, \"\"\n boxes_size = len(dt_boxes)\n batch_size = boxes_size // max_batch_size\n rem = boxes_size % max_batch_size\n #_LOGGER.info(\"max_batch_len:{}, batch_size:{}, rem:{}, boxes_size:{}\".format(max_batch_size, batch_size, rem, boxes_size))\n for bt_idx in range(0, batch_size + 1):\n imgs = None\n boxes_num_in_one_batch = 0\n if bt_idx == batch_size:\n if rem == 0:\n continue\n else:\n boxes_num_in_one_batch = rem\n elif bt_idx < batch_size:\n boxes_num_in_one_batch = max_batch_size\n else:\n _LOGGER.error(\"batch_size error, bt_idx={}, batch_size={}\".\n format(bt_idx, batch_size))\n break\n\n start = bt_idx * max_batch_size\n end = start + boxes_num_in_one_batch\n img_list = []\n for box_idx in range(start, end):\n boximg = self.get_rotate_crop_image(im, dt_boxes[box_idx])\n img_list.append(boximg)\n h, w = boximg.shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n _, w, h = self.ocr_reader.resize_norm_img(img_list[0],\n max_wh_ratio).shape\n #_LOGGER.info(\"---- idx:{}, w:{}, h:{}\".format(bt_idx, w, h))\n\n imgs = np.zeros((boxes_num_in_one_batch, 3, w, h)).astype('float32')\n for id, img in enumerate(img_list):\n norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)\n imgs[id] = norm_img\n feed = {\"x\": imgs.copy()}\n feed_list.append(feed)\n return {\"feed_list\" : feed_list}, None, \"\"\n\nclass RecOp(Op):\n def init_op(self):\n self.ocr_reader = OCRReader()\n\n \"\"\" \n when opening tensorrt(configure in config.yml) and each time the input shape 
\n for inferring is different, using this method for configuring tensorrt \n dynamic shape to infer in each op model\n \"\"\"\n def set_dynamic_shape_info(self):\n min_input_shape = {\n \"x\": [1, 3, 32, 10],\n \"lstm_1.tmp_0\": [1, 1, 128]\n }\n max_input_shape = {\n \"x\": [50, 3, 32, 1000],\n \"lstm_1.tmp_0\": [500, 50, 128]\n }\n opt_input_shape = {\n \"x\": [6, 3, 32, 100],\n \"lstm_1.tmp_0\": [25, 5, 128]\n }\n self.dynamic_shape_info = {\n \"min_input_shape\": min_input_shape,\n \"max_input_shape\": max_input_shape,\n \"opt_input_shape\": opt_input_shape,\n }\n\n def preprocess(self, input_dicts, data_id, log_id):\n (_, input_dict), = input_dicts.items()\n return input_dict[\"feed_list\"], False, None, \"\"\n\n def postprocess(self, input_dicts, fetch_data, data_id, log_id):\n res_list = []\n if isinstance(fetch_data, dict):\n if len(fetch_data) > 0:\n rec_batch_res = self.ocr_reader.postprocess_ocrv2(\n fetch_data, with_score=True)\n for res in rec_batch_res:\n res_list.append(res[0])\n elif isinstance(fetch_data, list):\n for one_batch in fetch_data:\n one_batch_res = self.ocr_reader.postprocess_ocrv2(\n one_batch, with_score=True)\n for res in one_batch_res:\n res_list.append(res[0])\n\n res = {\"res\": str(res_list)}\n return res, None, \"\"\n\n\nclass OcrService(WebService):\n def get_pipeline_response(self, read_op):\n pre_det_op = PreDetOp(name=\"pre_det\", input_ops=[read_op])\n det_op = DetOp(name=\"det\", input_ops=[pre_det_op])\n pre_rec_op = PreRecOp(name=\"pre_rec\", input_ops=[det_op])\n rec_op = RecOp(name=\"rec\", input_ops=[pre_rec_op])\n return rec_op\n\n\nocr_service = OcrService(name=\"ocr\")\nocr_service.prepare_pipeline_config(\"config.yml\")\nocr_service.run_service()\n"
] |
[
[
"numpy.concatenate",
"numpy.frombuffer",
"numpy.zeros"
]
] |
airxiechao/gap
|
[
"1262bb7063da95011479839b4ccb4d9ed2e97020"
] |
[
"models/gap/exec.py"
] |
[
"import argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\nimport shutil\nimport contextlib\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nfrom attrdict import AttrDict\nfrom timeit import default_timer as timer\nfrom datetime import datetime, timedelta\nfrom pprint import pformat\n\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score, log_loss\n\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom pytorch_pretrained_bert.modeling import BertConfig, WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam\n\nfrom .features import convert_examples_to_features\nfrom .probert import ProBERT\nfrom .grep import GREP\n\nfrom scipy.special import softmax\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\ndef acc_and_f1(preds, y_true, label_list):\n label_list = [0, 1, 2]\n acc = simple_accuracy(np.argmax(preds, axis=-1), y_true)\n f1 = f1_score(y_true=y_true, y_pred=np.argmax(preds, axis=-1), average='micro', labels=label_list)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n \"log_loss\": log_loss(y_true=y_true, y_pred=preds, labels=label_list),\n }\n\ndef compute_metrics(preds, labels, label_list):\n assert len(preds) == len(labels) \n return acc_and_f1(preds, labels, label_list)\n\ndef evaluate(model,\n eval_features,\n device, \n args, \n label_list,\n num_labels,\n eval_mode=False):\n\n model.eval()\n\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.uint8)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_gpr_tags_mask = torch.tensor([f.gpr_tags_mask for f in eval_features], dtype=torch.uint8)\n\n all_mention_p_ids = torch.tensor([f.mention_p_ids for f in eval_features], dtype=torch.long)\n all_mention_a_ids = torch.tensor([f.mention_a_ids for f in eval_features], dtype=torch.long)\n all_mention_b_ids = torch.tensor([f.mention_b_ids for f in eval_features], dtype=torch.long)\n all_mention_p_mask = torch.tensor([f.mention_p_mask for f in eval_features], dtype=torch.uint8)\n all_mention_a_mask = torch.tensor([f.mention_a_mask for f in eval_features], dtype=torch.uint8)\n all_mention_b_mask = torch.tensor([f.mention_b_mask for f in eval_features], dtype=torch.uint8)\n\n all_cluster_ids_a = torch.tensor([f.cluster_ids_a for f in eval_features], dtype=torch.long)\n all_cluster_mask_a = torch.tensor([f.cluster_mask_a for f in eval_features], dtype=torch.uint8)\n all_cluster_ids_b = torch.tensor([f.cluster_ids_b for f in eval_features], dtype=torch.long)\n all_cluster_mask_b = torch.tensor([f.cluster_mask_b for f in eval_features], dtype=torch.uint8)\n all_cluster_ids_p = torch.tensor([f.cluster_ids_p for f in eval_features], dtype=torch.long)\n all_cluster_mask_p = torch.tensor([f.cluster_mask_p for f in eval_features], dtype=torch.uint8)\n\n all_pretrained = 
torch.tensor([f.pretrained for f in eval_features], dtype=torch.float)\n\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\n eval_data = TensorDataset(all_input_ids, \n all_input_mask, \n all_segment_ids, \n all_gpr_tags_mask,\n all_mention_p_ids,\n all_mention_a_ids,\n all_mention_b_ids,\n all_mention_p_mask,\n all_mention_a_mask,\n all_mention_b_mask,\n all_cluster_ids_a,\n all_cluster_mask_a,\n all_cluster_ids_b,\n all_cluster_mask_b,\n all_cluster_ids_p,\n all_cluster_mask_p,\n all_pretrained,\n all_label_ids)\n\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, \n sampler=eval_sampler, \n batch_size=args.eval_batch_size)\n\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n attn_wts = []\n pbar = tqdm(desc=\"Evaluating\", total=len(eval_dataloader)) if eval_mode else contextlib.suppress()\n with pbar:\n for step, batch in enumerate(eval_dataloader):\n # with torch.cuda.device(0):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids, \n gpr_tags_mask,\n mention_p_ids, mention_a_ids, mention_b_ids,\n mention_p_mask, mention_a_mask, mention_b_mask,\n cluster_ids_a, cluster_mask_a, cluster_ids_b, cluster_mask_b,\n cluster_ids_p, cluster_mask_p, pretrained, label_ids) = batch\n\n with torch.no_grad():\n res = model(input_ids, \n segment_ids, \n input_mask, \n gpr_tags_mask=gpr_tags_mask,\n mention_p_ids=mention_p_ids,\n mention_a_ids=mention_a_ids,\n mention_b_ids=mention_b_ids,\n mention_p_mask=mention_p_mask,\n mention_a_mask=mention_a_mask,\n mention_b_mask=mention_b_mask, \n cluster_ids_a=cluster_ids_a,\n cluster_mask_a=cluster_mask_a,\n cluster_ids_b=cluster_ids_b,\n cluster_mask_b=cluster_mask_b,\n cluster_ids_p=cluster_ids_p,\n cluster_mask_p=cluster_mask_p,\n pretrained=pretrained,\n labels=None,\n training=False,\n eval_mode=eval_mode)\n\n if eval_mode:\n logits, probabilties, attn_wts_m, attn_wts_c, attn_wts_co = res\n else:\n logits, probabilties = res\n # create eval loss and other metric required by the task\n loss_fct = CrossEntropyLoss()\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(probabilties.detach().cpu().numpy())\n else:\n preds[0] = np.append(\n preds[0], probabilties.detach().cpu().numpy(), axis=0)\n\n if eval_mode:\n pbar.set_description('Evaluating, Loss={:.3f}'.format(eval_loss / nb_eval_steps))\n pbar.update()\n\n if len(attn_wts) == 0:\n attn_wts = [attn_wts_m, attn_wts_c, attn_wts_co]\n else:\n attn_wts[0] = np.append(attn_wts[0], attn_wts_m, axis=0)\n attn_wts[1] = np.append(attn_wts[1], attn_wts_c, axis=0)\n attn_wts[2] = np.append(attn_wts[2], attn_wts_co, axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n\n result = compute_metrics(preds, all_label_ids.numpy(), label_list)\n\n result['eval_loss'] = eval_loss\n\n # pbar.set_description('Evaluating, Loss={:.3f}, F1={:.3f}, Log loss={:.3f}'.format(eval_loss, \n # result['f1'], \n # result['log_loss']))\n\n return preds, result['log_loss'], result, attn_wts\n\n\n\ndef fit(model, \n train_features,\n eval_features,\n test_features,\n label_list, \n num_labels,\n tokenizer,\n device, \n n_gpu,\n args,\n swa_model=None,\n verbose=0):\n\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.uint8)\n all_segment_ids 
= torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_gpr_tags_mask = torch.tensor([f.gpr_tags_mask for f in train_features], dtype=torch.uint8)\n\n all_mention_p_ids = torch.tensor([f.mention_p_ids for f in train_features], dtype=torch.long)\n all_mention_a_ids = torch.tensor([f.mention_a_ids for f in train_features], dtype=torch.long)\n all_mention_b_ids = torch.tensor([f.mention_b_ids for f in train_features], dtype=torch.long)\n all_mention_p_mask = torch.tensor([f.mention_p_mask for f in train_features], dtype=torch.uint8)\n all_mention_a_mask = torch.tensor([f.mention_a_mask for f in train_features], dtype=torch.uint8)\n all_mention_b_mask = torch.tensor([f.mention_b_mask for f in train_features], dtype=torch.uint8)\n\n all_cluster_ids_a = torch.tensor([f.cluster_ids_a for f in train_features], dtype=torch.long)\n all_cluster_mask_a = torch.tensor([f.cluster_mask_a for f in train_features], dtype=torch.uint8)\n all_cluster_ids_b = torch.tensor([f.cluster_ids_b for f in train_features], dtype=torch.long)\n all_cluster_mask_b = torch.tensor([f.cluster_mask_b for f in train_features], dtype=torch.uint8)\n all_cluster_ids_p = torch.tensor([f.cluster_ids_p for f in train_features], dtype=torch.long)\n all_cluster_mask_p = torch.tensor([f.cluster_mask_p for f in train_features], dtype=torch.uint8)\n\n all_pretrained = torch.tensor([f.pretrained for f in train_features], dtype=torch.float)\n\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n train_data = TensorDataset(all_input_ids, \n all_input_mask, \n all_segment_ids, \n all_gpr_tags_mask,\n all_mention_p_ids,\n all_mention_a_ids,\n all_mention_b_ids,\n all_mention_p_mask,\n all_mention_a_mask,\n all_mention_b_mask,\n all_cluster_ids_a,\n all_cluster_mask_a,\n all_cluster_ids_b,\n all_cluster_mask_b,\n all_cluster_ids_p,\n all_cluster_mask_p,\n all_pretrained,\n all_label_ids)\n\n train_sampler = RandomSampler(train_data)\n\n train_dataloader = DataLoader(train_data, \n sampler=train_sampler, \n batch_size=args.train_batch_size)\n\n # for name, param in model.named_parameters():\n # if param.requires_grad:\n # print(name, param.data)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer = BertAdam(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion\n )\n\n global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n best_score = np.inf\n best_epoch = 0\n since_best = 0\n preds = None\n tst_preds = None\n tst_score = np.inf\n best_swa_score = np.inf\n swa_n = 0\n for epoch in range(int(args.num_train_epochs)):\n model.train()\n\n # BertAdam has a default schedule\n # lr = lr_schedule(0.05, args.learning_rate, epoch, args.num_train_epochs)\n # adjust_learning_rate(optimizer, lr)\n lr = args.learning_rate\n\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n total = len(train_features) + len(eval_features) + len(test_features)\n with tqdm(desc=\"Trn, Epoch {}\".format(epoch), total=total) as pbar:\n for step, batch in enumerate(train_dataloader):\n # with torch.cuda.device(0):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids,\n gpr_tags_mask, \n mention_p_ids, mention_a_ids, 
mention_b_ids,\n mention_p_mask, mention_a_mask, mention_b_mask,\n cluster_ids_a, cluster_mask_a, cluster_ids_b, \n cluster_mask_b, cluster_ids_p, cluster_mask_p, \n pretrained, label_ids) = batch\n\n # define a new function to compute loss values for both output_modes\n logits, _ = model(input_ids, \n segment_ids, \n input_mask, \n gpr_tags_mask=gpr_tags_mask,\n mention_p_ids=mention_p_ids,\n mention_a_ids=mention_a_ids,\n mention_b_ids=mention_b_ids,\n mention_p_mask=mention_p_mask,\n mention_a_mask=mention_a_mask,\n mention_b_mask=mention_b_mask,\n cluster_ids_a=cluster_ids_a,\n cluster_mask_a=cluster_mask_a,\n cluster_ids_b=cluster_ids_b,\n cluster_mask_b=cluster_mask_b,\n cluster_ids_p=cluster_ids_p,\n cluster_mask_p=cluster_mask_p,\n pretrained=pretrained,\n labels=None,\n training=True)\n\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n pbar.set_description('Trn {:2d}, Loss={:.3f}, Val score={:.3f}, Val SWA={:.3f}, Test score={:.3f}'.format(epoch, \n tr_loss/nb_tr_steps, \n np.inf,\n best_swa_score,\n np.inf))\n pbar.update(len(batch[0]))\n\n # if global_step % (500//args.train_batch_size) == 0 and global_step > 0:\n # break\n\n # if nb_tr_steps % (500//args.train_batch_size) == 0 and nb_tr_steps > 0:\n # break\n \n preds_, score, res, _ = evaluate(model,\n eval_features,\n device, \n args, \n label_list,\n num_labels)\n\n pbar.set_description('Trn {:2d}, Loss={:.3f}, Val score={:.3f} F1={:.3f}, Test score={:.3f}'.format(epoch, \n tr_loss/nb_tr_steps, \n score,\n res['f1'],\n np.inf))\n pbar.update(len(eval_features))\n\n if swa_model is not None and global_step > 150:\n preds_swa, swa_score, _, _ = evaluate(swa_model,\n eval_features,\n device, \n args, \n label_list,\n num_labels)\n if swa_score < best_swa_score:\n best_swa_score = swa_score\n\n if score <= best_score:\n best_score = score\n best_epoch = epoch\n since_best = 0\n preds = preds_\n\n if len(test_features):\n tst_preds_, tst_score, tst_res, _ = evaluate(model,\n test_features,\n device, \n args, \n label_list,\n num_labels)\n\n pbar.set_description('Trn {:2d}, Loss={:.3f}, Val score={:.3f} F1={:.3f}, Test score={:.3f} F1={:.3f}'.format( \n epoch,\n tr_loss/nb_tr_steps, \n score,\n res['f1'],\n tst_res['log_loss'],\n tst_res['f1']\n ))\n pbar.update(len(test_features))\n\n # score = tst_score\n\n else:\n tst_preds_, tst_score = [], np.inf\n\n tst_preds = tst_preds_\n\n \n # Save a trained model and the associated configuration\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n with open(output_config_file, 'w') as f:\n f.write(model_to_save.config.to_json_string())\n\n else:\n since_best += 1\n\n if since_best == args.patience:\n break\n\n return best_epoch, preds, best_score, tst_preds, tst_score, None\n\ndef init_model(X_trn,\n X_val=None,\n X_tst=None,\n bert_model='bert-large-uncased', \n model_version='grep',\n do_lower_case=True, \n do_train=True, \n do_eval=True, \n eval_batch_size=8, \n learning_rate=2e-05, \n max_seq_length=512, \n no_cuda=False, \n num_train_epochs=10.0, \n 
output_dir=None, \n seed=42, \n train_batch_size=32, \n warmup_proportion=0.1, \n n_coref_models=0,\n patience=1,\n verbose=0):\n args = AttrDict(locals())\n\n # logger.info('Executing with parameters {}.'.format(pformat(args)))\n\n # Environment config\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n \n logger.info(\"device: {} n_gpu: {}, \".format(device, n_gpu))\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Load tokenizer\n # Download default vocab, no customizations\n # vocab_file = 'externals/bert/{}'.format(args.bert_model)\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, \n # vocab_file, \n do_lower_case=args.do_lower_case,\n never_split=[\"[UNK]\", \"[SEP]\", \"[PAD]\", \n \"[CLS]\", \"[MASK]\",\n # \"<C_0>\", \"<C_1>\", \"<D_0>\", \"<D_1>\"\n ])\n\n # Load data\n label_list = sorted(set(X_trn['label']))\n num_labels = len(label_list)\n\n train_features = convert_examples_to_features(X_trn, \n tokenizer,\n args.max_seq_length, \n n_coref_models=args.n_coref_models, \n verbose=verbose\n )\n\n eval_features = convert_examples_to_features(X_val, \n tokenizer,\n args.max_seq_length, \n n_coref_models=args.n_coref_models, \n verbose=verbose\n )\n\n test_features = convert_examples_to_features(X_tst, \n tokenizer,\n args.max_seq_length, \n n_coref_models=args.n_coref_models,\n verbose=verbose\n )\n\n logger.info(\"***** Training *****\")\n logger.info(\" Num examples = %d\", len(train_features))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n # logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n logger.info(\"***** Evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_features))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n logger.info(\"***** Testing *****\")\n logger.info(\" Num examples = %d\", len(test_features))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n # Prepare model\n print('Preparing Model.')\n cache_dir = str(PYTORCH_PRETRAINED_BERT_CACHE)\n if model_version == 'probert':\n model = ProBERT.from_pretrained(args.bert_model,\n cache_dir=cache_dir,\n num_labels=num_labels\n )\n elif model_version == 'grep':\n model = GREP.from_pretrained(args.bert_model,\n cache_dir=cache_dir,\n num_labels=num_labels\n )\n\n model.to(device)\n\n if n_gpu > 1:\n model = torch.nn.DataParallel(model)\n \n if args.do_train:\n return fit(model, \n train_features,\n eval_features,\n test_features,\n label_list,\n num_labels, \n tokenizer,\n device, \n n_gpu,\n args,\n swa_model=None,\n verbose=verbose\n )\n else:\n # model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)\n \n # Load a trained model and config that you have fine-tuned\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n config = BertConfig(output_config_file)\n\n if model_version == 'probert':\n model = ProBERT(config,\n num_labels=num_labels,\n pretrained_dim_size=pretrained_dim_size\n )\n elif model_version == 'grep':\n model = GREP(config,\n num_labels=num_labels,\n 
pretrained_dim_size=pretrained_dim_size\n )\n\n if torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\n else:\n map_location='cpu'\n\n model.load_state_dict(torch.load(output_model_file, map_location=map_location))\n model.to(device)\n\n tst_preds, tst_score, _, attn_wts = evaluate(model,\n test_features,\n device, \n args, \n label_list,\n num_labels,\n eval_mode=True)\n\n return 0, tst_preds, tst_score, tst_preds, tst_score, attn_wts\n\ndef fit_fold(fold_n, \n exp_dir,\n X_trn, \n X_val=None, \n X_tst=None,\n verbose=0, \n args={}):\n start = timer()\n\n logger_level = logging.INFO\n if verbose == 0:\n logger_level = logging.WARNING\n logger.setLevel(logger_level)\n logging.getLogger('pytorch_pretrained_bert.tokenization').setLevel(logger_level)\n logging.getLogger('pytorch_pretrained_bert.modeling').setLevel(logger_level)\n\n OUTPUT_DIR = exp_dir / args['model_version']/ str(fold_n) / 'model'\n DATA_DIR = exp_dir / args['model_version']/ str(fold_n) / 'data'\n\n if args['do_train']:\n logger.info('Running in train mode. Clearing model output directory.')\n shutil.rmtree(OUTPUT_DIR, ignore_errors=True)\n\n logger.info('Clearing data directory with train, val and test files in bert format.')\n shutil.rmtree(DATA_DIR, ignore_errors=True)\n\n OUTPUT_DIR.mkdir(parents=True, exist_ok=True)\n DATA_DIR.mkdir(parents=True, exist_ok=True)\n\n best_epoch, val_preds, score, tst_preds, tst_score, attn_wts = init_model(X_trn,\n X_val,\n X_tst, \n output_dir=OUTPUT_DIR,\n verbose=verbose, \n **args)\n\n print('Fold {} done in {}. \\nTest score - {}'.format(fold_n, \n timedelta(seconds=int(timer()-start)), \n tst_score))\n\n if not len(X_tst):\n tst_preds = val_preds\n tst_score = score\n\n return best_epoch, val_preds, score, tst_preds, tst_score, attn_wts"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.utils.data.DataLoader",
"torch.tensor",
"sklearn.metrics.log_loss",
"numpy.argmax",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"numpy.append",
"torch.cuda.device_count"
]
] |
VGGVRobotics/doper
|
[
"d0ebb9bc8ad9c326deac0b457769d04a92066f11"
] |
[
"doper/scenes/multiple_scenes.py"
] |
[
"__all__ = [\"MultipleScenes\"]\n\nimport logging\nfrom glob import glob\n\nimport numpy as onp\n\nfrom doper.utils.assets import get_svg_scene\nfrom .single_scene import SingleScene\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultipleScenes():\n def __init__(self, config: dict):\n self.config = config\n scene_paths = glob(config[\"sim\"][\"scene_params\"][\"svg_scene_path\"])\n self.single_scenes = []\n for scene_path in scene_paths:\n single_config = config.copy()\n single_config[\"sim\"][\"scene_params\"][\"svg_scene_path\"] = scene_path\n self.single_scenes.append(SingleScene(single_config))\n\n def get_init_state(self, batch_size: int):\n single_scene = onp.random.choice(self.single_scenes, 1)[0]\n self.jax_scene = single_scene.jax_scene\n return single_scene.get_init_state(batch_size)\n"
] |
[
[
"numpy.random.choice"
]
] |
coded5282/youtube-8m
|
[
"888354ff1b20de529e3270f4eeec320e692de935"
] |
[
"ensemble.py"
] |
[
"# Ensemble submission csv files together\n\nimport numpy as np\nimport pandas as pd\nimport itertools\n\nfns = [ # files for ensembling\n \"lstm.csv\"\n \"moe4_do.csv\"\n]\nfn0, fn1 = fns # getting each file to variable\n\noutfn=\"weighted_predictions.csv\" # output file\n\ndef parse_line(ln):\n id_, vals = ln.strip().split(',') # split id and vals\n vals = vals.split() # split vals, which are space-separated\n ix = np.array(vals[::2]).astype(np.int16) # ix stores all predicted labels\n p = np.array(vals[1::2]).astype(np.float32) # p stores all predicted labels' probabilities\n df = pd.DataFrame(p, index=ix, columns=['p']) # puts into data frame\n return id_, df # returns the id and data frame\n\ncnt = 0\n\nwith open(fn0,'r') as inf0, open(fn1, 'r') as inf1, open(outfn, 'w') as outf:\n first_line0 = next(inf0) # the header\n first_line1 = next(inf1)\n assert first_line0 == first_line1\n outf.write(first_line0) # writing the header\n stop = None\n for ln0, ln1 in itertools.islice(zip(inf0, inf1), stop):\n cnt += 1\n if cnt % 100 == 99:\n print(cnt)\n id0, df0 = parse_line(ln0)\n id1, df1 = parse_line(ln1)\n assert id0 == id1\n vid = id0\n df = pd.concat([df0, df1], axis=1).fillna(0)\n df.columns = ['p0','p1']\n df['wp'] = (df['p0'] + df['p1']) / 2.0\n df = df.sort_values('wp', ascending=False)\n df = df.iloc[:20,:]\n z = zip(list(df.index), list(df['wp']))\n out = \" \".join([\"%d %6f\"%x for x in z])\n out = \",\".join([vid, out])\n outf.write(out+'\\n')"
] |
[
[
"pandas.concat",
"numpy.array",
"pandas.DataFrame"
]
] |
zigonk/CMPC-Refseg
|
[
"0d59c90e9968ed836c695976ff90081e1c24378a"
] |
[
"generate_black.py"
] |
[
"import numpy as np\nimport os\nimport json\nimport cv2\n\ncfg = {\n \"meta\": \"/mnt/MyPassport/Youtube-VOS/meta_expressions/meta_expressions/valid/meta_expressions.json\",\n \"visdir\": \"./Annotations_1channel\",\n}\n\n\n\nblack_img = np.zeros((720, 1280)).astype(np.uint8)\n\ncnt = 0\nmeta_expression = {}\nwith open(cfg['meta']) as meta_file:\n meta_expression = json.load(meta_file)\nvideos = meta_expression['videos']\nfor vid_ind, vid in enumerate(videos.keys()): \n print(\"Running on video {}/{}\".format(vid_ind + 1, len(videos.keys())))\n expressions = [videos[vid]['expressions'][expression_id]['exp'] for expression_id in videos[vid]['expressions'].keys()]\n # instance_ids = [expression['obj_id'] for expression_id in videos[vid]['expressions']]\n frame_ids = videos[vid]['frames']\n for index, exp in enumerate(expressions):\n vis_dir = os.path.join(cfg['visdir'], str('{}/{}/'.format(vid, index)))\n # mask_dir = os.path.join(cfg.maskdir, str('{}/{}/'.format(vid, index)))\n avg_time = 0\n total_frame = 0\n for fid in frame_ids:\n if not os.path.exists(vis_dir):\n os.makedirs(vis_dir)\n vis_path = os.path.join(vis_dir, str('{}.png'.format(fid)))\n cnt += 1\n cv2.imwrite(vis_path, black_img)\n\nprint(cnt)\n"
] |
[
[
"numpy.zeros"
]
] |
lchorbadjiev/SCGV
|
[
"7b2fd1fbada7bea49166e37bcb82bd742617fe51"
] |
[
"scgv/views/sample.py"
] |
[
"'''\nCreated on Dec 14, 2016\n\n@author: lubo\n'''\nimport numpy as np\nfrom scgv.views.base import ViewerBase\n\n\nclass SamplesViewer(ViewerBase):\n\n def __init__(self, model):\n super(SamplesViewer, self).__init__(model)\n\n def calc_chrom_lines(self):\n return self.model.calc_chrom_lines()\n\n def calc_ploidy(self, sample_name):\n return self.model.data\\\n .seg_df[sample_name]\\\n .iloc[:self.model.chrom_x_index]\\\n .mean()\n\n def upto_chrom_x(self, data):\n assert len(data) == len(self.model.data.seg_df)\n return data[0:self.model.chrom_x_index]\n\n def calc_error(self, sample_name):\n if self.model.data.ratio_df is None:\n return np.NaN\n df_r = self.upto_chrom_x(self.model.data.ratio_df[sample_name].values)\n df_s = self.upto_chrom_x(self.model.data.seg_df[sample_name].values)\n return np.sqrt(np.sum(((df_r - df_s) / df_s)**2))\n\n def calc_shredded(self, sample_name):\n upto_x_data = \\\n self.model.data \\\n .seg_df[sample_name].values[0:self.model.chrom_x_index]\n shredded = np.sum(upto_x_data < 0.4) / float(self.model.chrom_x_index)\n return shredded\n\n def draw_samples(self, fig, sample_list):\n self.chrom_lines = self.calc_chrom_lines()\n\n ax_common = None\n for num, sample_name in enumerate(sample_list):\n if num == 0:\n ax = fig.add_subplot(len(sample_list), 1, num + 1)\n ax_common = ax\n else:\n ax = fig.add_subplot(\n len(sample_list), 1, num + 1,\n sharex=ax_common,\n sharey=ax_common)\n\n for chrom_line in self.chrom_lines:\n ax.axvline(x=chrom_line, color=\"#000000\", linewidth=1)\n for hl in [1, 2, 3, 4, 5, 6]:\n ax.axhline(y=hl, color=\"#000000\", linewidth=1, linestyle=\"--\")\n\n if self.model.data.ratio_df is not None:\n ax.plot(\n self.model.data.ratio_df['abspos'],\n self.model.data.ratio_df[sample_name],\n color=\"#bbbbbb\", alpha=0.8)\n ax.plot(\n self.model.data.seg_df['abspos'],\n self.model.data.seg_df[sample_name],\n color='b', alpha=0.8)\n ax.set_yscale('log')\n\n ax.set_xlim((0, self.model.data.seg_df['abspos'].values[-1]))\n ax.set_ylim((0.05, 20))\n\n ploidy = self.calc_ploidy(sample_name)\n error = self.calc_error(sample_name)\n shredded = self.calc_shredded(sample_name)\n\n ax.set_title(\n \"{} Ploidy={:.2f} Error={:.2f} Shredded={:.2f}\".format(\n sample_name, ploidy, error, shredded))\n\n ax.set_xticks([])\n ax.set_xticklabels([])\n\n chrom_labels_pos = self.calc_chrom_labels_pos(self.chrom_lines)\n\n assert len(chrom_labels_pos) <= len(self.CHROM_LABELS)\n chrom_labels = self.CHROM_LABELS[:len(chrom_labels_pos)]\n\n ax.set_xticks(chrom_labels_pos)\n ax.set_xticklabels(chrom_labels, rotation='vertical')\n fig.tight_layout()\n"
] |
[
[
"numpy.sum"
]
] |
qilei123/vision2
|
[
"e62f8cc0030b4d0943b818759c6bf99dae7f2694"
] |
[
"torchvision/transforms/transforms.py"
] |
[
"from __future__ import division\nimport torch\nimport math\nimport sys\nimport random\nfrom PIL import Image\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport types\nimport collections\nimport warnings\n\nfrom . import functional as F\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"ToPILImage\", \"Normalize\", \"Resize\",\"Resize2\",\"Resize_circle\", \"Scale\", \"CenterCrop\", \"Pad\",\n \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\", \"RandomHorizontalFlip\",\n \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\", \"LinearTransformation\",\n \"ColorJitter\", \"RandomRotation\", \"RandomRotation2\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\"]\n\n_pil_interpolation_to_str = {\n Image.NEAREST: 'PIL.Image.NEAREST',\n Image.BILINEAR: 'PIL.Image.BILINEAR',\n Image.BICUBIC: 'PIL.Image.BICUBIC',\n Image.LANCZOS: 'PIL.Image.LANCZOS',\n Image.HAMMING: 'PIL.Image.HAMMING',\n Image.BOX: 'PIL.Image.BOX',\n}\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ToPILImage(object):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. 
_PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n will normalize each channel of the input ``torch.*Tensor`` i.e.\n ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. note::\n This transform acts out of place, i.e., it does not mutates the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(object):\n \"\"\"Resize the input PIL Image to the given size.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be scaled.\n\n Returns:\n PIL Image: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\nclass Resize2(object):\n \"\"\"Resize the input PIL Image to the given size.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. 
Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be scaled.\n\n Returns:\n PIL Image: Rescaled image.\n \"\"\"\n return F.resize2(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL Image at the center.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(object):\n \"\"\"Pad the given PIL Image on all sides with the given \"pad\" value.\n\n Args:\n padding (int or tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. 
Should be: constant, edge, reflect or symmetric.\n Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode='constant'):\n assert isinstance(padding, (numbers.Number, tuple))\n assert isinstance(fill, (numbers.Number, str, tuple))\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be padded.\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda(object):\n \"\"\"Apply a user-defined lambda as a transform.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n assert callable(lambd), repr(type(lambd).__name__) + \" object is not callable\"\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms(object):\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (list or tuple): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n assert isinstance(transforms, (list, tuple))\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(RandomTransforms):\n \"\"\"Apply randomly a list of transformations with a given probability\n\n Args:\n transforms (list or tuple): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super(RandomApply, self).__init__(transforms)\n self.p = p\n\n def __call__(self, img):\n if self.p < random.random():\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single 
transformation randomly picked from a list\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(object):\n \"\"\"Crop the given PIL Image at a random location.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None, i.e no padding. If a sequence of length\n 4 is provided, it is used to pad left, top, right, bottom borders\n respectively. If a sequence of length 2 is provided, it is used to\n pad left/right, top/bottom borders, respectively.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n @staticmethod\n def get_params(img, output_size):\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = img.size\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw)\n return i, j, th, tw\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n # pad the width if needed\n if self.pad_if_needed and img.size[0] < self.size[1]:\n img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and img.size[1] < self.size[0]:\n img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Randomly flipped image.\n \"\"\"\n if random.random() < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(object):\n \"\"\"Vertically flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Randomly flipped image.\n \"\"\"\n if random.random() < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(object):\n \"\"\"Performs Perspective transformation of the given PIL Image randomly with a given probability.\n\n Args:\n interpolation : Default- Image.BICUBIC\n\n p (float): probability of the image being perspectively transformed. Default value is 0.5\n\n distortion_scale(float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.\n\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC):\n self.p = p\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be perspectively transformed.\n\n Returns:\n PIL Image: Randomly perspective transformed image.\n \"\"\"\n if not F._is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n if random.random() < self.p:\n width, height = img.size\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation)\n return img\n\n @staticmethod\n def get_params(width, height, distortion_scale):\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width : width of the image.\n height : height of the image.\n distortion_scale : it controls the degree of distortion and ranges from 0 to 1.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = int(height / 2)\n half_width = int(width / 2)\n topleft = (random.randint(0, int(distortion_scale * half_width)),\n random.randint(0, int(distortion_scale * half_height)))\n topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n random.randint(0, int(distortion_scale * half_height)))\n botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n botleft = (random.randint(0, int(distortion_scale * half_width)),\n random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(object):\n \"\"\"Crop the given PIL Image to random size and aspect ratio.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 
3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size: expected output size of each edge\n scale: range of size of the origin size cropped\n ratio: range of aspect ratio of the origin aspect ratio cropped\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):\n if isinstance(size, tuple):\n self.size = size\n else:\n self.size = (size, size)\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"range should be of kind (min, max)\")\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n area = img.size[0] * img.size[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = img.size[0] / img.size[1]\n if (in_ratio < min(ratio)):\n w = img.size[0]\n h = w / min(ratio)\n elif (in_ratio > max(ratio)):\n h = img.size[1]\n w = h * max(ratio)\n else: # whole image\n w = img.size[0]\n h = img.size[1]\n i = (img.size[1] - h) // 2\n j = (img.size[0] - w) // 2\n return i, j, h, w\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped and resized.\n\n Returns:\n PIL Image: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(object):\n \"\"\"Crop the given PIL Image into four corners and the central crop\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n self.size = size\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n self.size = size\n\n def __call__(self, img):\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(object):\n \"\"\"Crop the given PIL Image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default)\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip(bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n self.size = size\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n self.size = size\n self.vertical_flip = vertical_flip\n\n def __call__(self, img):\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(object):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n Applications:\n - whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise 
ValueError(\"transformation_matrix should be square. Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n \" as any one of the dimensions of the transformation_matrix [{} x {}]\"\n .format(transformation_matrix.size()))\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):\n raise ValueError(\"tensor and transformation matrix have incompatible shape.\" +\n \"[{} x {} x {}] != \".format(*tensor.size()) +\n \"{}\".format(self.transformation_matrix.size(0)))\n flat_tensor = tensor.view(1, -1) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(tensor.size())\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(transformation_matrix='\n format_string += (str(self.transformation_matrix.tolist()) + ')')\n format_string += (\", (mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(object):\n \"\"\"Randomly change the brightness, contrast and saturation of an image.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - value, center + value]\n if clip_first_on_zero:\n value[0] = max(value[0], 0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with lenght 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) 
for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness, contrast, saturation, hue):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are same as that of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n\n if brightness is not None:\n brightness_factor = random.uniform(brightness[0], brightness[1])\n transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))\n\n if contrast is not None:\n contrast_factor = random.uniform(contrast[0], contrast[1])\n transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))\n\n if saturation is not None:\n saturation_factor = random.uniform(saturation[0], saturation[1])\n transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))\n\n if hue is not None:\n hue_factor = random.uniform(hue[0], hue[1])\n transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))\n\n random.shuffle(transforms)\n transform = Compose(transforms)\n\n return transform\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Input image.\n\n Returns:\n PIL Image: Color jittered image.\n \"\"\"\n transform = self.get_params(self.brightness, self.contrast,\n self.saturation, self.hue)\n return transform(img)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(object):\n \"\"\"Rotate the image by angle.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, resample=False, expand=False, center=None):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n if len(degrees) != 2:\n raise ValueError(\"If degrees is a sequence, it must be of len 2.\")\n self.degrees = degrees\n\n self.resample = resample\n self.expand = expand\n self.center = center\n\n @staticmethod\n def get_params(degrees):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n\n return angle\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be rotated.\n\n Returns:\n PIL Image: Rotated image.\n \"\"\"\n\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', resample={0}'.format(self.resample)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n format_string += ')'\n return format_string\n\n\nclass RandomRotation2(object):\n \"\"\"Rotate the image by angle.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees=[0,360], resample=False, expand=False, center=None):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n if len(degrees) != 2:\n raise ValueError(\"If degrees is a sequence, it must be of len 2.\")\n self.degrees = degrees\n\n self.resample = resample\n self.expand = expand\n self.center = center\n\n @staticmethod\n def get_params(degrees):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n #print(\"rotate angle:\"+str(angle))\n if angle>=0 and angle<90:\n angle=0\n elif angle>=90 and angle<180:\n angle=90\n elif angle>=180 and angle<270:\n angle=180\n elif angle>=270 and angle<=360:\n angle=270\n return angle\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be rotated.\n\n Returns:\n PIL Image: Rotated image.\n \"\"\"\n\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', resample={0}'.format(self.resample)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(object):\n \"\"\"Random affine transformation of the image keeping center invariant\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. Will keep original scale by default.\n shear (sequence or float or int, optional): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Will not apply shear by default\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \\\n \"degrees should be a list or tuple and it must be of length 2.\"\n self.degrees = degrees\n\n if translate is not None:\n assert isinstance(translate, (tuple, list)) and len(translate) == 2, \\\n \"translate should be a list or tuple and it must be of length 2.\"\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n assert isinstance(scale, (tuple, list)) and len(scale) == 2, \\\n \"scale should be a list or tuple and it must be of length 2.\"\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n if isinstance(shear, numbers.Number):\n if shear < 0:\n raise ValueError(\"If shear is a single number, it must be positive.\")\n self.shear = (-shear, shear)\n else:\n assert isinstance(shear, (tuple, list)) and len(shear) == 2, \\\n \"shear should be a list or tuple and it must be of length 2.\"\n self.shear = shear\n else:\n self.shear = shear\n\n self.resample = resample\n self.fillcolor = fillcolor\n\n @staticmethod\n def get_params(degrees, translate, scale_ranges, shears, img_size):\n \"\"\"Get parameters for affine transformation\n\n Returns:\n sequence: params to be passed to the affine transformation\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n if translate is not None:\n max_dx = translate[0] * img_size[0]\n max_dy = translate[1] * img_size[1]\n translations = (np.round(random.uniform(-max_dx, max_dx)),\n np.round(random.uniform(-max_dy, max_dy)))\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = random.uniform(scale_ranges[0], scale_ranges[1])\n else:\n scale = 1.0\n\n if shears is not None:\n shear = random.uniform(shears[0], shears[1])\n else:\n shear = 0.0\n\n return angle, translations, scale, shear\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)\n return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.resample > 0:\n s += ', resample={resample}'\n if self.fillcolor != 0:\n s += ', fillcolor={fillcolor}'\n s += ')'\n d = dict(self.__dict__)\n d['resample'] = _pil_interpolation_to_str[d['resample']]\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(object):\n \"\"\"Convert image to grayscale.\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If num_output_channels == 1 : returned image is single channel\n - If num_output_channels == 3 : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n 
self.num_output_channels = num_output_channels\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n return F.to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(object):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n num_output_channels = 1 if img.mode == 'L' else 3\n if random.random() < self.p:\n return F.to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass Resize_circle(object):\n \"\"\"Resize the input PIL Image to the given size, randomly masking it to a circle first.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e., if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n circle_ratio (float, optional): probability of applying ``F.img_circle`` to the\n image before resizing. Default is 0.5\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR, circle_ratio=0.5):\n assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n self.circle_ratio = circle_ratio\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be scaled.\n\n Returns:\n PIL Image: Rescaled image.\n \"\"\"\n if random.random() < self.circle_ratio:\n return F.resize(F.img_circle(img), self.size, self.interpolation)\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)"
] |
[
[
"torch.mm"
]
] |
DaniloZZZ/ising_model
|
[
"e9282aec1a445058b9e0fd1c8b4788390a9beb69"
] |
[
"syntesis/scripts/torch_ising.py"
] |
[
"import torch as T\nimport numpy as np\nfrom itertools import product\nfrom torch.functional import F\nimport scripts.ising as ising\n\ndef get_nn_mask(J, mu):\n return np.array([\n [0, J, 0]\n ,[J, mu, J]\n ,[0, J, 0]\n ])\ndef get_funny_mask(J, mu):\n return np.array([\n [J*2, J, J/2]\n ,[J, mu, J]\n ,[J/2, J, -J/2]\n ])\n\ndef get_diagonal_mask(J, mu):\n a = 1/np.sqrt(2)\n return np.array([\n [J*a, J, -J*a]\n ,[J, mu, J]\n ,[-J*a, J, J*a]\n ])\n\ndef get_anisotropic_mask(J, mu, ratio):\n h = 1+ratio\n v = 1-ratio\n return np.array([\n [0, J*v, 0]\n ,[J*h, mu, J*h]\n ,[0, J*v, 0]\n ])\n\n\ndef get_conv_torch(mask):\n \"\"\" Get 2d torch convolution with mask \"\"\"\n in_chan, out_chan = 1, 1\n shape = mask.shape\n l = T.nn.Conv2d(in_chan, out_chan, shape\n , stride=shape\n , padding=0\n , bias = False\n )\n l.mu = mask[shape[0]//2, shape[1]//2]\n mask[shape[0]//2, shape[1]//2] = 0\n l.weight.data = T.from_numpy(mask[np.newaxis, np.newaxis, ...])\n return l\n\ndef grid_torch(grid):\n \"\"\" Convert 2-d numpy to 4-d torch with shape 1,1,N,N \"\"\"\n gpu_grid = T.from_numpy(grid[np.newaxis, np.newaxis,...]).double()\n return gpu_grid\n\n\ndef get_conv_nn(J, mu, device='cuda'):\n nn_mask = get_nn_mask(J, mu)\n conv = get_conv_torch(nn_mask)\n return conv.to(device)\n\ndef get_random_grid(N, device='cuda'):\n g_ = grid_torch(ising.get_random_grid(N))\n return g_.to(device)\n\ndef metrop_step(grid, conv, beta):\n rix = np.random.randint(0, high=3, size=2)\n grid = T.roll(grid, shifts=tuple(rix), dims=(2,3))\n \n dE = 2*conv(grid)[0,0]\n \n scatter_ixs = [np.arange(1, d-1, 3) for d in grid.shape[2:]]\n ixs = (0,0) + np.ix_(*scatter_ixs)\n sub = grid[ixs]\n dE = sub*(dE + 2*conv.mu)\n \n acc_prob = T.exp(-beta*F.relu(dE))\n random = T.rand_like(acc_prob)\n sub[acc_prob > random] *= -1\n grid[ixs] = sub\n dE[acc_prob < random] *= 0\n sub[acc_prob < random] *= 0\n return grid, float(dE.sum().detach()), 2*float(sub.sum().detach())\n\n"
] |
[
[
"numpy.ix_",
"numpy.sqrt",
"torch.rand_like",
"numpy.arange",
"torch.nn.Conv2d",
"torch.functional.F.relu",
"torch.from_numpy",
"numpy.array",
"numpy.random.randint"
]
] |
uuefi/speech-to-text-benchmark
|
[
"214f0dedad888730944676be2f2876e8c48efed5"
] |
[
"processing.py"
] |
[
"import os\nimport wave\nfrom warnings import warn\n\nfrom pydub import AudioSegment\nfrom pydub.utils import mediainfo\nimport librosa\nimport numpy as np\nimport scipy\nimport soundfile as sf\n\n\n# helper to figure out the issue\ndef frame_rate_channel(audio_file_name, head):\n #x, _ = librosa.load(audio_file_name, sr=16000)\n #sf.write(os.path.join(head,'tmp.wav'), x, 16000)\n\n with wave.open(os.path.join(head,'tmp.wav'), \"rb\") as wave_file:\n frame_rate = wave_file.getframerate()\n channels = wave_file.getnchannels()\n print(f'frame_rate: {frame_rate}')\n print(f'channels: {channels}')\n return frame_rate, channels\n\n\ndef transform_audio(args, path, sample_rate, to_mono, normalise=False):\n # todo: find a better way to hold the cache path here --> args\n path_target = path.replace('data', 'cache')#.replace('.wav', '.flac')\n head, tail = os.path.split(path_target)\n os.makedirs(head, exist_ok=True)\n\n # to be removed: helper output to figure out the issue\n info = mediainfo(path)\n print(info)\n print(info['sample_rate'])\n print(info['channels'])\n\n # remove one pipeline here: helper output to figure out the issue\n tranform_with_librosa(path_target, path, sample_rate, to_mono, normalise)\n #tranform_with_pydub(path_target, path, sample_rate, to_mono, normalise, info)\n\n # to be removed: helper output to figure out the issue\n #audiofile.export(path_target, format=\"flac\")\n # scipy.io.wavfile.write(path_target, sample_rate, audio)\n # sf.write(path_target, audio, sample_rate)\n info = mediainfo(path_target)\n print(info)\n frame_rate_channel(path_target, head)\n\n return path_target\n\n\n# experiment with to pipelines to figure what the issue is\ndef tranform_with_librosa(path_target, path, sample_rate, to_mono, normalise):\n audio, sr = librosa.core.load(path, sr=None)\n if int(sr) > sample_rate:\n print(f\" - Sample rate can be reduced from {sr} to {sample_rate} for file {path}\")\n audio = librosa.resample(audio, sr, sample_rate)\n elif int(sr) < sample_rate:\n warn(f\" x Sample rate too low from {sr} to {sample_rate} for file {path} (speech-to-text). \"\n f\"Upsample for technical reasons, however, effect is limited.\")\n audio = librosa.resample(audio, sr, sample_rate)\n\n if to_mono:\n print(\" - to mono\")\n audio = librosa.to_mono(audio)\n\n if normalise:\n print(\" - normalise\")\n audio = audio * (0.7079 / np.max(np.abs(audio)))\n maxv = np.iinfo(np.int16).max\n audio = (audio * maxv).astype(np.int16)\n\n # librosa.output.write_wav(path_target, audio, sample_rate)\n # transfrom from 64-bit RIFF to flac\n sf.write(path_target, audio, sample_rate)\n\n\n\ndef tranform_with_pydub(path_target, path, sample_rate, to_mono, normalise, info):\n audiofile = AudioSegment.from_file(path)\n if int(info['sample_rate']) > sample_rate:\n print(f\" - Sample rate can be reduced from {info['sample_rate']} to {sample_rate} for file {path}\")\n audiofile = audiofile.set_frame_rate(sample_rate)\n elif int(info['sample_rate']) < sample_rate:\n warn(f\" x Sample rate too low from {info['sample_rate']} to {sample_rate} for file {path} (speech-to-text). \"\n f\"Upsample for technical reasons, however, effect is limited.\")\n audiofile = audiofile.set_frame_rate(sample_rate)\n\n if to_mono:\n print(\" - to mono\")\n audiofile = audiofile.set_channels(1)\n\n if normalise:\n exit(\"Not implemented yet\")\n\n audiofile.export(path_target, format=\"WAV\")\n"
] |
[
[
"numpy.abs",
"numpy.iinfo"
]
] |
ariecattan/s2e-coref
|
[
"2ebe126902f7a939f486a05e8ae036032a26a10a"
] |
[
"run_coref.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport shutil\nimport git\nimport torch\n\nfrom transformers import AutoConfig, AutoTokenizer, CONFIG_MAPPING, LongformerConfig, RobertaConfig\n\nfrom modeling import S2E\nfrom data import get_dataset\nfrom cli import parse_args\nfrom training import train, set_seed\nfrom eval import Evaluator\nfrom utils import write_meta_data\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n args = parse_args()\n\n transformers_logger = logging.getLogger(\"transformers\")\n transformers_logger.setLevel(logging.ERROR)\n\n if args.predict_file is None and args.do_eval:\n raise ValueError(\n \"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file \"\n \"or remove the --do_eval argument.\")\n\n if args.output_dir and os.path.exists(args.output_dir) and \\\n os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir))\n\n if args.overwrite_output_dir and os.path.isdir(args.output_dir):\n shutil.rmtree(args.output_dir)\n os.mkdir(args.output_dir)\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n with open(os.path.join(args.output_dir, 'args.txt'), 'w') as f:\n f.write(str(args))\n\n for key, val in vars(args).items():\n logger.info(f\"{key} - {val}\")\n\n try:\n write_meta_data(args.output_dir, args)\n except git.exc.InvalidGitRepositoryError:\n logger.info(\"didn't save metadata - No git repo!\")\n\n\n logger.info(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, amp training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.amp)\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n # Barrier to make sure only the first process in distributed training download model & vocab\n torch.distributed.barrier()\n\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n config = CONFIG_MAPPING[args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another script, save it,\"\n \" and load it from here, using --tokenizer_name\"\n )\n\n config_class = LongformerConfig\n base_model_prefix = \"longformer\"\n\n S2E.config_class = config_class\n S2E.base_model_prefix = base_model_prefix\n model = S2E.from_pretrained(args.model_name_or_path,\n config=config,\n cache_dir=args.cache_dir,\n args=args)\n\n model.to(args.device)\n\n if args.local_rank == 0:\n # End of barrier to make sure only the first process in distributed training download model & vocab\n torch.distributed.barrier()\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n evaluator = Evaluator(args, tokenizer)\n # Training\n if args.do_train:\n train_dataset = get_dataset(args, tokenizer, evaluate=False)\n\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, evaluator)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use save_pretrained for the model and tokenizer,\n # you can reload them using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model,\n 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Evaluation\n results = {}\n\n if args.do_eval and args.local_rank in [-1, 0]:\n result = evaluator.evaluate(model, prefix=\"final_evaluation\", official=True)\n results.update(result)\n return results\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.cuda.device_count"
]
] |
jabae/detectEM
|
[
"2d1a5116164d0bed0a8ea767a227d05a8970a448"
] |
[
"EMDetector/train/utils.py"
] |
[
"from __future__ import print_function\nimport imp\nimport os\n\nimport numpy as np\n\nimport torch\nfrom torch.nn.parallel import data_parallel\nfrom torch.cuda import *\n\nfrom train.data import Data\nfrom train.model import Model\n\n\n\ndef load_model(opt):\n # Create a model.\n net = opt.net\n net.cuda()\n model = Model(net, opt)\n\n if opt.pretrain:\n print(\"Loading {}...\".format(opt.pretrain))\n model.load(opt.pretrain)\n if opt.chkpt_num > 0:\n model = load_chkpt(model, opt.model_dir, opt.chkpt_num)\n\n return model.train(), net\n\n\ndef load_chkpt(model, fpath, chkpt_num):\n print(\"LOAD CHECKPOINT: {} iters.\".format(chkpt_num))\n fname = os.path.join(fpath, \"model{}.chkpt\".format(chkpt_num))\n model.load(fname)\n return model\n\n\ndef save_chkpt(model, fpath, chkpt_num):\n print(\"SAVE CHECKPOINT: {} iters.\".format(chkpt_num))\n fname = os.path.join(fpath, \"model{}.chkpt\".format(chkpt_num))\n model.save(fname)\n\n\ndef load_data(dataset, aug, opt):\n\n data_loader = Data(dataset, aug, opt, is_train=True)\n\n return data_loader\n\n\ndef forward(model, sample, opt):\n # Forward pass\n if len(opt.gpu_ids) > 1:\n \n losses, preds = data_parallel(model, sample)\n else:\n \n losses, preds = model(sample)\n\n # Average over minibatch\n losses = {k: v.mean() for k, v in losses.items()}\n\n return losses, preds\n"
] |
[
[
"torch.nn.parallel.data_parallel"
]
] |
liboyuty/Mutual-Cover
|
[
"b5589004e8ecc4858fe6bc50b1a13393a07a01ec"
] |
[
"disclosure_mutual_diversity.py"
] |
[
"import pandas as pd\r\nimport random\r\n\r\n\r\nrandom.seed(42)\r\n\r\n\r\nclass CheckDisclosure:\r\n def __init__(self):\r\n self.ori_path = \"./filter_data.csv\" # 原始数据路径\r\n self.mutual_path = \"./results/mutual_cover/diversity/mutual_\" # 匿名数据路径\r\n self.output_path = \"./results/mutual_cover/diversity/disclosure\" # 结果输出路径\r\n self.seed_numbers = range(0, 10) # 随机种子\r\n self.l_values = [10, 12, 15, 18, 20] # l参数\r\n self.k_values = [6, 7] # k参数\r\n self.probabilities = [0.3, 0.5, 0.8, 1] # 攻击者保留QI值的概率\r\n self.qi_attributes = [\"RELATE\", \"SEX\", \"AGE\", \"MARST\", \"RACE\", \"EDUC\", \"UHRSWORK\"] # 作为匹配条件的属性\r\n\r\n def check_oper(self):\r\n self.original_data = pd.read_csv(self.ori_path) # 读取原始数据\r\n for pr in self.probabilities:\r\n self.generate_conditions(pr)\r\n print(\"pr_value=:\" + str(pr))\r\n for kv in self.k_values:\r\n self.kv = kv\r\n print(\"k_value=\" + str(kv))\r\n for lv in self.l_values:\r\n self.lv = lv\r\n print(\"l_value=\" + str(lv))\r\n mutual_data = self.read_mutual_data()\r\n identity_disclosure = [0.0, ] * len(self.seed_numbers) # 每个随机种子下的身份暴露概率\r\n attribute_disclosure = [0.0, ] * len(self.seed_numbers) # 每个随机种子下的敏感值暴露概率\r\n for id_index, row_info in self.original_data.iterrows():\r\n if id_index % 100 == 0:\r\n print(id_index)\r\n ori_svalue = row_info[\"INCWAGE\"] # 记录原始数据的敏感值\r\n # ori_match = self.return_match_data(original_data, temp_condition)\r\n # ori_match_number = ori_match.shape[0]\r\n temp_condition = self.conditions[id_index]\r\n for md_index in range(len(mutual_data)):\r\n md = mutual_data[md_index] # 取出相应的匿名数据\r\n md_match, contain_flag = self.return_match_data(md, temp_condition,\r\n id_index) # 根据匹配条件筛选出满足条件的tuple\r\n md_match_number = md_match.shape[0]\r\n if md_match_number == 0: # 如果没有满足条件的tuple,则继续下一轮\r\n continue\r\n if contain_flag == True:\r\n identity_disclosure[md_index] += 1 / md_match_number\r\n attribute_disclosure[md_index] += self.count_attri_disclosure(ori_svalue, md_match)\r\n identity_disclosure = [i / self.original_data.shape[0] for i in identity_disclosure]\r\n attribute_disclosure = [i / self.original_data.shape[0] for i in attribute_disclosure]\r\n ide_dis_max, ide_dis_min, ide_dis_avg = self.count_max_min_avg(identity_disclosure)\r\n att_dis_max, att_dis_min, att_dis_avg = self.count_max_min_avg(attribute_disclosure)\r\n self.store_results(pr, kv, lv, ide_dis_max, ide_dis_min, ide_dis_avg, att_dis_max, att_dis_min,\r\n att_dis_avg)\r\n return\r\n\r\n def generate_conditions(self, pr):\r\n self.conditions = dict()\r\n for id_index, row_info in self.original_data.iterrows():\r\n self.conditions[id_index] = dict()\r\n while len(self.conditions[id_index]) == 0:\r\n for qi_col in self.qi_attributes:\r\n if random.random() <= pr:\r\n self.conditions[id_index][qi_col] = row_info[qi_col]\r\n return\r\n\r\n def read_mutual_data(self):\r\n mutual_data = []\r\n for sd in self.seed_numbers:\r\n mutual_tname = self.mutual_path + \"l\" + str(self.lv) + \"_k\" + str(self.kv) + \"_r\" + str(sd)\r\n mdata = pd.read_csv(mutual_tname)\r\n mutual_data.append(mdata)\r\n return mutual_data\r\n\r\n def return_match_data(self, ori_data, condition_set, id_index):\r\n temp_flags = None\r\n for col in condition_set:\r\n if temp_flags is None:\r\n temp_flags = (ori_data[col] == condition_set[col])\r\n continue\r\n temp_flags &= (ori_data[col] == condition_set[col])\r\n match_data = ori_data.loc[temp_flags, self.qi_attributes + [\"INCWAGE\", \"OID\"]].copy()\r\n contain_flag = True\r\n if match_data[match_data[\"OID\"] == id_index].shape[0] == 
0:\r\n contain_flag = False\r\n return match_data, contain_flag\r\n\r\n def count_attri_disclosure(self, ori_svalue, md_match):\r\n temp_count = md_match[\"INCWAGE\"].value_counts()\r\n match_count = 0\r\n for col, val in temp_count.items():\r\n if ori_svalue == col:\r\n match_count += val\r\n break\r\n return match_count / md_match.shape[0]\r\n\r\n def count_max_min_avg(self, dis_values):\r\n max_value = dis_values[0]\r\n min_value = dis_values[0]\r\n avg_value = 0\r\n for val in dis_values:\r\n if val > max_value:\r\n max_value = val\r\n if val < min_value:\r\n min_value = val\r\n avg_value += val\r\n return max_value, min_value, avg_value / len(dis_values)\r\n\r\n def store_results(self, pr, kv, lv, ide_dis_max, ide_dis_min, ide_dis_avg, att_dis_max, att_dis_min, att_dis_avg):\r\n with open(self.output_path, \"a\") as tfile:\r\n tfile.write(\"Identity Disclosure: k=\" + str(kv) + \" l=\" + str(lv) + \" p=\" + str(pr) + \":\\n\")\r\n tfile.write(\"max: \" + str(ide_dis_max) + \"\\n\")\r\n tfile.write(\"min: \" + str(ide_dis_min) + \"\\n\")\r\n tfile.write(\"avg: \" + str(ide_dis_avg) + \"\\n\")\r\n tfile.write(\"Attribute Disclosure: k=\" + str(kv) + \" l=\" + str(lv) + \" p=\" + str(pr) + \":\\n\")\r\n tfile.write(\"max: \" + str(att_dis_max) + \"\\n\")\r\n tfile.write(\"min: \" + str(att_dis_min) + \"\\n\")\r\n tfile.write(\"avg: \" + str(att_dis_avg) + \"\\n\")\r\n tfile.write(\"--------------------------------------------------------------------\\n\")\r\n return\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cd = CheckDisclosure()\r\n cd.check_oper()\r\n"
] |
[
[
"pandas.read_csv"
]
] |
bgoesswein/implementation_backend
|
[
"546018eb5dba79b823e3cfb20472271e02045789"
] |
[
"services/jobs/jobs/dependencies/process.py"
] |
[
"from ..models import Job\nimport numpy as np\nfrom datetime import datetime\ncoords = [(10.288696, 45.935871), (12.189331, 46.905246)]\n\nFACTOR_RESOLUTION = 1000\n\n\ndef reproject(latitude, longitude):\n \"\"\"Returns the x & y coordinates in meters using a sinusoidal projection\"\"\"\n from math import pi, cos, radians\n earth_radius = 6371009 # in meters\n lat_dist = pi * earth_radius / 180.0\n\n y = [lat * lat_dist for lat in latitude]\n x = [long * lat_dist * cos(radians(lat))\n for lat, long in zip(latitude, longitude)]\n return x, y\n\n\ndef area_of_polygon(x, y):\n \"\"\"Calculates the area of an arbitrary polygon given its verticies\"\"\"\n area = 0.0\n for i in range(-1, len(x)-1):\n area += x[i] * (y[i+1] - y[i-1])\n return abs(area) / 2.0\n\n\ndef generate_area(lon1, lat1, lon2, lat2, date_start, date_end):\n\n longitude = [lon1, lon1, lon2, lon2]\n latitude = [lat1, lat2, lat1, lat2]\n\n x, y = reproject(longitude, latitude)\n # x2, y2 = reproject(lon2, lat2)\n\n width = int((((x[0]-x[1])**2)**0.5)/FACTOR_RESOLUTION)\n height = int((((y[0]-y[3])**2)**0.5)/FACTOR_RESOLUTION)\n # area = area_of_polygon(x, y)\n\n start = datetime.strptime(date_start, \"%Y-%m-%d\")\n end = datetime.strptime(date_end, \"%Y-%m-%d\")\n\n daterange = end-start\n\n area = np.ones((width, height, daterange.days))\n\n return area\n\n\ndef set_no_data(data, cur, should):\n '''Set no data value'''\n\n data[data == cur] = should\n return data\n\n\ndef calc_ndvi(red, nir):\n '''Returns ndvi for given red and nir band (no data is set to 2, ndvi in range [-1, 1])'''\n\n # Get band data\n# red = get_band_data(dataset, file_param[\"band_order\"][\"B04\"])\n# nir = get_band_data(dataset, file_param[\"band_order\"][\"B08\"])\n\n # Calculate NDVI\n ndvi = (nir - red) / (nir + red)\n ndvi = set_no_data(ndvi, np.nan, 2)\n return ndvi\n\n\ndef calc_mintime(data):\n return np.fmin.reduce(data)\n\n\ndef calc_mintime(data):\n return np.fmax.reduce(data)\n\n\n"
] |
[
[
"numpy.fmax.reduce",
"numpy.fmin.reduce",
"numpy.ones"
]
] |
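A short usage sketch for `calc_ndvi` from the row above, on made-up band arrays. Note that `(nir - red) / (nir + red)` yields NaN where both bands are zero, and a plain `data == np.nan` comparison is always False, so a NaN-aware mask (`np.isnan`) is what actually remaps those pixels:

```python
import numpy as np

red = np.array([[0.1, 0.2], [0.0, 0.3]])
nir = np.array([[0.5, 0.4], [0.0, 0.6]])

# 0/0 produces NaN; suppress the division warnings while computing NDVI
with np.errstate(divide="ignore", invalid="ignore"):
    ndvi = (nir - red) / (nir + red)

ndvi[np.isnan(ndvi)] = 2  # no-data marker, as in set_no_data(ndvi, np.nan, 2)
print(ndvi)  # [[0.6667 0.3333] [2.     0.3333]]
```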
HindsightInstructionFollowing/gym-minigrid
|
[
"e01c561634ae4c91717444ca86b338aba2ff4ac4"
] |
[
"gym_minigrid/wrappers.py"
] |
[
"import math\nimport operator\nfrom functools import reduce\n\nimport numpy as np\nimport gym\nfrom gym import error, spaces, utils\nfrom gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX, STATE_TO_IDX, \\\n SHADE_TO_IDX, SIZE_TO_IDX\nfrom gym_minigrid.minigrid import CELL_PIXELS\n\nimport json\nimport torch\n\nclass ReseedWrapper(gym.core.Wrapper):\n \"\"\"{{\n Wrapper to always regenerate an environment with the same set of seeds.\n This can be used to force an environment to always keep the same\n configuration when reset.\n \"\"\"\n\n def __init__(self, env, seeds=[0], seed_idx=0):\n self.seeds = list(seeds)\n self.seed_idx = seed_idx\n super().__init__(env)\n\n def reset(self, **kwargs):\n seed = self.seeds[self.seed_idx]\n self.seed_idx = (self.seed_idx + 1) % len(self.seeds)\n self.env.seed(seed)\n return self.env.reset(**kwargs)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return obs, reward, done, info\n\nclass ActionBonus(gym.core.Wrapper):\n \"\"\"\n Wrapper which adds an exploration bonus.\n This is a reward to encourage exploration of less\n visited (state,action) pairs.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.counts = {}\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n env = self.unwrapped\n tup = (tuple(env.agent_pos), env.agent_dir, action)\n\n # Get the count for this (s,a) pair\n pre_count = 0\n if tup in self.counts:\n pre_count = self.counts[tup]\n\n # Update the count for this (s,a) pair\n new_count = pre_count + 1\n self.counts[tup] = new_count\n\n bonus = 1 / math.sqrt(new_count)\n reward += bonus\n\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass StateBonus(gym.core.Wrapper):\n \"\"\"\n Adds an exploration bonus based on which positions\n are visited on the grid.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.counts = {}\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n # Tuple based on which we index the counts\n # We use the position after an update\n env = self.unwrapped\n tup = (tuple(env.agent_pos))\n\n # Get the count for this key\n pre_count = 0\n if tup in self.counts:\n pre_count = self.counts[tup]\n\n # Update the count for this key\n new_count = pre_count + 1\n self.counts[tup] = new_count\n\n bonus = 1 / math.sqrt(new_count)\n reward += bonus\n\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass ImgObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Use the image as the only observation output, no language/mission.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = env.observation_space.spaces['image']\n\n def observation(self, obs):\n return obs['image']\n\nclass OneHotPartialObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to get a one-hot encoding of a partially observable\n agent view as observation.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n obs_shape = env.observation_space['image'].shape\n\n # Number of bits per cell\n num_bits = len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + len(STATE_TO_IDX)\n\n self.observation_space.spaces[\"image\"] = spaces.Box(\n low=0,\n high=255,\n shape=(obs_shape[0], obs_shape[1], num_bits),\n dtype='uint8'\n )\n\n def observation(self, obs):\n img = obs['image']\n out = np.zeros(self.observation_space.shape, dtype='uint8')\n\n for i in 
range(img.shape[0]):\n for j in range(img.shape[1]):\n type = img[i, j, 0]\n color = img[i, j, 1]\n state = img[i, j, 2]\n\n out[i, j, type] = 1\n out[i, j, len(OBJECT_TO_IDX) + color] = 1\n out[i, j, len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + state] = 1\n\n return {\n 'mission': obs['mission'],\n 'image': out\n }\n\nclass RGBImgObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to use fully observable RGB image as the only observation output,\n no language/mission. This can be used to have the agent to solve the\n gridworld in pixel space.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n self.observation_space.spaces['image'] = spaces.Box(\n low=0,\n high=255,\n shape=(self.env.width*tile_size, self.env.height*tile_size, 3),\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n\n rgb_img = env.render(\n mode='rgb_array',\n highlight=False,\n tile_size=self.tile_size\n )\n\n return {\n 'mission': obs['mission'],\n 'image': rgb_img\n }\n\n\nclass RGBImgPartialObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Wrapper to use partially observable RGB image as the only observation output\n This can be used to have the agent to solve the gridworld in pixel space.\n \"\"\"\n\n def __init__(self, env, tile_size=8):\n super().__init__(env)\n\n self.tile_size = tile_size\n\n obs_shape = env.observation_space['image'].shape\n self.observation_space.spaces['image'] = spaces.Box(\n low=0,\n high=255,\n shape=(obs_shape[0] * tile_size, obs_shape[1] * tile_size, 3),\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n\n rgb_img_partial = env.get_obs_render(\n obs['image'],\n tile_size=self.tile_size,\n mode='rgb_array'\n )\n\n return {\n 'mission': obs['mission'],\n 'image': rgb_img_partial\n }\n\nclass FullyObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Fully observable gridworld using a compact grid encoding\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n\n self.observation_space.spaces[\"image\"] = spaces.Box(\n low=0,\n high=255,\n shape=(self.env.width, self.env.height, 3), # number of cells\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n full_grid = env.grid.encode()\n full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([\n OBJECT_TO_IDX['agent'],\n COLOR_TO_IDX['red'],\n env.agent_dir\n ])\n\n return {\n 'mission': obs['mission'],\n 'image': full_grid\n }\n\nclass FlatObsWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Encode mission strings using a one-hot scheme,\n and combine these with observed images into one flat array\n \"\"\"\n\n def __init__(self, env, maxStrLen=96):\n super().__init__(env)\n\n self.maxStrLen = maxStrLen\n self.numCharCodes = 27\n\n imgSpace = env.observation_space.spaces['image']\n imgSize = reduce(operator.mul, imgSpace.shape, 1)\n\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(1, imgSize + self.numCharCodes * self.maxStrLen),\n dtype='uint8'\n )\n\n self.cachedStr = None\n self.cachedArray = None\n\n def observation(self, obs):\n image = obs['image']\n mission = obs['mission']\n\n # Cache the last-encoded mission string\n if mission != self.cachedStr:\n assert len(mission) <= self.maxStrLen, 'mission string too long ({} chars)'.format(len(mission))\n mission = mission.lower()\n\n strArray = np.zeros(shape=(self.maxStrLen, self.numCharCodes), dtype='float32')\n\n for idx, ch in enumerate(mission):\n if ch >= 'a' and ch <= 'z':\n chNo = ord(ch) - ord('a')\n elif ch == ' ':\n chNo 
= ord('z') - ord('a') + 1\n assert chNo < self.numCharCodes, '%s : %d' % (ch, chNo)\n strArray[idx, chNo] = 1\n\n self.cachedStr = mission\n self.cachedArray = strArray\n\n obs = np.concatenate((image.flatten(), self.cachedArray.flatten()))\n\n return obs\n\nclass ViewSizeWrapper(gym.core.Wrapper):\n \"\"\"\n Wrapper to customize the agent field of view size.\n This cannot be used with fully observable wrappers.\n \"\"\"\n\n def __init__(self, env, agent_view_size=7):\n super().__init__(env)\n\n # Override default view size\n env.unwrapped.agent_view_size = agent_view_size\n\n # Compute observation space with specified view size\n observation_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(agent_view_size, agent_view_size, 3),\n dtype='uint8'\n )\n\n # Override the environment's observation space\n self.observation_space = spaces.Dict({\n 'image': observation_space\n })\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n return self.env.step(action)\n\n\nclass FrameStackerWrapper(gym.core.ObservationWrapper):\n def __init__(self, env, n_stack=4, create_dim=False):\n \"\"\"\n Stack last frames to give the agent a bit a memory\n\n old observation shape is : (width, height, channel)\n new observation is :\n (N_STACK * width, height, channel) if create_dim = False\n (N_STACK, width, height, channel) if create_dim = True\n\n\n \"\"\"\n super().__init__(env=env)\n\n # Modify observation space to contains stacking, pre-pending a dimension\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n low, high, new_shape = int(obs_space[\"image\"].low.min()), int(obs_space[\"image\"].high.max()), obs_space[\"image\"].shape\n\n if create_dim:\n self.stack_function = np.stack\n new_shape = (n_stack, *new_shape)\n else:\n new_shape = list(new_shape)\n new_shape[2] *= n_stack\n self.stack_function = lambda x : np.concatenate(x, axis=2)\n\n obs_space[\"image\"] = spaces.Box(low=low, high=high, shape=new_shape)\n\n self.observation_space = spaces.Dict(obs_space)\n self.last_frames = []\n self.n_stack = n_stack\n\n def reset(self):\n obs = self.env.reset()\n last_frame = obs[\"image\"]\n obs[\"last_state\"] = last_frame\n self.last_frames = [last_frame]*self.n_stack\n obs[\"image\"] = self.stack_function(self.last_frames)\n return obs\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n obs[\"last_state\"] = obs[\"image\"]\n self.last_frames.append(obs[\"image\"])\n self.last_frames.pop(0)\n obs[\"image\"] = self.stack_function(self.last_frames)\n\n return obs, reward, done, info\n\n\nclass RemoveUselessActionWrapper(gym.core.Wrapper):\n def __init__(self, env, n_action=4):\n super().__init__(env)\n self.action_space = spaces.Discrete(4)\n def step(self, act):\n assert self.action_space.contains(act), \"Action not in action space\"\n return self.env.step(act)\n\nclass RemoveUselessChannelWrapper(gym.core.ObservationWrapper):\n\n def __init__(self, env):\n super().__init__(env)\n # Modify observation space to contains the correct number of channel\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n n_channel = obs_space[\"image\"].shape[-1]\n old_shape = obs_space[\"image\"].shape[:-1]\n\n # Change the last dimension, whatever the number of dimension\n obs_space[\"image\"] = spaces.Box(0, 255, (*old_shape,n_channel-1))\n self.observation_space = spaces.Dict(obs_space)\n\n def observation(self, 
obs):\n # Reduce complexity by removing open/close, not useful in many env\n obs[\"image\"] = obs[\"image\"][:,:,:-1] # Remove last element in state\n return obs\n\nclass Word2IndexWrapper(gym.core.ObservationWrapper):\n\n def __init__(self, env, vocab_file_str=\"\"):\n \"\"\"\n Load vocabulary from file, path in str format is given as input to TextWrapper\n \"\"\"\n\n super().__init__(env)\n\n # Word => Index\n vocab_dict = json.load(open(vocab_file_str, 'r'))\n self.w2i = vocab_dict[\"vocabulary\"]\n\n self.max_vocabulary = len(self.w2i)\n self.sentence_max_length = vocab_dict[\"sentence_max_length\"]\n\n # Vocab should respect those conventions, to avoid moving variables around or using global var\n assert self.w2i[\"<BEG>\"] == 0\n assert self.w2i[\"<END>\"] == 1\n assert self.w2i[\"<PAD>\"] == 2\n\n # Index => Word\n self.i2w = list(range(len(self.w2i)))\n for word, index in self.w2i.items():\n self.i2w[index] = word\n\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n obs_space[\"mission\"] = spaces.Box(0, self.max_vocabulary, (self.sentence_max_length,))\n self.observation_space = spaces.Dict(obs_space)\n\n #Mission related\n self.mission = None # List of indexes, each index represent a word, present in self.w2i (vocabulary)\n self.raw_mission = None # Str representation of the mission, easier for visualisation\n\n def reset(self):\n obs = self.env.reset()\n\n self.raw_mission = obs[\"mission\"]\n self.current_mission = [self.w2i[\"<BEG>\"]] + [self.w2i[word] for word in obs[\"mission\"].split(\" \")]\n self.current_mission += [self.w2i[\"<END>\"]]\n self.len_mission = len(self.current_mission)\n\n # No Padding, done in model\n # mission_len = len(self.current_mission)\n # n_padding = self.sentence_max_length - mission_len\n # if n_padding > 0:\n # self.current_mission.extend([0]*n_padding)\n\n obs[\"mission\"] = self.current_mission\n obs[\"raw_mission\"] = self.raw_mission\n obs[\"mission_length\"] = self.len_mission\n\n return obs\n\n def step(self, action):\n\n obs, reward, done, info = self.env.step(action)\n obs[\"mission\"] = self.current_mission\n obs[\"raw_mission\"] = self.raw_mission\n obs[\"mission_length\"] = self.len_mission\n\n if \"hindsight_mission\" in obs:\n obs[\"hindsight_raw_mission\"] = obs[\"hindsight_mission\"]\n obs[\"hindsight_mission\"] = [self.w2i[word] for word in obs[\"hindsight_mission\"].split(\" \")]\n\n return obs, reward, done, info\n\n\nclass CartPoleWrapper(gym.core.ObservationWrapper):\n def __init__(self, env):\n super().__init__(env)\n\n obs_space = dict()\n obs_space[\"image\"] = gym.spaces.Box(0,1,(4,))\n obs_space[\"mission\"] = gym.spaces.Box(0,1,(2,))\n self.observation_space = spaces.Dict(obs_space)\n\n def observation(self, observation):\n obs = dict()\n obs[\"image\"] = observation\n obs[\"mission\"] = [1,1]\n obs[\"mission_length\"] = [2]\n return obs\n\nclass StateNormWrapper(gym.core.ObservationWrapper):\n def __init__(self, env):\n super().__init__(env)\n\n # Modify observation space to contains the correct number of channel\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n old_shape = obs_space[\"image\"].shape\n # Change the last dimension, whatever the number of dimension\n obs_space[\"image\"] = spaces.Box(0, 1, shape=old_shape)\n self.observation_space = spaces.Dict(obs_space)\n\n def observation(self, observation):\n observation[\"image\"] = observation[\"image\"] / 255\n return 
observation\n\n\nclass TorchWrapper(gym.core.ObservationWrapper):\n def __init__(self, env, device='cpu'):\n super().__init__(env)\n self.device = device\n if isinstance(env.observation_space, gym.spaces.Dict) :\n self.observation = self._dict_space_to_torch\n elif isinstance(env.observation_space, gym.spaces.Box):\n self.observation = self._box_space_to_torch\n else:\n raise NotImplementedError(\"Can only deal with box and dict\")\n\n def _dict_space_to_torch(self, obs):\n for key in obs.keys():\n obs[key] = torch.tensor(obs[key]).float().unsqueeze(0).to(self.device)\n return obs\n\n def _box_space_to_torch(self, obs):\n return torch.tensor(obs).unsqueeze(0).to(self.device)\n\n\nclass VisionObjectiveWrapper(gym.core.ObservationWrapper):\n\n def __init__(self, env, with_distractor=True):\n super().__init__(env)\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n # The objective mission is being added\n h, w, c = obs_space[\"image\"].shape\n obs_space[\"image\"] = spaces.Box(0, 255, shape=(h,w, 2*c-1))\n self.observation_space = gym.spaces.Dict(obs_space)\n\n # Do you want to remove distractor object (otherobject present)\n self.with_distractor = with_distractor\n\n def observation(self, obs):\n if self.with_distractor:\n state_obj = obs[\"final_state\"]\n else:\n state_obj = obs[\"final_state_wo_distractor\"]\n assert obs[\"image\"].shape == state_obj.shape\n state_obj = state_obj[:,:,:-1]\n\n obs[\"image\"] = np.concatenate((state_obj, obs[\"image\"]), axis=2)\n return obs\n\nclass LastActionWrapper(gym.core.Wrapper):\n def __init__(self, env):\n super().__init__(env)\n\n def reset(self):\n obs = self.env.reset()\n obs[\"last_action\"] = self.action_space.n # Padding action\n return obs\n\n def step(self, action):\n obs, reward, is_done, info = self.env.step(action)\n obs[\"last_action\"] = action\n return obs, reward, is_done, info\n\nclass MinigridTorchWrapper(gym.core.ObservationWrapper):\n def __init__(self, env, device='cpu'):\n super().__init__(env)\n self.device = device\n\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n h, w, c = obs_space[\"image\"].shape[-3:]\n im_space_shape = list(obs_space[\"image\"].shape)\n im_space_shape[-3:] = c, h, w\n obs_space[\"image\"] = spaces.Box(0, 255, shape=im_space_shape)\n\n self.observation_space = gym.spaces.Dict(obs_space)\n\n def observation(self, obs):\n obs[\"image\"] = torch.Tensor(obs[\"image\"])\n if obs[\"image\"].dim() == 4:\n obs[\"image\"] = obs[\"image\"].permute(0,3,1,2)\n else:\n assert obs[\"image\"].dim() == 3, \"Image should be of dim 4 (stacked) or 3 (single image)\"\n obs[\"image\"] = obs[\"image\"].permute(2, 0, 1)\n\n obs[\"image\"] = obs[\"image\"].unsqueeze(0).to(self.device)\n obs[\"mission\"] = torch.LongTensor(obs[\"mission\"]).unsqueeze(0).to(self.device)\n obs[\"mission_length\"] = torch.LongTensor([obs[\"mission_length\"]]).to(self.device)\n obs[\"last_action\"] = torch.LongTensor([obs[\"last_action\"]]).to(self.device) if \"last_action\" in obs else 0\n return obs\n\ndef wrap_env_from_list(env, wrappers_json):\n\n str2wrap = {\n \"lastactionwrapper\" : LastActionWrapper,\n \"directionwrapper\" : DirectionWrapper,\n \"visionobjectivewrapper\" : VisionObjectiveWrapper,\n \"framestackerwrapper\" : FrameStackerWrapper,\n \"word2indexwrapper\": Word2IndexWrapper,\n \"removeuselessactionwrapper\" : RemoveUselessActionWrapper,\n \"removeuselesschannelwrapper\" : 
RemoveUselessChannelWrapper,\n \"minigridtorchwrapper\": MinigridTorchWrapper,\n \"vizdoom2minigrid\" : Vizdoom2Minigrid,\n \"normalizewrapper\" : NormalizeWrapper,\n \"statenormwrapper\" : StateNormWrapper\n }\n\n for wrap_dict in wrappers_json:\n current_wrap = str2wrap[wrap_dict[\"name\"].lower()]\n env = current_wrap(env, **wrap_dict[\"params\"])\n\n return env\n\nclass DirectionWrapper(gym.core.ObservationWrapper):\n \"\"\"\n Add direction as 2 feature maps in the image space\n if image is (7,7,3), new state will be (7,7,5)\n \"\"\"\n def __init__(self, env):\n super().__init__(env)\n\n obs_space = {}\n for key in env.observation_space.spaces.keys():\n obs_space[key] = env.observation_space.spaces[key]\n\n new_shape = list(obs_space[\"image\"].shape)\n #add 2 channels\n new_shape[2] += 2\n obs_space[\"image\"] = spaces.Box(0, 255, shape=new_shape)\n self.observation_space = spaces.Dict(obs_space)\n\n def observation(self, obs):\n assert 'direction' in obs, \"Direction not present in observation\"\n\n h,w = obs[\"image\"].shape[:2]\n direction = obs[\"direction\"]\n\n direction_channels = np.zeros((h, w, 2))\n\n if direction == 0: # East\n direction_channels[:,:,0] = 1\n elif direction == 1: # South\n direction_channels[:,:,1] = 1\n elif direction == 2: # West\n direction_channels[:,:,0] = -1\n elif direction == 3: # North\n direction_channels[:,:,1] = -1\n\n obs[\"image\"] = np.concatenate((direction_channels, obs[\"image\"]), axis=2)\n return obs\n\n\nclass FullyObsWrapperAndState(gym.core.ObservationWrapper):\n \"\"\"\n Fully observable gridworld using a compact grid encoding\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n\n self.observation_space.spaces[\"image\"] = spaces.Box(\n low=0,\n high=255,\n shape=(self.env.width, self.env.height, 3), # number of cells\n dtype='uint8'\n )\n\n def observation(self, obs):\n env = self.unwrapped\n full_grid = env.grid.encode()\n full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([\n OBJECT_TO_IDX['agent'],\n COLOR_TO_IDX['red'],\n SHADE_TO_IDX['very_light'],\n SIZE_TO_IDX['tiny'],\n env.agent_dir\n ])\n\n return {\n 'mission': obs['mission'],\n 'image_full': full_grid,\n 'image': env.gen_obs()['image']\n }\n\n\nclass Vizdoom2Minigrid(gym.core.Wrapper):\n def __init__(self, env):\n super().__init__(env)\n\n # Vocab should respect those conventions, to avoid moving variables around or using global var\n assert self.env.word_to_idx[\"<BEG>\"] == 0\n assert self.env.word_to_idx[\"<END>\"] == 1\n assert self.env.word_to_idx[\"<PAD>\"] == 2\n\n def pad_mission(self, wordidx):\n mission_length = len(wordidx)\n assert mission_length <= self.env.unwrapped.mission_max_length\n return wordidx + [self.env.word_to_idx[\"<PAD>\"]] * (self.env.unwrapped.mission_max_length - mission_length)\n\n def reset(self):\n (image, instruction, hindsight_mission, correct_obj_name), reward, is_done, info = self.env.reset()\n wordidx = [self.env.word_to_idx[\"<BEG>\"]]\n wordidx += [self.env.word_to_idx[word] for word in instruction.split()]\n wordidx += [self.env.word_to_idx[\"<END>\"]]\n\n self.mission_raw = instruction\n self.mission_length = len(wordidx)\n\n self.mission = self.pad_mission(wordidx)\n\n return {\n 'mission_raw': self.mission_raw,\n 'mission': self.mission,\n 'image': image,\n \"mission_length\": self.mission_length,\n \"hindsight_mission\" : None,\n \"correct_obj_name\" : correct_obj_name\n }\n\n def step(self, action):\n (image, instruction, hindsight_mission, correct_obj_name), reward, done, info = self.env.step(action)\n\n if hindsight_mission:\n hindsight_mission = 
[self.env.word_to_idx['<BEG>']] + [self.env.word_to_idx[word]\n for word in hindsight_mission.split()]\n\n hindsight_mission.append(self.env.word_to_idx['<END>'])\n\n hindsight_mission = self.pad_mission(hindsight_mission)\n\n return {\n 'mission_raw': self.mission_raw,\n 'mission': self.mission,\n 'image': image,\n \"mission_length\": self.mission_length,\n \"hindsight_mission\" : hindsight_mission,\n \"correct_obj_name\": correct_obj_name\n }, reward, done, info\n\n\nclass NormalizeWrapper(gym.core.ObservationWrapper):\n def __init__(self, env, device):\n # Todo : do a running average instead of computing it in advance\n\n super().__init__(env)\n input_space = self.observation_space[\"image\"].shape\n\n self.mean = torch.FloatTensor([53.17008 , 54.069958, 41.62655 ]).unsqueeze(1).unsqueeze(1).expand(*input_space).to(device)\n self.std = torch.FloatTensor([31.937586, 32.52923 , 24.772118]).unsqueeze(1).unsqueeze(1).expand(*input_space).to(device)\n\n def observation(self, observation):\n observation[\"image\"] = (observation[\"image\"] - self.mean) / self.std\n return observation\n\n\nif __name__ == \"__main__\":\n\n from gym_minigrid.envs.fetch_attr import FetchAttrEnv\n missons_file_str = \"envs/missions/fetch_train_missions_10_percent.json\"\n\n env = FetchAttrEnv(size=8,\n numObjs=10,\n missions_file_str=missons_file_str)\n\n env = LessActionAndObsWrapper(Word2IndexWrapper(env=env, vocab_file_str=\"envs/missions/vocab_fetch.json\"))\n print(env.observation_space)\n\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.tensor",
"numpy.concatenate",
"torch.FloatTensor",
"numpy.array",
"numpy.zeros"
]
] |
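Most wrappers in the file above follow one pattern: adjust `observation_space` in `__init__`, then transform each observation. Below is a minimal sketch of the frame-stacking idea used by `FrameStackerWrapper`, assuming the classic gym API where `reset()` returns only the observation and observations are arrays stacked along the last axis; `SimpleFrameStack` is an illustrative name, not part of the repo.

```python
import gym
import numpy as np

class SimpleFrameStack(gym.core.Wrapper):
    """Keep the last n observations and concatenate them along the last axis."""

    def __init__(self, env, n_stack=4):
        super().__init__(env)
        self.n_stack = n_stack
        self.frames = []

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        # Pad the history with copies of the first observation
        self.frames = [obs] * self.n_stack
        return np.concatenate(self.frames, axis=-1)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.frames.pop(0)       # drop the oldest frame
        self.frames.append(obs)  # keep the newest
        return np.concatenate(self.frames, axis=-1), reward, done, info
```

The repo's `FrameStackerWrapper` additionally rewrites `observation_space` and can stack along a new leading dimension; this sketch keeps only the buffering logic.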
vsahil/influence-duplicate
|
[
"ae5bc77be6dcb7d69054a520733c373d833552da"
] |
[
"benchmarks/german/facet_plots.py"
] |
[
"\n\nfrom plotnine import *\n# from plotnine import ggplot, geom_point, geom_line, aes, stat_smooth, facet_wrap\n# from plotnine.data import mtcars\n# from plotnine import positions\n# from plotnine import xlab, ylab, ggtitle, geom_boxplot, geom_path, geom_ribbon, geom_arrow\nimport pandas as pd\n# df = pd.read_csv(\"all_german_discm_data.csv\")\ndf = pd.read_csv(\"german_credit_mega_results0.csv\")\n# import ipdb; ipdb.set_trace()\nx = (ggplot(aes(x='Removed-points', y='Discm-percent', color='Data-Split'), data=df) +\\\n geom_point(size=1.5) +\\\n # stat_smooth(colour='blue', span=0.2) +\\\n # stat_summary() +\\\n # xlim(0, 85) +\\\n facet_wrap(['H1Units','H2Units','Batch'], nrow=3, ncol=4,scales = 'fixed', labeller='label_both', shrink=False) + \\\n xlab(\"Biased Points Removed\") + \\\n ylab(\"Percentage Discrimination remaining\") + \\\n ggtitle(\"Facet plot for remaining discrimination for each setting (German Credit)\") +\\\n theme(axis_text_x = element_text(size=6), dpi=50) +\\\n theme_seaborn()\n )\n\n# x.save(\"summary.png\", height=12, width=12)\nx.save(\"points_redone_upto700.png\", height=12, width=12)\n\n# (ggplot(mtcars, aes('wt', 'mpg', color='factor(gear)'))\n# + geom_point()\n# + stat_smooth(method='lm')\n# + facet_wrap('~gear'))\n\n# (ggplot(ggplot.save()))\n\n\n# import pandas as pd\n# import numpy as np\n# from pandas.api.types import CategoricalDtype\n# from plotnine import *\n# from plotnine.data import mpg\n# %matplotlib inline\n\n# x = (ggplot(mpg) # defining what data to use\n# + aes(x='class') # defining what variable to use\n# + geom_bar(size=20) # defining the type of plot to use\n# )\n"
] |
[
[
"pandas.read_csv"
]
] |
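A self-contained plotnine sketch of the faceted scatter plot above, on a synthetic DataFrame; the snake_case column names and the output filename are illustrative stand-ins for the benchmark's actual CSV schema.

```python
import pandas as pd
from plotnine import ggplot, aes, geom_point, facet_wrap, xlab, ylab

df = pd.DataFrame({
    "removed_points": [0, 100, 200, 0, 100, 200],
    "discm_percent": [9.0, 4.5, 2.0, 8.0, 5.0, 2.5],
    "split": ["train"] * 3 + ["test"] * 3,
    "h1units": [16] * 3 + [32] * 3,
})

p = (ggplot(df, aes("removed_points", "discm_percent", color="split"))
     + geom_point(size=1.5)
     + facet_wrap("~h1units", labeller="label_both")  # one panel per setting
     + xlab("Biased Points Removed")
     + ylab("Percentage Discrimination remaining"))
p.save("facet_sketch.png", height=4, width=6)
```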