repo_name: string (length 6–130)
hexsha: list
file_path: list
code: list
apis: list
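Each record below pairs a repository snapshot with parallel lists: one hexsha, file_path, code string, and apis list per file. A minimal sketch of iterating such records, assuming they are stored as JSON lines in a hypothetical file named code_apis.jsonl (the file name and serialization are assumptions, not part of this dump):

import json

# Hypothetical file name; the actual storage format behind this preview is not specified.
with open("code_apis.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # The list fields are parallel: entry i of each list describes the same file.
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(record["repo_name"], path, len(code), sorted(apis))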
matteopilotto/CogView
[ "51203abf9c74415f3d212e31c1e8558313ba35e9" ]
[ "preprocess/utils.py" ]
[ "# -*- encoding: utf-8 -*-\n'''\n@File : utils.py\n@Time : 2021/01/24 16:35:43\n@Author : Ming Ding \n@Contact : [email protected]\n'''\n\n# here put the import lib\nimport os\nimport sys\nimport math\nimport random\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom vqvae import code2img, img2code\nfrom torchvision.utils import save_image\n\n\ndef show_recover_results(model, imgs):\n codes = img2code(model, imgs)\n recovered = code2img(model, codes)\n mean = torch.tensor([0.79093, 0.76271, 0.75340], device=recovered.device).view(-1, 1, 1)\n std = torch.tensor([0.30379, 0.32279, 0.32800], device=recovered.device).view(-1, 1, 1)\n recovered = (recovered * std + mean).clamp(0, 1)\n imgs = (imgs * std + mean).clamp(0, 1)\n out = torch.cat([imgs, recovered], dim=0)\n save_image(out, 'samples/show_recover_results.jpg', normalize=False, nrow=len(imgs))\n" ]
[ [ "torch.cat", "torch.tensor" ] ]
LordLean/Checkers-Game
[ "641215782cb0de875a818bf62e9da2122c300963" ]
[ "graphs/bigO.py" ]
[ "#!pip install matplotlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Branching factor.\nB = 2.8\n# Search depth of d plies. d = 5.\nx = np.linspace(1,5,500)\n# Minimax Big O.\ny_minimax = np.power(B, x)\n# Alpha beta Big O.\ny_a_b = np.sqrt(y_minimax)\n# Plot:\nplt.plot(x, y_minimax, label=\"Minimax\")\nplt.plot(x, y_a_b, label=\"Alpha-Beta\")\nplt.title(\"Big O Complexity\")\nplt.xlabel(\"Plies - Game Tree Depth level\")\nplt.ylabel(\"Operations\")\nplt.legend()\nplt.savefig(\"TimeComplexityComparison.png\")\nplt.show()" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.power", "numpy.sqrt", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace" ] ]
steppenwolf0/primerDesign
[ "1d1a7ea07ece17cca6f5a24133dd5389acc9ae3d" ]
[ "code/outputVal.py" ]
[ "# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nimport math\nimport random\nimport sys\nfrom IPython.display import display, Image\nfrom scipy import ndimage\nfrom six.moves.urllib.request import urlretrieve\nfrom six.moves import cPickle as pickle\nfrom funcCNN import *\n\ndef ouputVal(oneHot_test_labels,results,filter_width,kfoldIndex,limit,var,test_accuracy,valid_accuracy,iterCurve,validCurve,trainingCurve):\n\tfpr=[]\n\ttpr=[]\n\ttresholds=[]\n\tfrom sklearn.metrics import roc_curve, auc\n\tfpr, tpr, tresholds = roc_curve(oneHot_test_labels[:, 1], results[:, 1])\n\troc_auc = auc(fpr, tpr)\n\n\timport matplotlib as mpl\n\tmpl.use('Agg')\n\timport matplotlib.pyplot as plt\n\tplt.title('Receiver Operating Characteristic')\n\tplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n\tplt.legend(loc = 'lower right')\n\tplt.plot([0, 1], [0, 1],'r--')\n\tplt.xlim([0, 1])\n\tplt.ylim([0, 1])\n\tplt.ylabel('True Positive Rate')\n\tplt.xlabel('False Positive Rate')\n\tplt.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'ROC.png')\n\tplt.clf()\n\n\timport matplotlib as mpl2\n\tmpl2.use('Agg')\n\timport matplotlib.pyplot as plt2\n\tplt2.title('Accuracy')\n\tplt2.plot(iterCurve, trainingCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt2.xlabel('Iterations')\n\tplt2.ylabel('Training Accuracy')\n\tplt2.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'Training_Accuracy.png')\n\tplt2.clf()\n\n\timport matplotlib as mpl3\n\tmpl3.use('Agg')\n\timport matplotlib.pyplot as plt3\n\tplt3.title('Accuracy')\n\tplt3.plot(iterCurve, validCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt3.xlabel('Iterations')\n\tplt3.ylabel('Valid Accuracy')\n\tplt3.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'Valid_Accuracy.png')\n\tplt3.clf()\n\n\tsaveMatrix('results_'+str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'%1.4f_'%(roc_auc)+'%1.4f_'%(valid_accuracy)+'%1.4f'%(test_accuracy),results)\n\t\ndef ouputValA(oneHot_test_labels,results,filter_width,kfoldIndex,limit,var,test_accuracy,valid_accuracy,iterCurve,validCurve,trainingCurve,cross_entropyCurve):\n\tfpr=[]\n\ttpr=[]\n\ttresholds=[]\n\tfrom sklearn.metrics import roc_curve, auc\n\tfpr, tpr, tresholds = roc_curve(oneHot_test_labels[:, 1], results[:, 1])\n\troc_auc = auc(fpr, tpr)\n\n\timport matplotlib as mpl\n\tmpl.use('Agg')\n\timport matplotlib.pyplot as plt\n\tplt.title('Receiver Operating Characteristic')\n\tplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n\tplt.legend(loc = 'lower right')\n\tplt.plot([0, 1], [0, 1],'r--')\n\tplt.xlim([0, 1])\n\tplt.ylim([0, 1])\n\tplt.ylabel('True Positive Rate')\n\tplt.xlabel('False Positive Rate')\n\tplt.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'ROC.png')\n\tplt.clf()\n\n\timport matplotlib as mpl2\n\tmpl2.use('Agg')\n\timport matplotlib.pyplot as plt2\n\tplt2.title('Accuracy')\n\tplt2.plot(iterCurve, trainingCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt2.xlabel('Iterations')\n\tplt2.ylabel('Training 
Accuracy')\n\tplt2.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'Training_Accuracy.png')\n\tplt2.clf()\n\n\n\n\timport matplotlib as mpl3\n\tmpl3.use('Agg')\n\timport matplotlib.pyplot as plt3\n\tplt3.title('Accuracy')\n\tplt3.plot(iterCurve, validCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt3.xlabel('Iterations')\n\tplt3.ylabel('Valid Accuracy')\n\tplt3.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'Valid_Accuracy.png')\n\tplt3.clf()\n\n\tsaveMatrix('results_'+str(filter_width)+'_'+str(kfoldIndex)+'_'+str(var)\n\t+'_'+'%1.2f_'%(limit)+'%1.4f_'%(roc_auc)+'%1.4f_'%(valid_accuracy)+'%1.4f'%(test_accuracy),results)\n\ndef ouputValIter(oneHot_test_labels,results,filter_width,kfoldIndex,\n\t\t\t\t limit,iterMax,test_accuracy,valid_accuracy,iterCurve,\n\t\t\t\t validCurve,trainingCurve,typeProgram):\n\tfpr=[]\n\ttpr=[]\n\ttresholds=[]\n\tfrom sklearn.metrics import roc_curve, auc\n\tfpr, tpr, tresholds = roc_curve(oneHot_test_labels[:, 1], results[:, 1])\n\troc_auc = auc(fpr, tpr)\n\n\timport matplotlib as mpl\n\tmpl.use('Agg')\n\timport matplotlib.pyplot as plt\n\tplt.title('Receiver Operating Characteristic')\n\tplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n\tplt.legend(loc = 'lower right')\n\tplt.plot([0, 1], [0, 1],'r--')\n\tplt.xlim([0, 1])\n\tplt.ylim([0, 1])\n\tplt.ylabel('True Positive Rate')\n\tplt.xlabel('False Positive Rate')\n\tplt.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'ROC.png')\n\tplt.clf()\n\n\timport matplotlib as mpl2\n\tmpl2.use('Agg')\n\timport matplotlib.pyplot as plt2\n\tplt2.title('Accuracy')\n\tplt2.plot(iterCurve, trainingCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt2.xlabel('Iterations')\n\tplt2.ylabel('Training Accuracy')\n\tplt2.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Training_Accuracy.png')\n\tplt2.clf()\n\n\timport matplotlib as mpl3\n\tmpl3.use('Agg')\n\timport matplotlib.pyplot as plt3\n\tplt3.title('Accuracy')\n\tplt3.plot(iterCurve, validCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt3.xlabel('Iterations')\n\tplt3.ylabel('Valid Accuracy')\n\tplt3.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Valid_Accuracy.png')\n\tplt3.clf()\n\n\tsaveMatrix('results_'+str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'%1.4f_'%(roc_auc)+'%1.4f_'%(valid_accuracy)+'%1.4f'%(test_accuracy),results)\n\t\ndef ouputValIterA(oneHot_test_labels,results,filter_width,kfoldIndex,\n\t\t\t\t limit,iterMax,test_accuracy,valid_accuracy,iterCurve,\n\t\t\t\t validCurve,trainingCurve,typeProgram,cross_entropyCurve):\n\tfpr=[]\n\ttpr=[]\n\ttresholds=[]\n\tfrom sklearn.metrics import roc_curve, auc\n\tfpr, tpr, tresholds = roc_curve(oneHot_test_labels[:, 1], results[:, 1])\n\troc_auc = auc(fpr, tpr)\n\n\timport matplotlib as mpl\n\tmpl.use('Agg')\n\timport matplotlib.pyplot as plt\n\tplt.title('Receiver Operating Characteristic')\n\tplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n\tplt.legend(loc = 'lower right')\n\tplt.plot([0, 1], [0, 1],'r--')\n\tplt.xlim([0, 1])\n\tplt.ylim([0, 1])\n\tplt.ylabel('True Positive Rate')\n\tplt.xlabel('False Positive 
Rate')\n\tplt.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'ROC.png')\n\tplt.clf()\n\n\timport matplotlib as mpl2\n\tmpl2.use('Agg')\n\timport matplotlib.pyplot as plt2\n\tplt2.title('Accuracy')\n\tplt2.plot(iterCurve, trainingCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt2.xlabel('Iterations')\n\tplt2.ylabel('Training Accuracy')\n\tplt2.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Training_Accuracy.png')\n\tplt2.clf()\n\n\timport matplotlib as mpl3\n\tmpl3.use('Agg')\n\timport matplotlib.pyplot as plt3\n\tplt3.title('Accuracy')\n\tplt3.plot(iterCurve, validCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt3.xlabel('Iterations')\n\tplt3.ylabel('Valid Accuracy')\n\tplt3.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Valid_Accuracy.png')\n\tplt3.clf()\n\t\n\timport matplotlib as mpl4\n\tmpl4.use('Agg')\n\timport matplotlib.pyplot as plt4\n\tplt4.title('Accuracy')\n\tplt4.plot(iterCurve, cross_entropyCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt4.xlabel('Iterations')\n\tplt4.ylabel('cross_entropy')\n\tplt4.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'cross_entropy.png')\n\tplt4.clf()\n\n\tsaveMatrix('results_'+str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'%1.4f_'%(roc_auc)+'%1.4f_'%(valid_accuracy)+'%1.4f'%(test_accuracy),results)\n\t\ndef ouputValIterA(oneHot_test_labels,results,filter_width,kfoldIndex,\n\t\t\t\t limit,iterMax,test_accuracy,valid_accuracy,iterCurve,\n\t\t\t\t validCurve,trainingCurve,typeProgram,cross_entropyCurve,cross_entropyCurveT):\n\tfpr=[]\n\ttpr=[]\n\ttresholds=[]\n\tfrom sklearn.metrics import roc_curve, auc\n\tfpr, tpr, tresholds = roc_curve(oneHot_test_labels[:, 1], results[:, 1])\n\troc_auc = auc(fpr, tpr)\n\n\timport matplotlib as mpl\n\tmpl.use('Agg')\n\timport matplotlib.pyplot as plt\n\tplt.title('Receiver Operating Characteristic')\n\tplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n\tplt.legend(loc = 'lower right')\n\tplt.plot([0, 1], [0, 1],'r--')\n\tplt.xlim([0, 1])\n\tplt.ylim([0, 1])\n\tplt.ylabel('True Positive Rate')\n\tplt.xlabel('False Positive Rate')\n\tplt.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'ROC.png')\n\tplt.clf()\n\n\timport matplotlib as mpl2\n\tmpl2.use('Agg')\n\timport matplotlib.pyplot as plt2\n\tplt2.title('Accuracy Training')\n\tplt2.plot(iterCurve, trainingCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt2.xlabel('Iterations')\n\tplt2.ylabel('Training Accuracy')\n\tplt2.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Training_Accuracy.png')\n\tplt2.clf()\n\n\timport matplotlib as mpl3\n\tmpl3.use('Agg')\n\timport matplotlib.pyplot as plt3\n\tplt3.title('Accuracy Validation')\n\tplt3.plot(iterCurve, validCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt3.xlabel('Iterations')\n\tplt3.ylabel('Valid Accuracy')\n\tplt3.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'Valid_Accuracy.png')\n\tplt3.clf()\n\t\n\timport matplotlib as 
mpl4\n\tmpl4.use('Agg')\n\timport matplotlib.pyplot as plt4\n\tplt4.title('cross_entropy Validation')\n\tplt4.plot(iterCurve, cross_entropyCurve, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt4.xlabel('Iterations')\n\tplt4.ylabel('cross_entropy')\n\tplt4.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'cross_entropy.png')\n\tplt4.clf()\n\n\timport matplotlib as mpl5\n\tmpl5.use('Agg')\n\timport matplotlib.pyplot as plt5\n\tplt5.title('cross_entropy Training')\n\tplt5.plot(iterCurve, cross_entropyCurveT, 'r')\n\t#plt.plot([0,1],[0,1],'r--')\n\t#plt.xlim([-0.1,1.2])\n\t#plt.ylim([-0.1,1.2])\n\tplt5.xlabel('Iterations')\n\tplt5.ylabel('cross_entropyT')\n\tplt5.savefig(str(filter_width)+'_'+str(kfoldIndex)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'cross_entropyT.png')\n\tplt5.clf()\n\n\tsaveMatrix('results_'+str(filter_width)+'_'+str(kfoldIndex)+'_'+str(iterMax)+'_'+typeProgram\n\t+'_'+'%1.2f_'%(limit)+'%1.4f_'%(roc_auc)+'%1.4f_'%(valid_accuracy)+'%1.4f'%(test_accuracy),results)" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "sklearn.metrics.auc", "sklearn.metrics.roc_curve" ] ]
njanakiev/geospatial-storytelling
[ "8485d13b49a4dbfae41cf0bacf862520ad0a5906" ]
[ "src/point_heatmap.py" ]
[ "import os\nfrom mpl_toolkits.basemap.pyproj import Proj\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\n\n\n\n# Project data to New York Long Island SRS (Spatial Reference System)\n# http://www.spatialreference.org/ref/epsg/2263/\np = Proj(init=\"epsg:2263\")\n\n# Load GPS data from url or file\n#f = urllib2.urlopen(\"https://feeds.citibikenyc.com/stations/stations.json\")\n#jsonData = json.load(f)\nwith open('../data/citibike.json', 'r') as f:\n\tjsonData = json.load(f)\n\nX = []\nfor i in range(len(jsonData['stationBeanList'])):\n\tlon = jsonData['stationBeanList'][i]['longitude']\n\tlat = jsonData['stationBeanList'][i]['latitude']\n\tX.append((lat, lon))\nX = np.array(X)\nx, y = p(X[:, 1], X[:, 0]) # Projection of coordinates\n\n\n\n# Get underlying nyc mapping data\nnyc_neighborhoods = \"../nyc_data/nyc_neighborhoods.json\"\npolygons = []\nwith open(nyc_neighborhoods, 'r') as f:\n\tjsonData = json.load(f)\n\tfor feature in jsonData['features']:\n\t\tfor polygon in feature['geometry']['coordinates']:\n\t\t\tpolygon = np.array(polygon).squeeze()\n\t\t\tpolygon[:,0], polygon[:,1] = p(polygon[:,0], polygon[:,1]) # Projection of coordinates\n\t\t\tpolygons.append(polygon)\t\t\t\n\n\nheatmap, xedges, yedges = np.histogram2d(y, x, bins=20)\nextent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]\n\n\n\n# Draw visualization\nfig, ax = plt.subplots()\nfig.set_size_inches(20, 20)\nfor polygon in polygons:\n\tax.plot(polygon[:,0], polygon[:,1], 'k', alpha=0.3)\n\nax.imshow(heatmap, extent=extent, interpolation='none', cmap=plt.get_cmap('Greys'))\nax.scatter(x, y, marker=\".\", edgecolors='none', s=5, c='k', alpha=0.9)\n\nplt.axis('equal')\nplt.axis('off')\nplt.xlim([extent[0], extent[1]])\nplt.ylim([extent[3], extent[2]])\nplt.savefig(\"../visualization/heatmap_visualization.png\", bbox_inches='tight', pad_inches=0)" ]
[ [ "numpy.histogram2d", "numpy.array", "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.subplots", "matplotlib.pyplot.axis" ] ]
bupticybee/icyChessZero
[ "1be5cfb3ab7dfad11925ae469b86c990efab9d16" ]
[ "cchess/board.py" ]
[ "# -*- coding: utf-8 -*-\n\n'''\nCopyright (C) 2014 walker li <[email protected]>\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport sys\nimport copy\nimport numpy as np\n\nfrom cchess.exception import *\nfrom cchess.piece import *\nfrom cchess.move import *\n\n#-----------------------------------------------------#\nFULL_INIT_FEN = 'rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR w - - 0 1'\n\n#-----------------------------------------------------#\n_text_board = [\n#u' 1 2 3 4 5 6 7 8 9',\nu'0 โ”Œโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”€โ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”',\nu' โ”‚ โ”‚ โ”‚ โ”‚๏ผผโ”‚๏ผโ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'1 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ€ปโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚๏ผโ”‚๏ผผโ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'2 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'3 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'4 โ”œโ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”ค',\nu' โ”‚ใ€€ ใ€€ โ”‚',\nu'5 โ”œโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”ฌโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'6 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'7 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚๏ผผโ”‚๏ผโ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'8 โ”œโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ€ปโ”€โ”ผโ”€โ”ผโ”€โ”ผโ”€โ”ค',\nu' โ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚๏ผโ”‚๏ผผโ”‚ใ€€โ”‚ใ€€โ”‚ใ€€โ”‚',\nu'9 โ””โ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”€โ”€โ”ดโ”€โ”ดโ”€โ”ดโ”€โ”˜',\nu' 0 1 2 3 4 5 6 7 8'\n#u' ไน ๅ…ซ ไธƒ ๅ…ญ ไบ” ๅ›› ไธ‰ ไบŒ ไธ€'\n]\n\n_fench_txt_name_dict = {\n 'K': u\"ๅธ…\",\n 'k': u\"ๅฐ†\",\n 'A': u\"ไป•\",\n 'a': u\"ๅฃซ\",\n 'B': u\"็›ธ\", \n 'b': u\"่ฑก\",\n 'N': u\"้ฉฌ\",\n 'n': u\"็ขผ\",\n 'R': u\"่ฝฆ\",\n 'r': u\"็ —\",\n 'C': u\"็‚ฎ\", \n 'c': u\"็ ฒ\",\n 'P': u\"ๅ…ต\", \n 'p': u\"ๅ’\" \n\n }\n#-----------------------------------------------------#\n\ndef _pos_to_text_board_pos(pos):\n return Pos(2*pos.x+2, (9 - pos.y)*2) \n\ndef _fench_to_txt_name(fench) :\n return _fench_txt_name_dict[fench]\n \n#-----------------------------------------------------#\nclass BaseChessBoard(object) :\n def __init__(self, fen = None):\n self.clear()\n if fen: self.from_fen(fen)\n \n def clear(self): \n self._board = [[None for x in range(9)] for y in range(10)]\n self.move_side = ChessSide.RED \n \n def copy(self):\n return copy.deepcopy(self)\n \n def put_fench(self, fench, pos):\n if self._board[pos.y][pos.x] != None:\n return False \n \n self._board[pos.y][pos.x] = fench\n \n return True\n \n def get_fench(self, pos):\n return self._board[pos.y][pos.x]\n \n def get_piece(self, pos): \n fench = self._board[pos.y][pos.x]\n \n if not fench:\n return None\n \n return Piece.create(self, fench, pos)\n \n def is_valid_move_t(self, move_t):\n pos_from, pos_to = move_t\n return self.is_valid_move(pos_from, pos_to)\n \n def 
is_valid_move(self, pos_from, pos_to):\n \n '''\n ๅช่ฟ›่กŒๆœ€ๅŸบๆœฌ็š„่ตฐๅญ่ง„ๅˆ™ๆฃ€ๆŸฅ๏ผŒไธๅฏนๆฏไธชๅญ็š„่ง„ๅˆ™่ฟ›่กŒๆฃ€ๆŸฅ๏ผŒไปฅๅŠ ๅฟซๆ–‡ไปถๅŠ ่ฝฝไน‹็ฑป็š„้€Ÿๅบฆ\n '''\n \n if not (0 <= pos_to.x <= 8): return False\n if not (0 <= pos_to.y <= 9): return False\n \n fench_from = self._board[pos_from.y][pos_from.x]\n if not fench_from :\n return False \n \n _, from_side = fench_to_species(fench_from)\n \n #move_side ไธๆ˜ฏNoneๅ€ผๆ‰ไผš่ฟ›่กŒ่ตฐๅญ้ขœ่‰ฒๆฃ€ๆŸฅ๏ผŒ่ฟ™ๆ ทๅค„็†ๆŸไบ›็‰นๆฎŠ็š„ๅญ˜ๅ‚จๆ ผๅผๆ—ถไผšๅค„็†ๆฏ”่พƒ่ฟ…้€Ÿ\n if self.move_side and (from_side != self.move_side) :\n return False \n \n fench_to = self._board[pos_to.y][pos_to.x]\n if not fench_to :\n return True \n \n _, to_side = fench_to_species(fench_to)\n \n return (from_side != to_side) \n \n def _move_piece(self, pos_from, pos_to):\n \n fench = self._board[pos_from.y][pos_from.x]\n self._board[pos_to.y][pos_to.x] = fench\n self._board[pos_from.y][pos_from.x] = None\n \n return fench\n \n def move(self, pos_from, pos_to):\n pos_from.y = 9 - pos_from.y\n pos_to.y = 9 - pos_to.y\n if not self.is_valid_move(pos_from, pos_to):\n return None \n \n board = self.copy()\n fench = self.get_fench(pos_to)\n self._move_piece(pos_from, pos_to)\n \n return Move(board, pos_from, pos_to)\n \n def move_iccs(self,move_str):\n move_from, move_to = Move.from_iccs(move_str)\n return move(move_from, move_to)\n \n def move_chinese(self,move_str):\n move_from, move_to = Move.from_chinese(self, move_str)\n return move(move_from, move_to)\n \n def next_turn(self) :\n if self.move_side == None :\n return None\n \n self.move_side = ChessSide.next_side(self.move_side)\n \n return self.move_side\n \n def from_fen(self, fen):\n \n num_set = set(('1', '2', '3', '4', '5', '6', '7', '8', '9'))\n ch_set = set(('k','a','b','n','r','c','p'))\n \n self.clear()\n \n if not fen or fen == '':\n return\n \n fen = fen.strip()\n \n x = 0\n y = 9\n\n for i in range(0, len(fen)):\n ch = fen[i]\n \n if ch == ' ': break\n elif ch == '/':\n y -= 1\n x = 0\n if y < 0: break\n elif ch in num_set:\n x += int(ch)\n if x > 8: x = 8\n elif ch.lower() in ch_set:\n if x <= 8:\n self.put_fench(ch, Pos(x, y)) \n x += 1\n else:\n return False\n \n fens = fen.split() \n \n self.move_side = None\n if (len(fens) >= 2) and (fens[1] == 'b') :\n self.move_side = ChessSide.BLACK\n else:\n self.move_side = ChessSide.RED \n \n if len(fens) >= 6 :\n self.round = int(fens[5])\n else:\n self.round = 1 \n \n return True \n \n def count_x_line_in(self, y, x_from, x_to):\n return reduce(lambda count, fench: count+1 if fench else count, self.x_line_in(y, x_from, x_to), 0)\n \n def count_y_line_in(self, x, y_from, y_to):\n return reduce(lambda count, fench: count+1 if fench else count, self.y_line_in(x, y_from, y_to), 0)\n \n def x_line_in(self, y, x_from, x_to):\n step = 1 if x_to > x_from else -1\n return [ self._board[y][x] for x in range(x_from+step, x_to, step) ]\n \n def y_line_in(self, x, y_from, y_to):\n step = 1 if y_to > y_from else -1\n return [ self._board[y][x] for y in range(y_from+step, y_to, step) ]\n \n def to_fen(self):\n return self.to_short_fen() + ' - - 0 1'\n \n def to_short_fen(self):\n fen = ''\n count = 0\n for y in range(9, -1, -1):\n for x in range(9):\n fench = self._board[y][x]\n if fench: \n if count is not 0:\n fen += str(count)\n count = 0\n fen += fench\n else:\n count += 1\n \n if count > 0:\n fen += str(count)\n count = 0\n \n if y > 0: fen += '/'\n \n if self.move_side is ChessSide.BLACK:\n fen += ' b'\n elif self.move_side is ChessSide.RED :\n fen += ' w'\n 
else :\n raise CChessException('Move Side Error' + str(self.move_side))\n \n return fen\n\n def dump_board(self):\n \n board_str = _text_board[:]\n \n y = 0\n for line in self._board:\n x = 0\n for ch in line:\n if ch : \n pos = _pos_to_text_board_pos(Pos(x,y))\n new_text=board_str[pos.y][:pos.x] + _fench_to_txt_name(ch) + board_str[pos.y][pos.x+1:]\n board_str[pos.y] = new_text\n x += 1 \n y += 1\n \n return board_str\n \n def print_board(self):\n \n board_txt = self.dump_board()\n print()\n for line in board_txt:\n print(line)\n print()\n \n def get_board_arr(self):\n return np.asarray(self._board[::-1])\n \n#-----------------------------------------------------#\n\nclass ChessBoard(BaseChessBoard):\n def __init__(self, fen = None): \n super(ChessBoard, self).__init__(fen)\n \n def put_fench(self, fench, pos):\n self._board[pos.y][pos.x] = fench\n \n def is_valid_move(self, pos_from, pos_to):\n if not super(ChessBoard, self).is_valid_move(pos_from, pos_to):\n return False\n \n piece = self.get_piece(pos_from)\n if not piece.is_valid_move(pos_to):\n return False\n return True\n \n def is_checked_move(self, pos_from, pos_to):\n board = self.copy()\n board._move_piece(pos_from, pos_to)\n return board.is_checked()\n \n def is_checked(self):\n king = self.get_king(self.move_side)\n king.create_moves()\n if not king : return 0\n killers = self.get_side_pieces(ChessSide.next_side(self.move_side))\n '''\n for piece in killers:\n if piece.is_valid_move(Pos(king.x, king.y)):\n #print piece.x, piece.y, piece.fench\n return 1\n return 0\n ''' \n return reduce(lambda count, piece : count+1 if piece.is_valid_move(Pos(king.x, king.y)) else count, killers, 0) \n \n def is_checkmate(self):\n defenders = self.get_side_pieces(self.move_side)\n for piece in defenders :\n for move_it in piece.create_moves():\n if self.is_valid_move_t(move_it):\n if not self.is_checked_move(move_it[0], move_it[1]):\n return False\n return True\n \n def get_king(self, side):\n limit_y = ((0,1,2), (7,8,9))\n for x in (3,4,5):\n for y in limit_y[side]:\n fench = self._board[y][x]\n if not fench :\n continue\n if fench.lower() == 'k':\n return Piece.create(self, fench, Pos(x,y))\n return None\n \n def get_side_pieces(self, side):\n pieces = [] \n for x in range(9):\n for y in range(10):\n fench = self._board[y][x]\n if not fench :\n continue\n _, p_side = fench_to_species(fench)\n if p_side == side : \n pieces.append(Piece.create(self, fench, Pos(x,y)))\n return pieces\n \n \n#-----------------------------------------------------#\nif __name__ == '__main__':\n pass\n# \n# board = ChessBoard(FULL_INIT_FEN)\n# board.print_board()\n# \n# k = board.get_king(ChessSide.RED)\n# print (k.x, k.y) == (4,0)\n# k = board.get_king(ChessSide.BLACK)\n# print (k.x, k.y) == (4,9)\n# \n# print board.x_line_in(0, 0, 8)\n# print board.x_line_in(0, 8, 0)\n# #print board.x_line_in(9, 0, 10)\n# print board.y_line_in(4, -1, 10)\n# print board.y_line_in(4, 10, -1)\n# \n# print board.count_x_line_in(0, 0, 8) == 7\n# print board.count_y_line_in(4,0,9) == 2\n# print board.count_y_line_in(4,1,8) == 2\n# \n# print board.is_checked()\n# \n# print board.copy().move(Pos(7,2),Pos(4,2)).to_chinese() == u'็‚ฎไบŒๅนณไบ”'\n# print board.copy().move(Pos(1,2),Pos(1,1)).to_chinese() == u'็‚ฎๅ…ซ้€€ไธ€'\n# print board.copy().move(Pos(7,2),Pos(7,6)).to_chinese() == u'็‚ฎไบŒ่ฟ›ๅ››'\n# print board.copy().move(Pos(7,7),Pos(4,7)).to_chinese() == u'็‚ฎ๏ผ˜ๅนณ๏ผ•'\n# print board.copy().move(Pos(7,7),Pos(7,3)).to_chinese() == u'็‚ฎ๏ผ˜่ฟ›๏ผ”'\n# print 
board.copy().move(Pos(6,3),Pos(6,4)).to_chinese() == u'ๅ…ตไธ‰่ฟ›ไธ€'\n# print board.copy().move(Pos(8,0),Pos(8,1)).to_chinese() == u'่ฝฆไธ€่ฟ›ไธ€'\n# print board.copy().move(Pos(0,9),Pos(0,8)).to_chinese() == u'่ฝฆ๏ผ‘่ฟ›๏ผ‘'\n# print board.copy().move(Pos(4,0),Pos(4,1)).to_chinese() == u'ๅธ…ไบ”่ฟ›ไธ€'\n# print board.copy().move(Pos(4,9),Pos(4,8)).to_chinese() == u'ๅฐ†๏ผ•่ฟ›๏ผ‘'\n# print board.copy().move(Pos(2,0),Pos(4,2)).to_chinese() == u'็›ธไธƒ่ฟ›ไบ”'\n# print board.copy().move(Pos(5,0),Pos(4,1)).to_chinese() == u'ไป•ๅ››่ฟ›ไบ”'\n# print board.copy().move(Pos(7,0),Pos(6,2)).to_chinese() == u'้ฉฌไบŒ่ฟ›ไธ‰'\n# " ]
[ [ "numpy.asarray" ] ]
data-hound/scikeras
[ "d216d083679d5113b85a4e338d4346b9403e3a71" ]
[ "tests/multi_output_models.py" ]
[ "from typing import List\n\nimport numpy as np\n\nfrom sklearn.utils.multiclass import type_of_target\nfrom tensorflow.keras.backend import floatx as tf_floatx\n\nfrom scikeras.utils.transformers import ClassifierLabelEncoder\nfrom scikeras.wrappers import KerasClassifier\n\n\nclass MultiLabelTransformer(ClassifierLabelEncoder):\n def __init__(\n self, split: bool = True,\n ):\n super().__init__()\n self.split = split\n\n def fit(self, y: np.ndarray) -> \"MultiLabelTransformer\":\n self._target_type = type_of_target(y)\n if self._target_type not in (\"multilabel-indicator\", \"multiclass-multioutput\"):\n return super().fit(y)\n # y = array([1, 1, 1, 0], [0, 0, 1, 1])\n # each col will be processed as multiple binary classifications\n self.n_outputs_ = y.shape[1]\n self.n_outputs_expected_ = None if not self.split else self.n_outputs_\n self._y_dtype = y.dtype\n self.classes_ = [np.array([0, 1])] * y.shape[1]\n self.n_classes_ = [2] * y.shape[1]\n return self\n\n def transform(self, y: np.ndarray) -> List[np.ndarray]:\n if self._target_type not in (\"multilabel-indicator\", \"multiclass-multioutput\"):\n return super().transform(y)\n y = y.astype(tf_floatx())\n if self.split:\n return np.split(y, y.shape[1], axis=1)\n return y\n\n def inverse_transform(\n self, y: List[np.ndarray], return_proba: bool = False\n ) -> np.ndarray:\n if self._target_type not in (\"multilabel-indicator\", \"multiclass-multioutput\"):\n return super().inverse_transform(y, return_proba=return_proba)\n if not return_proba and self.split:\n y = [np.argmax(y_, axis=1).astype(self._y_dtype, copy=False) for y_ in y]\n y = np.squeeze(np.column_stack(y))\n if self._target_type == \"multilabel-indicator\":\n # RandomForestClassifier and sklearn's MultiOutputClassifier always return int64\n # for multilabel-indicator\n y = y.astype(int)\n return y\n\n\nclass MultiOutputClassifier(KerasClassifier):\n \"\"\"Extend KerasClassifier with the ability to process\n \"multilabel-indicator\" by mapping to multiple Keras outputs.\n \"\"\"\n\n def __init__(self, model=None, split: bool = True, **kwargs):\n super().__init__(model=model, **kwargs)\n self.split = split\n\n @property\n def target_encoder(self) -> MultiLabelTransformer:\n return MultiLabelTransformer(split=self.split)\n\n def score(self, X, y):\n \"\"\"Taken from sklearn.multiouput.MultiOutputClassifier\n \"\"\"\n if self.target_type_ != \"multilabel-indicator\":\n return super().score(X, y)\n return np.mean(np.all(y == self.predict(X), axis=1))\n" ]
[ [ "numpy.array", "numpy.split", "sklearn.utils.multiclass.type_of_target", "tensorflow.keras.backend.floatx", "numpy.argmax", "numpy.column_stack" ] ]
ssteo/streamlit
[ "fde1b548e4bf2d2e5a97b5c3fcf655d43134b342" ]
[ "lib/streamlit/elements/image.py" ]
[ "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image marshalling.\"\"\"\n\nimport imghdr\nimport io\nimport mimetypes\nfrom typing import cast\nfrom urllib.parse import urlparse\n\nimport numpy as np\nfrom PIL import Image, ImageFile\n\nimport streamlit\nfrom streamlit import config\nfrom streamlit.errors import StreamlitAPIException, StreamlitDeprecationWarning\nfrom streamlit.logger import get_logger\nfrom streamlit.media_file_manager import media_file_manager\nfrom streamlit.proto.Image_pb2 import ImageList as ImageListProto\n\nLOGGER = get_logger(__name__)\n\n# This constant is related to the frontend maximum content width specified\n# in App.jsx main container\n# 730 is the max width of element-container in the frontend, and 2x is for high\n# DPI.\nMAXIMUM_CONTENT_WIDTH = 2 * 730\n\n\nclass ImageMixin:\n def image(\n self,\n image,\n caption=None,\n width=None,\n use_column_width=None,\n clamp=False,\n channels=\"RGB\",\n output_format=\"auto\",\n ):\n \"\"\"Display an image or list of images.\n\n Parameters\n ----------\n image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]\n Monochrome image of shape (w,h) or (w,h,1)\n OR a color image of shape (w,h,3)\n OR an RGBA image of shape (w,h,4)\n OR a URL to fetch the image from\n OR a path of a local image file\n OR an SVG XML string like `<svg xmlns=...</svg>`\n OR a list of one of the above, to display multiple images.\n caption : str or list of str\n Image caption. If displaying multiple images, caption should be a\n list of captions (one for each image).\n width : int or None\n Image width. None means use the image width,\n but do not exceed the width of the column.\n Should be set for SVG images, as they have no default image width.\n use_column_width : 'auto' or 'always' or 'never' or bool\n If 'auto', set the image's width to its natural size,\n but do not exceed the width of the column.\n If 'always' or True, set the image's width to the column width.\n If 'never' or False, set the image's width to its natural size.\n Note: if set, `use_column_width` takes precedence over the `width` parameter.\n clamp : bool\n Clamp image pixel values to a valid range ([0-255] per channel).\n This is only meaningful for byte array images; the parameter is\n ignored for image URLs. If this is not set, and an image has an\n out-of-range value, an error will be thrown.\n channels : 'RGB' or 'BGR'\n If image is an nd.array, this parameter denotes the format used to\n represent color information. Defaults to 'RGB', meaning\n `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and\n `image[:, :, 2]` is blue. For images coming from libraries like\n OpenCV you should set this to 'BGR', instead.\n output_format : 'JPEG', 'PNG', or 'auto'\n This parameter specifies the format to use when transferring the\n image data. 
Photos should use the JPEG format for lossy compression\n while diagrams should use the PNG format for lossless compression.\n Defaults to 'auto' which identifies the compression type based\n on the type and format of the image argument.\n\n Example\n -------\n >>> from PIL import Image\n >>> image = Image.open('sunrise.jpg')\n >>>\n >>> st.image(image, caption='Sunrise by the mountains')\n\n .. output::\n https://static.streamlit.io/0.61.0-yRE1/index.html?id=Sn228UQxBfKoE5C7A7Y2Qk\n height: 630px\n\n \"\"\"\n\n if use_column_width == \"auto\" or (use_column_width is None and width is None):\n width = -3\n elif use_column_width == \"always\" or use_column_width == True:\n width = -2\n elif width is None:\n width = -1\n elif width <= 0:\n raise StreamlitAPIException(\"Image width must be positive.\")\n\n image_list_proto = ImageListProto()\n marshall_images(\n self.dg._get_delta_path_str(),\n image,\n caption,\n width,\n image_list_proto,\n clamp,\n channels,\n output_format,\n )\n return self.dg._enqueue(\"imgs\", image_list_proto)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n\n\ndef _image_may_have_alpha_channel(image):\n if image.mode in (\"RGBA\", \"LA\", \"P\"):\n return True\n else:\n return False\n\n\ndef _format_from_image_type(image, output_format):\n output_format = output_format.upper()\n if output_format == \"JPEG\" or output_format == \"PNG\":\n return output_format\n\n # We are forgiving on the spelling of JPEG\n if output_format == \"JPG\":\n return \"JPEG\"\n\n if _image_may_have_alpha_channel(image):\n return \"PNG\"\n\n return \"JPEG\"\n\n\ndef _PIL_to_bytes(image, format=\"JPEG\", quality=100):\n tmp = io.BytesIO()\n\n # User must have specified JPEG, so we must convert it\n if format == \"JPEG\" and _image_may_have_alpha_channel(image):\n image = image.convert(\"RGB\")\n\n image.save(tmp, format=format, quality=quality)\n\n return tmp.getvalue()\n\n\ndef _BytesIO_to_bytes(data):\n data.seek(0)\n return data.getvalue()\n\n\ndef _np_array_to_bytes(array, output_format=\"JPEG\"):\n img = Image.fromarray(array.astype(np.uint8))\n format = _format_from_image_type(img, output_format)\n\n return _PIL_to_bytes(img, format)\n\n\ndef _4d_to_list_3d(array):\n return [array[i, :, :, :] for i in range(0, array.shape[0])]\n\n\ndef _verify_np_shape(array):\n if len(array.shape) not in (2, 3):\n raise StreamlitAPIException(\"Numpy shape has to be of length 2 or 3.\")\n if len(array.shape) == 3 and array.shape[-1] not in (1, 3, 4):\n raise StreamlitAPIException(\n \"Channel can only be 1, 3, or 4 got %d. 
Shape is %s\"\n % (array.shape[-1], str(array.shape))\n )\n\n # If there's only one channel, convert is to x, y\n if len(array.shape) == 3 and array.shape[-1] == 1:\n array = array[:, :, 0]\n\n return array\n\n\ndef _normalize_to_bytes(data, width, output_format):\n image = Image.open(io.BytesIO(data))\n actual_width, actual_height = image.size\n format = _format_from_image_type(image, output_format)\n if output_format.lower() == \"auto\":\n ext = imghdr.what(None, data)\n mimetype = mimetypes.guess_type(\"image.%s\" % ext)[0]\n else:\n mimetype = \"image/\" + format.lower()\n\n if width < 0 and actual_width > MAXIMUM_CONTENT_WIDTH:\n width = MAXIMUM_CONTENT_WIDTH\n\n if width > 0 and actual_width > width:\n new_height = int(1.0 * actual_height * width / actual_width)\n image = image.resize((width, new_height))\n data = _PIL_to_bytes(image, format=format, quality=90)\n mimetype = \"image/\" + format.lower()\n\n return data, mimetype\n\n\ndef _clip_image(image, clamp):\n data = image\n if issubclass(image.dtype.type, np.floating):\n if clamp:\n data = np.clip(image, 0, 1.0)\n else:\n if np.amin(image) < 0.0 or np.amax(image) > 1.0:\n raise RuntimeError(\"Data is outside [0.0, 1.0] and clamp is not set.\")\n data = data * 255\n else:\n if clamp:\n data = np.clip(image, 0, 255)\n else:\n if np.amin(image) < 0 or np.amax(image) > 255:\n raise RuntimeError(\"Data is outside [0, 255] and clamp is not set.\")\n return data\n\n\ndef image_to_url(\n image, width, clamp, channels, output_format, image_id, allow_emoji=False\n):\n # PIL Images\n if isinstance(image, ImageFile.ImageFile) or isinstance(image, Image.Image):\n format = _format_from_image_type(image, output_format)\n data = _PIL_to_bytes(image, format)\n\n # BytesIO\n # Note: This doesn't support SVG. 
We could convert to png (cairosvg.svg2png)\n # or just decode BytesIO to string and handle that way.\n elif isinstance(image, io.BytesIO):\n data = _BytesIO_to_bytes(image)\n\n # Numpy Arrays (ie opencv)\n elif type(image) is np.ndarray:\n data = _verify_np_shape(image)\n data = _clip_image(data, clamp)\n\n if channels == \"BGR\":\n if len(data.shape) == 3:\n data = data[:, :, [2, 1, 0]]\n else:\n raise StreamlitAPIException(\n 'When using `channels=\"BGR\"`, the input image should '\n \"have exactly 3 color channels\"\n )\n\n data = _np_array_to_bytes(data, output_format=output_format)\n\n # Strings\n elif isinstance(image, str):\n # If it's a url, then set the protobuf and continue\n try:\n p = urlparse(image)\n if p.scheme:\n return image\n except UnicodeDecodeError:\n pass\n\n # Finally, see if it's a file.\n try:\n with open(image, \"rb\") as f:\n data = f.read()\n except:\n if allow_emoji:\n # This might be an emoji string, so just pass it to the frontend\n return image\n else:\n # Allow OS filesystem errors to raise\n raise\n\n # Assume input in bytes.\n else:\n data = image\n\n (data, mimetype) = _normalize_to_bytes(data, width, output_format)\n this_file = media_file_manager.add(data, mimetype, image_id)\n return this_file.url\n\n\ndef marshall_images(\n coordinates,\n image,\n caption,\n width,\n proto_imgs,\n clamp,\n channels=\"RGB\",\n output_format=\"auto\",\n):\n channels = channels.upper()\n\n # Turn single image and caption into one element list.\n if type(image) is list:\n images = image\n else:\n if type(image) == np.ndarray and len(image.shape) == 4:\n images = _4d_to_list_3d(image)\n else:\n images = [image]\n\n if type(caption) is list:\n captions = caption\n else:\n if isinstance(caption, str):\n captions = [caption]\n # You can pass in a 1-D Numpy array as captions.\n elif type(caption) == np.ndarray and len(caption.shape) == 1:\n captions = caption.tolist()\n # If there are no captions then make the captions list the same size\n # as the images list.\n elif caption is None:\n captions = [None] * len(images)\n else:\n captions = [str(caption)]\n\n assert type(captions) == list, \"If image is a list then caption should be as well\"\n assert len(captions) == len(images), \"Cannot pair %d captions with %d images.\" % (\n len(captions),\n len(images),\n )\n\n proto_imgs.width = width\n # Each image in an image list needs to be kept track of at its own coordinates.\n for coord_suffix, (image, caption) in enumerate(zip(images, captions)):\n proto_img = proto_imgs.imgs.add()\n if caption is not None:\n proto_img.caption = str(caption)\n\n # We use the index of the image in the input image list to identify this image inside\n # MediaFileManager. For this, we just add the index to the image's \"coordinates\".\n image_id = \"%s-%i\" % (coordinates, coord_suffix)\n\n is_svg = False\n if isinstance(image, str):\n # Unpack local SVG image file to an SVG string\n if image.endswith(\".svg\"):\n with open(image) as textfile:\n image = textfile.read()\n if image.strip().startswith(\"<svg\"):\n proto_img.markup = f\"data:image/svg+xml,{image}\"\n is_svg = True\n if not is_svg:\n proto_img.url = image_to_url(\n image, width, clamp, channels, output_format, image_id\n )\n" ]
[ [ "numpy.amin", "numpy.amax", "numpy.clip" ] ]
vangorade/pandas
[ "2b4bcf25f7bc61fe35387654a81908222e1db3da" ]
[ "pandas/core/frame.py" ]
[ "\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nfrom collections import abc\nimport datetime\nfrom io import StringIO\nimport itertools\nimport mmap\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AnyStr,\n Dict,\n FrozenSet,\n Hashable,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import algos as libalgos, lib, properties\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axes,\n Axis,\n ColspaceArgType,\n CompressionOptions,\n Dtype,\n FilePathOrBuffer,\n FloatFormatType,\n FormattersType,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n IndexLabel,\n Label,\n Level,\n PythonFuncType,\n Renamer,\n StorageOptions,\n Suffixes,\n ValueKeyFunc,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n deprecate_kwarg,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_axis_style_args,\n validate_bool_kwarg,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n construct_2d_arraylike_from_scalar,\n find_common_type,\n infer_dtype_from_scalar,\n invalidate_string_dtypes,\n maybe_box_datetimelike,\n maybe_convert_platform,\n maybe_downcast_to_dtype,\n maybe_infer_to_datetimelike,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n infer_dtype_from_object,\n is_bool_dtype,\n is_dataclass,\n is_datetime64_any_dtype,\n is_dict_like,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_sequence,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import isna, notna\n\nfrom pandas.core import algorithms, common as com, generic, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import (\n aggregate,\n reconstruct_func,\n relabel_result,\n transform,\n)\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.sparse import SparseFrameAccessor\nfrom pandas.core.construction import extract_array, sanitize_masked_array\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.indexes import base as ibase\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n PeriodIndex,\n ensure_index,\n ensure_index_from_sequences,\n)\nfrom pandas.core.indexes.multi import MultiIndex, maybe_droplevels\nfrom pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.internals.construction import (\n arrays_to_mgr,\n dataclasses_to_dicts,\n init_dict,\n init_ndarray,\n masked_rec_array_to_mgr,\n nested_data_to_arrays,\n reorder_arrays,\n sanitize_index,\n to_arrays,\n treat_as_nested,\n)\nfrom pandas.core.reshape.melt 
import melt\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index, lexsort_indexer, nargsort\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.formats import console, format as fmt\nfrom pandas.io.formats.info import BaseInfo, DataFrameInfo\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes\n\n from pandas.core.groupby.generic import DataFrameGroupBy\n from pandas.core.resample import Resampler\n\n from pandas.io.formats.style import Styler\n\n# ---------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = {\n \"axes\": \"index, columns\",\n \"klass\": \"DataFrame\",\n \"axes_single_arg\": \"{0 or 'index', 1 or 'columns'}\",\n \"axis\": \"\"\"axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index': apply function to each column.\n If 1 or 'columns': apply function to each row.\"\"\",\n \"inplace\": \"\"\"\n inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\",\n \"optional_by\": \"\"\"\n by : str or list of str\n Name or list of names to sort by.\n\n - if `axis` is 0 or `'index'` then `by` may contain index\n levels and/or column labels.\n - if `axis` is 1 or `'columns'` then `by` may contain column\n levels and/or index labels.\"\"\",\n \"optional_labels\": \"\"\"labels : array-like, optional\n New labels / index to conform the axis specified by 'axis' to.\"\"\",\n \"optional_axis\": \"\"\"axis : int or str, optional\n Axis to target. Can be either the axis name ('index', 'columns')\n or number (0, 1).\"\"\",\n \"replace_iloc\": \"\"\"\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\"\"\",\n}\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame or named Series objects with a database-style join.\n\nThe join is done on columns or indexes. If joining columns on\ncolumns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes\non indexes or indexes on a column or columns, the index will be passed on.\nWhen performing a cross merge, no column specifications to merge on are\nallowed.\n\nParameters\n----------%s\nright : DataFrame or named Series\n Object to merge with.\nhow : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'\n Type of merge to be performed.\n\n * left: use only keys from left frame, similar to a SQL left outer join;\n preserve key order.\n * right: use only keys from right frame, similar to a SQL right outer join;\n preserve key order.\n * outer: use union of keys from both frames, similar to a SQL full outer\n join; sort keys lexicographically.\n * inner: use intersection of keys from both frames, similar to a SQL inner\n join; preserve the order of the left keys.\n * cross: creates the cartesian product from both frames, preserves the order\n of the left keys.\n\n .. versionadded:: 1.2.0\n\non : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If `on` is None and not merging on indexes then this defaults\n to the intersection of the columns in both DataFrames.\nleft_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame. 
Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\nright_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\nleft_index : bool, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels.\nright_index : bool, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\nsort : bool, default False\n Sort the join keys lexicographically in the result DataFrame. If False,\n the order of the join keys depends on the join type (how keyword).\nsuffixes : list-like, default is (\"_x\", \"_y\")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\ncopy : bool, default True\n If False, avoid copy if possible.\nindicator : bool or str, default False\n If True, adds a column to the output DataFrame called \"_merge\" with\n information on the source of each row. The column can be given a different\n name by providing a string argument. The column will have a Categorical\n type with the value of \"left_only\" for observations whose merge key only\n appears in the left DataFrame, \"right_only\" for observations\n whose merge key only appears in the right DataFrame, and \"both\"\n if the observation's merge key is found in both DataFrames.\n\nvalidate : str, optional\n If specified, checks if merge is of specified type.\n\n * \"one_to_one\" or \"1:1\": check if merge keys are unique in both\n left and right datasets.\n * \"one_to_many\" or \"1:m\": check if merge keys are unique in left\n dataset.\n * \"many_to_one\" or \"m:1\": check if merge keys are unique in right\n dataset.\n * \"many_to_many\" or \"m:m\": allowed, but does not result in checks.\n\nReturns\n-------\nDataFrame\n A DataFrame of the two merged objects.\n\nSee Also\n--------\nmerge_ordered : Merge with optional filling/interpolation.\nmerge_asof : Merge on nearest keys.\nDataFrame.join : Similar method using indices.\n\nNotes\n-----\nSupport for specifying index levels as the `on`, `left_on`, and\n`right_on` parameters was added in version 0.23.0\nSupport for merging named Series objects was added in version 0.24.0\n\nExamples\n--------\n>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [1, 2, 3, 5]})\n>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [5, 6, 7, 8]})\n>>> df1\n lkey value\n0 foo 1\n1 bar 2\n2 baz 3\n3 foo 5\n>>> df2\n rkey value\n0 foo 5\n1 bar 6\n2 baz 7\n3 foo 8\n\nMerge df1 and df2 on the lkey and rkey columns. The value columns have\nthe default suffixes, _x and _y, appended.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2 with specified left and right suffixes\nappended to any overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey',\n... 
suffixes=('_left', '_right'))\n lkey value_left rkey value_right\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2, but raise an exception if the DataFrames have\nany overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))\nTraceback (most recent call last):\n...\nValueError: columns overlap but no suffix specified:\n Index(['value'], dtype='object')\n\n>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n>>> df1\n a b\n0 foo 1\n1 bar 2\n>>> df2\n a c\n0 foo 3\n1 baz 4\n\n>>> df1.merge(df2, how='inner', on='a')\n a b c\n0 foo 1 3\n\n>>> df1.merge(df2, how='left', on='a')\n a b c\n0 foo 1 3.0\n1 bar 2 NaN\n\n>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})\n>>> df2 = pd.DataFrame({'right': [7, 8]})\n>>> df1\n left\n0 foo\n1 bar\n>>> df2\n right\n0 7\n1 8\n\n>>> df1.merge(df2, how='cross')\n left right\n0 foo 7\n1 foo 8\n2 bar 7\n3 bar 8\n\"\"\"\n\n\n# -----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame, OpsMixin):\n \"\"\"\n Two-dimensional, size-mutable, potentially heterogeneous tabular data.\n\n Data structure also contains labeled axes (rows and columns).\n Arithmetic operations align on both row and column labels. Can be\n thought of as a dict-like container for Series objects. The primary\n pandas data structure.\n\n Parameters\n ----------\n data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame\n Dict can contain Series, arrays, constants, dataclass or list-like objects. If\n data is a dict, column order follows insertion-order.\n\n .. versionchanged:: 0.25.0\n If data is a list of dicts, column order follows insertion-order.\n\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer.\n copy : bool, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input.\n\n See Also\n --------\n DataFrame.from_records : Constructor from tuples, also record arrays.\n DataFrame.from_dict : From dicts of Series, arrays, or dicts.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n read_table : Read general delimited file into DataFrame.\n read_clipboard : Read text from clipboard into DataFrame.\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = pd.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n ... 
columns=['a', 'b', 'c'])\n >>> df2\n a b c\n 0 1 2 3\n 1 4 5 6\n 2 7 8 9\n\n Constructing DataFrame from dataclass:\n\n >>> from dataclasses import make_dataclass\n >>> Point = make_dataclass(\"Point\", [(\"x\", int), (\"y\", int)])\n >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])\n x y\n 0 0 0\n 1 0 3\n 2 2 3\n \"\"\"\n\n _internal_names_set = {\"columns\", \"index\"} | NDFrame._internal_names_set\n _typ = \"dataframe\"\n _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)\n\n @property\n def _constructor(self) -> Type[DataFrame]:\n return DataFrame\n\n _constructor_sliced: Type[Series] = Series\n _hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])\n _accessors: Set[str] = {\"sparse\"}\n\n @property\n def _constructor_expanddim(self):\n # GH#31549 raising NotImplementedError on a property causes trouble\n # for `inspect`\n def constructor(*args, **kwargs):\n raise NotImplementedError(\"Not supported for DataFrames!\")\n\n return constructor\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index: Optional[Axes] = None,\n columns: Optional[Axes] = None,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n ):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._mgr\n\n if isinstance(data, BlockManager):\n if index is None and columns is None and dtype is None and copy is False:\n # GH#33357 fastpath\n NDFrame.__init__(self, data)\n return\n\n mgr = self._init_mgr(\n data, axes={\"index\": index, \"columns\": columns}, dtype=dtype, copy=copy\n )\n\n elif isinstance(data, dict):\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)\n\n # a masked array\n else:\n data = sanitize_masked_array(data)\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = {k: data[k] for k in data_columns}\n if columns is None:\n columns = data_columns\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, \"name\", None) is not None:\n mgr = init_dict({data.name: data}, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n # For data is list-like, or Iterable (will consume into list)\n elif is_list_like(data):\n if not isinstance(data, (abc.Sequence, ExtensionArray)):\n data = list(data)\n if len(data) > 0:\n if is_dataclass(data[0]):\n data = dataclasses_to_dicts(data)\n if treat_as_nested(data):\n arrays, columns, index = nested_data_to_arrays(\n data, columns, index, dtype\n )\n mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n else:\n mgr = init_dict({}, index, columns, dtype=dtype)\n # For data is scalar\n else:\n if index is None or columns is None:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n if not dtype:\n dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)\n\n # For data is a scalar extension dtype\n if is_extension_array_dtype(dtype):\n # TODO(EA2D): special case not needed with 2D EAs\n\n values = [\n construct_1d_arraylike_from_scalar(data, 
len(index), dtype)\n for _ in range(len(columns))\n ]\n mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)\n else:\n values = construct_2d_arraylike_from_scalar(\n data, len(index), len(columns), dtype, copy\n )\n\n mgr = init_ndarray(\n values, index, columns, dtype=values.dtype, copy=False\n )\n\n NDFrame.__init__(self, mgr)\n\n # ----------------------------------------------------------------------\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],\n dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n See Also\n --------\n ndarray.shape : Tuple of array dimensions.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self.index), len(self.columns)\n\n @property\n def _is_homogeneous_type(self) -> bool:\n \"\"\"\n Whether all the columns in a DataFrame have the same type.\n\n Returns\n -------\n bool\n\n See Also\n --------\n Index._is_homogeneous_type : Whether the object has a single\n dtype.\n MultiIndex._is_homogeneous_type : Whether all the levels of a\n MultiIndex have the same dtype.\n\n Examples\n --------\n >>> DataFrame({\"A\": [1, 2], \"B\": [3, 4]})._is_homogeneous_type\n True\n >>> DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.0]})._is_homogeneous_type\n False\n\n Items with the same type but different sizes are considered\n different types.\n\n >>> DataFrame({\n ... \"A\": np.array([1, 2], dtype=np.int32),\n ... \"B\": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type\n False\n \"\"\"\n if self._mgr.any_extension_types:\n return len({block.dtype for block in self._mgr.blocks}) == 1\n else:\n return not self._is_mixed_type\n\n @property\n def _can_fast_transpose(self) -> bool:\n \"\"\"\n Can we transpose this DataFrame without creating any new array objects.\n \"\"\"\n if self._mgr.any_extension_types:\n # TODO(EA2D) special case would be unnecessary with 2D EAs\n return False\n return len(self._mgr.blocks) == 1\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def _repr_fits_vertical_(self) -> bool:\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns.\n\n In case of non-interactive session, no boundaries apply.\n\n `ignore_width` is here so ipynb+HTML output can behave the way\n users expect. 
display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n width, height = console.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if (max_columns and nb_columns > max_columns) or (\n (not ignore_width) and width and nb_columns > (width // 2)\n ):\n return False\n\n # used by repr_html under IPython notebook or scripts ignore terminal\n # dims\n if ignore_width or not console.in_interactive_session():\n return True\n\n if get_option(\"display.width\") is not None or console.in_ipython_frontend():\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actually checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[: min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max(len(line) for line in value.split(\"\\n\"))\n\n return repr_width < width\n\n def _info_repr(self) -> bool:\n \"\"\"\n True if the repr should show the info view.\n \"\"\"\n info_repr_option = get_option(\"display.large_repr\") == \"info\"\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular DataFrame.\n \"\"\"\n buf = StringIO(\"\")\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n max_colwidth = get_option(\"display.max_colwidth\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = console.get_console_size()\n else:\n width = None\n self.to_string(\n buf=buf,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n line_width=width,\n max_colwidth=max_colwidth,\n show_dimensions=show_dimensions,\n )\n\n return buf.getvalue()\n\n def _repr_html_(self) -> Optional[str]:\n \"\"\"\n Return a html representation for a particular DataFrame.\n\n Mainly for IPython notebook.\n \"\"\"\n if self._info_repr():\n buf = StringIO(\"\")\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace(\"<\", r\"&lt;\", 1)\n val = val.replace(\">\", r\"&gt;\", 1)\n return \"<pre>\" + val + \"</pre>\"\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=None,\n col_space=None,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n justify=None,\n index_names=True,\n header=True,\n index=True,\n bold_rows=True,\n escape=True,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=\".\",\n )\n return fmt.DataFrameRenderer(formatter).to_html(notebook=True)\n else:\n return None\n\n @Substitution(\n header_type=\"bool or sequence\",\n header=\"Write out 
the column names. If a list of strings \"\n \"is given, it is assumed to be aliases for the \"\n \"column names\",\n col_space_type=\"int, list or dict of int\",\n col_space=\"The minimum width of each column\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_string(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[int] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[fmt.FormattersType] = None,\n float_format: Optional[fmt.FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n min_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n max_colwidth: Optional[int] = None,\n encoding: Optional[str] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n %(shared_params)s\n line_width : int, optional\n Width to wrap a line in characters.\n max_colwidth : int, optional\n Max width to truncate each column in characters. By default, no limit.\n\n .. versionadded:: 1.0.0\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. versionadded:: 1.0\n %(returns)s\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}\n >>> df = pd.DataFrame(d)\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n from pandas import option_context\n\n with option_context(\"display.max_colwidth\", max_colwidth):\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header,\n index=index,\n min_rows=min_rows,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=decimal,\n )\n return fmt.DataFrameRenderer(formatter).to_string(\n buf=buf,\n encoding=encoding,\n line_width=line_width,\n )\n\n # ----------------------------------------------------------------------\n\n @property\n def style(self) -> Styler:\n \"\"\"\n Returns a Styler object.\n\n Contains methods for building a styled HTML representation of the DataFrame.\n\n See Also\n --------\n io.formats.style.Styler : Helps style a DataFrame or Series according to the\n data with HTML and CSS.\n \"\"\"\n from pandas.io.formats.style import Styler\n\n return Styler(self)\n\n _shared_docs[\n \"items\"\n ] = r\"\"\"\n Iterate over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Yields\n ------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as\n (index, Series) pairs.\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples\n of the values.\n\n Examples\n --------\n >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... 
index=['panda', 'polar', 'koala'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n >>> for label, content in df.items():\n ... print(f'label: {label}')\n ... print(f'content: {content}', sep='\\n')\n ...\n label: species\n content:\n panda bear\n polar bear\n koala marsupial\n Name: species, dtype: object\n label: population\n content:\n panda 1864\n polar 22000\n koala 80000\n Name: population, dtype: int64\n \"\"\"\n\n @Appender(_shared_docs[\"items\"])\n def items(self) -> Iterable[Tuple[Label, Series]]:\n if self.columns.is_unique and hasattr(self, \"_item_cache\"):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self._ixs(i, axis=1)\n\n @Appender(_shared_docs[\"items\"])\n def iteritems(self) -> Iterable[Tuple[Label, Series]]:\n yield from self.items()\n\n def iterrows(self) -> Iterable[Tuple[Label, Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : Series\n The data of the row as a Series.\n\n See Also\n --------\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n columns = self.columns\n klass = self._constructor_sliced\n for k, v in zip(self.index, self.values):\n s = klass(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index: bool = True, name: Optional[str] = \"Pandas\"):\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"Pandas\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> for row in df.itertuples():\n ... 
print(row)\n ...\n Pandas(Index='dog', num_legs=4, num_wings=0)\n Pandas(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n Pandas(num_legs=4, num_wings=0)\n Pandas(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n arrays = []\n fields = list(self.columns)\n if index:\n arrays.append(self.index)\n fields.insert(0, \"Index\")\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n\n if name is not None:\n # https://github.com/python/mypy/issues/9046\n # error: namedtuple() expects a string literal as the first argument\n itertuple = collections.namedtuple( # type: ignore[misc]\n name, fields, rename=True\n )\n return map(itertuple._make, zip(*arrays))\n\n # fallback to regular tuples\n return zip(*arrays)\n\n def __len__(self) -> int:\n \"\"\"\n Returns length of info axis, but here we use the index.\n \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series, DataFrame or a numpy array.\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series or DataFrame\n If other is a Series, return the matrix product between self and\n other as a Series. If other is a DataFrame or a numpy.array, return\n the matrix product of self and other in a DataFrame of a np.array.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. 
In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n Here we multiply a DataFrame with a Series.\n\n >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> s = pd.Series([1, 1, 2, 1])\n >>> df.dot(s)\n 0 -4\n 1 5\n dtype: int64\n\n Here we multiply a DataFrame with another DataFrame.\n\n >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(other)\n 0 1\n 0 1 4\n 1 2 2\n\n Note that the dot method give the same result as @\n\n >>> df @ other\n 0 1\n 0 1 4\n 1 2 2\n\n The dot method works also if other is an np.array.\n\n >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(arr)\n 0 1\n 0 1 4\n 1 2 2\n\n Note how shuffling of the objects does not change the result.\n\n >>> s2 = s.reindex([1, 0, 2, 3])\n >>> df.dot(s2)\n 0 -4\n 1 5\n dtype: int64\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if len(common) > len(self.columns) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right._values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, DataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=left.index, columns=other.columns\n )\n elif isinstance(other, Series):\n return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, (np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return self._constructor_sliced(result, index=left.index)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n try:\n return self.T.dot(np.transpose(other)).T\n except ValueError as err:\n if \"shape mismatch\" not in str(err):\n raise\n # GH#21581 give exception message for original shapes\n msg = f\"shapes {np.shape(other)} and {self.shape} not aligned\"\n raise ValueError(msg) from err\n\n # ----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient=\"columns\", dtype=None, columns=None) -> DataFrame:\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. 
Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data, orient='index')\n 0 1 2 3\n row_1 3 2 1 0\n row_2 a b c d\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> pd.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D'])\n A B C D\n row_1 3 2 1 0\n row_2 a b c d\n \"\"\"\n index = None\n orient = orient.lower()\n if orient == \"index\":\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient == \"columns\":\n if columns is not None:\n raise ValueError(\"cannot use columns parameter with orient='columns'\")\n else: # pragma: no cover\n raise ValueError(\"only recognize index or columns for orient\")\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n def to_numpy(\n self, dtype=None, copy: bool = False, na_value=lib.no_default\n ) -> np.ndarray:\n \"\"\"\n Convert the DataFrame to a NumPy array.\n\n .. versionadded:: 0.24.0\n\n By default, the dtype of the returned array will be the common NumPy\n dtype of all types in the DataFrame. For example, if the dtypes are\n ``float16`` and ``float32``, the results dtype will be ``float32``.\n This may require copying data and coercing values, which may be\n expensive.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the dtypes of the DataFrame columns.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.to_numpy : Similar method for Series.\n\n Examples\n --------\n >>> pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to\n be used.\n\n >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5]})\n >>> df.to_numpy()\n array([[1. , 3. ],\n [2. 
, 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will\n have object dtype.\n\n >>> df['C'] = pd.date_range('2000', periods=2)\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],\n [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n result = self._mgr.as_array(\n transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value\n )\n if result.dtype is not dtype:\n result = np.array(result, dtype=dtype, copy=False)\n\n return result\n\n def to_dict(self, orient: str = \"dict\", into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n See Also\n --------\n DataFrame.from_dict: Create a DataFrame from a dictionary.\n DataFrame.to_json: Convert a DataFrame to JSON format.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... 
index=['row1', 'row2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df.to_dict()\n {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}\n\n You can specify the return orientation.\n\n >>> df.to_dict('series')\n {'col1': row1 1\n row2 2\n Name: col1, dtype: int64,\n 'col2': row1 0.50\n row2 0.75\n Name: col2, dtype: float64}\n\n >>> df.to_dict('split')\n {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],\n 'data': [[1, 0.5], [2, 0.75]]}\n\n >>> df.to_dict('records')\n [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]\n\n >>> df.to_dict('index')\n {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),\n ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd)\n [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),\n defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\n \"DataFrame columns are not unique, some columns will be omitted.\",\n UserWarning,\n stacklevel=2,\n )\n # GH16122\n into_c = com.standardize_mapping(into)\n\n orient = orient.lower()\n # GH32515\n if orient.startswith((\"d\", \"l\", \"s\", \"r\", \"i\")) and orient not in {\n \"dict\",\n \"list\",\n \"series\",\n \"split\",\n \"records\",\n \"index\",\n }:\n warnings.warn(\n \"Using short name for 'orient' is deprecated. Only the \"\n \"options: ('dict', list, 'series', 'split', 'records', 'index') \"\n \"will be used in a future version. Use one of the above \"\n \"to silence this warning.\",\n FutureWarning,\n )\n\n if orient.startswith(\"d\"):\n orient = \"dict\"\n elif orient.startswith(\"l\"):\n orient = \"list\"\n elif orient.startswith(\"sp\"):\n orient = \"split\"\n elif orient.startswith(\"s\"):\n orient = \"series\"\n elif orient.startswith(\"r\"):\n orient = \"records\"\n elif orient.startswith(\"i\"):\n orient = \"index\"\n\n if orient == \"dict\":\n return into_c((k, v.to_dict(into)) for k, v in self.items())\n\n elif orient == \"list\":\n return into_c((k, v.tolist()) for k, v in self.items())\n\n elif orient == \"split\":\n return into_c(\n (\n (\"index\", self.index.tolist()),\n (\"columns\", self.columns.tolist()),\n (\n \"data\",\n [\n list(map(maybe_box_datetimelike, t))\n for t in self.itertuples(index=False, name=None)\n ],\n ),\n )\n )\n\n elif orient == \"series\":\n return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())\n\n elif orient == \"records\":\n columns = self.columns.tolist()\n rows = (\n dict(zip(columns, row))\n for row in self.itertuples(index=False, name=None)\n )\n return [\n into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())\n for row in rows\n ]\n\n elif orient == \"index\":\n if not self.index.is_unique:\n raise ValueError(\"DataFrame index must be unique for orient='index'.\")\n return into_c(\n (t[0], dict(zip(self.columns, t[1:])))\n for t in self.itertuples(name=None)\n )\n\n else:\n raise ValueError(f\"orient '{orient}' not understood\")\n\n def to_gbq(\n self,\n destination_table: str,\n project_id: Optional[str] = None,\n chunksize: Optional[int] = None,\n reauth: bool = False,\n if_exists: str = \"fail\",\n auth_local_webserver: bool = False,\n table_schema: Optional[List[Dict[str, str]]] = None,\n location: 
Optional[str] = None,\n progress_bar: bool = True,\n credentials=None,\n ) -> None:\n \"\"\"\n Write a DataFrame to a Google BigQuery table.\n\n This function requires the `pandas-gbq package\n <https://pandas-gbq.readthedocs.io>`__.\n\n See the `How to authenticate with Google BigQuery\n <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__\n guide for authentication instructions.\n\n Parameters\n ----------\n destination_table : str\n Name of table to be written, in the form ``dataset.tablename``.\n project_id : str, optional\n Google BigQuery Account project ID. Optional when available from\n the environment.\n chunksize : int, optional\n Number of rows to be inserted in each chunk from the dataframe.\n Set to ``None`` to load the whole dataframe at once.\n reauth : bool, default False\n Force Google BigQuery to re-authenticate the user. This is useful\n if multiple accounts are used.\n if_exists : str, default 'fail'\n Behavior when the destination table exists. Value can be one of:\n\n ``'fail'``\n If table exists raise pandas_gbq.gbq.TableCreationError.\n ``'replace'``\n If table exists, drop it, recreate it, and insert data.\n ``'append'``\n If table exists, insert data. Create if does not exist.\n auth_local_webserver : bool, default False\n Use the `local webserver flow`_ instead of the `console flow`_\n when getting user credentials.\n\n .. _local webserver flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server\n .. _console flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console\n\n *New in version 0.2.0 of pandas-gbq*.\n table_schema : list of dicts, optional\n List of BigQuery table fields to which according DataFrame\n columns conform to, e.g. ``[{'name': 'col1', 'type':\n 'STRING'},...]``. If schema is not provided, it will be\n generated according to dtypes of DataFrame columns. See\n BigQuery API documentation on available names of a field.\n\n *New in version 0.3.1 of pandas-gbq*.\n location : str, optional\n Location where the load job should run. See the `BigQuery locations\n documentation\n <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a\n list of available locations. The location must match that of the\n target dataset.\n\n *New in version 0.5.0 of pandas-gbq*.\n progress_bar : bool, default True\n Use the library `tqdm` to show the progress bar for the upload,\n chunk by chunk.\n\n *New in version 0.5.0 of pandas-gbq*.\n credentials : google.auth.credentials.Credentials, optional\n Credentials for accessing Google APIs. Use this parameter to\n override default credentials, such as to use Compute Engine\n :class:`google.auth.compute_engine.Credentials` or Service\n Account :class:`google.oauth2.service_account.Credentials`\n directly.\n\n *New in version 0.8.0 of pandas-gbq*.\n\n .. 
versionadded:: 0.24.0\n\n See Also\n --------\n pandas_gbq.to_gbq : This function in the pandas-gbq library.\n read_gbq : Read a DataFrame from Google BigQuery.\n \"\"\"\n from pandas.io import gbq\n\n gbq.to_gbq(\n self,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n\n @classmethod\n def from_records(\n cls,\n data,\n index=None,\n exclude=None,\n columns=None,\n coerce_float: bool = False,\n nrows=None,\n ) -> DataFrame:\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Creates a DataFrame object from a structured ndarray, sequence of\n tuples or dicts, or DataFrame.\n\n Parameters\n ----------\n data : structured ndarray, sequence of tuples or dicts, or DataFrame\n Structured input data.\n index : str, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use.\n exclude : sequence, default None\n Columns or fields to exclude.\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns).\n coerce_float : bool, default False\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n nrows : int, default None\n Number of rows to read if data is an iterator.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_dict : DataFrame from dict of array-like or dicts.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n Data can be provided as a structured ndarray:\n\n >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],\n ... dtype=[('col_1', 'i4'), ('col_2', 'U1')])\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of dicts:\n\n >>> data = [{'col_1': 3, 'col_2': 'a'},\n ... {'col_1': 2, 'col_2': 'b'},\n ... {'col_1': 1, 'col_2': 'c'},\n ... 
{'col_1': 0, 'col_2': 'd'}]\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of tuples with corresponding columns:\n\n >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]\n >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = ensure_index(columns)\n\n if is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, \"dtype\") and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns_list = []\n for k, v in data.items():\n if k in columns:\n arr_columns_list.append(k)\n arrays.append(v)\n\n arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = to_arrays(data, columns)\n if columns is not None:\n columns = ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = to_arrays(data, columns)\n if coerce_float:\n for i, arr in enumerate(arrays):\n if arr.dtype == object:\n arrays[i] = lib.maybe_convert_objects(arr, try_float=True)\n\n arr_columns = ensure_index(arr_columns)\n if columns is not None:\n columns = ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if isinstance(index, str) or not hasattr(index, \"__iter__\"):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n index_data = [arrays[arr_columns.get_loc(field)] for field in index]\n except (KeyError, TypeError):\n # raised by get_loc, see GH#29258\n result_index = index\n else:\n result_index = ensure_index_from_sequences(index_data, names=index)\n exclude.update(index)\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)\n\n return cls(mgr)\n\n def to_records(\n self, index=True, column_dtypes=None, index_dtypes=None\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n .. versionadded:: 0.24.0\n\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n .. 
versionadded:: 0.24.0\n\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n If the DataFrame index has no label then the recarray field name\n is set to 'index'. If the index has a label then this is used as the\n field name:\n\n >>> df.index = df.index.rename(\"I\")\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False)\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"})\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n As well as for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\")\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])\n\n >>> index_dtypes = f\"<S{df.index.str.len().max()}\"\n >>> df.to_records(index_dtypes=index_dtypes)\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n if index:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. 
copy copy copy\n ix_vals = list(map(np.array, zip(*self.index._values)))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [\n np.asarray(self.iloc[:, i]) for i in range(len(self.columns))\n ]\n\n count = 0\n index_names = list(self.index.names)\n\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = f\"level_{count}\"\n count += 1\n elif index_names[0] is None:\n index_names = [\"index\"]\n\n names = [str(name) for name in itertools.chain(index_names, self.columns)]\n else:\n arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]\n names = [str(c) for c in self.columns]\n index_names = []\n\n index_len = len(index_names)\n formats = []\n\n for i, v in enumerate(arrays):\n index = i\n\n # When the names and arrays are collected, we\n # first collect those in the DataFrame's index,\n # followed by those in its columns.\n #\n # Thus, the total length of the array is:\n # len(index_names) + len(DataFrame.columns).\n #\n # This check allows us to see whether we are\n # handling a name / array in the index or column.\n if index < index_len:\n dtype_mapping = index_dtypes\n name = index_names[index]\n else:\n index -= index_len\n dtype_mapping = column_dtypes\n name = self.columns[index]\n\n # We have a dictionary, so we get the data type\n # associated with the index or column (which can\n # be denoted by its name in the DataFrame or its\n # position in DataFrame's array of indices or\n # columns, whichever is applicable.\n if is_dict_like(dtype_mapping):\n if name in dtype_mapping:\n dtype_mapping = dtype_mapping[name]\n elif index in dtype_mapping:\n dtype_mapping = dtype_mapping[index]\n else:\n dtype_mapping = None\n\n # If no mapping can be found, use the array's\n # dtype attribute for formatting.\n #\n # A valid dtype must either be a type or\n # string naming a type.\n if dtype_mapping is None:\n formats.append(v.dtype)\n elif isinstance(dtype_mapping, (type, np.dtype, str)):\n formats.append(dtype_mapping)\n else:\n element = \"row\" if i < index_len else \"column\"\n msg = f\"Invalid dtype {dtype_mapping} specified for {element} {name}\"\n raise ValueError(msg)\n\n return np.rec.fromarrays(arrays, dtype={\"names\": names, \"formats\": formats})\n\n @classmethod\n def _from_arrays(\n cls,\n arrays,\n columns,\n index,\n dtype: Optional[Dtype] = None,\n verify_integrity: bool = True,\n ) -> DataFrame:\n \"\"\"\n Create DataFrame from a list of arrays corresponding to the columns.\n\n Parameters\n ----------\n arrays : list-like of arrays\n Each array in the list corresponds to one column, in order.\n columns : list-like, Index\n The column names for the resulting DataFrame.\n index : list-like, Index\n The rows labels for the resulting DataFrame.\n dtype : dtype, optional\n Optional dtype to enforce for all arrays.\n verify_integrity : bool, default True\n Validate and homogenize all input. 
If set to False, it is assumed\n that all elements of `arrays` are actual arrays how they will be\n stored in a block (numpy ndarray or ExtensionArray), have the same\n length as and are aligned with the index, and that `columns` and\n `index` are ensured to be an Index object.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n mgr = arrays_to_mgr(\n arrays,\n columns,\n index,\n columns,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n return cls(mgr)\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_stata(\n self,\n path: FilePathOrBuffer,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n version: Optional[int] = 114,\n convert_strl: Optional[Sequence[Label]] = None,\n compression: CompressionOptions = \"infer\",\n storage_options: StorageOptions = None,\n ) -> None:\n \"\"\"\n Export DataFrame object to Stata dta format.\n\n Writes the DataFrame to a Stata dataset file.\n \"dta\" files contain a Stata dataset.\n\n Parameters\n ----------\n path : str, buffer or path object\n String, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() function. If using a buffer\n then the buffer will not be automatically closed after the file\n data has been written.\n\n .. versionchanged:: 1.0.0\n\n Previously this was \"fname\"\n\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata\n internal format to use when writing the dates. Options are 'tc',\n 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer\n or a name. Datetime columns that do not have a conversion type\n specified will be converted to 'tc'. Raises NotImplementedError if\n a datetime column has timezone information.\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`.\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current\n time.\n data_label : str, optional\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as\n values. Each label must be 80 characters or smaller.\n version : {{114, 117, 118, 119, None}}, default 114\n Version to use in the output dta file. Set to None to let pandas\n decide between 118 or 119 formats depending on the number of\n columns in the frame. Version 114 can be read by Stata 10 and\n later. Version 117 can be read by Stata 13 or later. Version 118\n is supported in Stata 14 and later. Version 119 is supported in\n Stata 15 and later. Version 114 limits string variables to 244\n characters or fewer while versions 117 and later allow strings\n with lengths up to 2,000,000 characters. Versions 118 and 119\n support Unicode characters, and version 119 supports more than\n 32,767 variables.\n\n Version 119 should usually only be used when the number of\n variables exceeds the capacity of dta format 118. Exporting\n smaller datasets in format 119 may have unintended consequences,\n and, as of November 2020, Stata SE cannot read version 119 files.\n\n .. 
versionchanged:: 1.0.0\n\n Added support for formats 118 and 119.\n\n convert_strl : list, optional\n List of column names to convert to string columns to Stata StrL\n format. Only available if version is 117. Storing strings in the\n StrL format can produce smaller dta files if strings have more than\n 8 characters and values are repeated.\n compression : str or dict, default 'infer'\n For on-the-fly compression of the output dta. If string, specifies\n compression mode. If dict, value at key 'method' specifies\n compression mode. Compression mode must be one of {{'infer', 'gzip',\n 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and\n `fname` is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n compression). If dict and compression mode is one of {{'zip',\n 'gzip', 'bz2'}}, or inferred as one of the above, other entries\n passed as additional compression options.\n\n .. versionadded:: 1.1.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n * Column dtype is not representable in Stata\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n See Also\n --------\n read_stata : Import Stata data files.\n io.stata.StataWriter : Low-level writer for Stata data files.\n io.stata.StataWriter117 : Low-level writer for version 117 files.\n\n Examples\n --------\n >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}})\n >>> df.to_stata('animals.dta') # doctest: +SKIP\n \"\"\"\n if version not in (114, 117, 118, 119, None):\n raise ValueError(\"Only formats 114, 117, 118 and 119 are supported.\")\n if version == 114:\n if convert_strl is not None:\n raise ValueError(\"strl is not supported in format 114\")\n from pandas.io.stata import StataWriter as statawriter\n elif version == 117:\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriter117 as statawriter,\n )\n else: # versions 118 and 119\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriterUTF8 as statawriter,\n )\n\n kwargs: Dict[str, Any] = {}\n if version is None or version >= 117:\n # strl conversion is only supported >= 117\n kwargs[\"convert_strl\"] = convert_strl\n if version is None or version >= 118:\n # Specifying the version is only supported for UTF8 (118 or 119)\n kwargs[\"version\"] = version\n\n # mypy: Too many arguments for \"StataWriter\"\n writer = statawriter( # type: ignore[call-arg]\n path,\n self,\n convert_dates=convert_dates,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n write_index=write_index,\n variable_labels=variable_labels,\n compression=compression,\n storage_options=storage_options,\n **kwargs,\n )\n writer.write_file()\n\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:\n \"\"\"\n Write a DataFrame to the binary Feather format.\n\n Parameters\n ----------\n path : str or file-like object\n If a string, it will be used as Root Directory path.\n **kwargs :\n Additional keywords passed to :func:`pyarrow.feather.write_feather`.\n Starting with 
pyarrow 0.17, this includes the `compression`,\n `compression_level`, `chunksize` and `version` keywords.\n\n .. versionadded:: 1.1.0\n \"\"\"\n from pandas.io.feather_format import to_feather\n\n to_feather(self, path, **kwargs)\n\n @doc(\n Series.to_markdown,\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=_shared_docs[\"storage_options\"],\n examples=\"\"\"Examples\n --------\n >>> df = pd.DataFrame(\n ... data={\"animal_1\": [\"elk\", \"pig\"], \"animal_2\": [\"dog\", \"quetzal\"]}\n ... )\n >>> print(df.to_markdown())\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(df.to_markdown(tablefmt=\"grid\"))\n +----+------------+------------+\n | | animal_1 | animal_2 |\n +====+============+============+\n | 0 | elk | dog |\n +----+------------+------------+\n | 1 | pig | quetzal |\n +----+------------+------------+\n \"\"\",\n )\n def to_markdown(\n self,\n buf: Optional[Union[IO[str], str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n if \"showindex\" in kwargs:\n warnings.warn(\n \"'showindex' is deprecated. Only 'index' will be used \"\n \"in a future version. Use 'index' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n\n kwargs.setdefault(\"headers\", \"keys\")\n kwargs.setdefault(\"tablefmt\", \"pipe\")\n kwargs.setdefault(\"showindex\", index)\n tabulate = import_optional_dependency(\"tabulate\")\n result = tabulate.tabulate(self, **kwargs)\n if buf is None:\n return result\n\n with get_handle(buf, mode, storage_options=storage_options) as handles:\n assert not isinstance(handles.handle, (str, mmap.mmap))\n handles.handle.writelines(result)\n return None\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_parquet(\n self,\n path: Optional[FilePathOrBuffer] = None,\n engine: str = \"auto\",\n compression: Optional[str] = \"snappy\",\n index: Optional[bool] = None,\n partition_cols: Optional[List[str]] = None,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[bytes]:\n \"\"\"\n Write a DataFrame to the binary parquet format.\n\n This function writes the dataframe as a `parquet file\n <https://parquet.apache.org/>`_. You can choose different parquet\n backends, and have the option of compression. See\n :ref:`the user guide <io.parquet>` for more details.\n\n Parameters\n ----------\n path : str or file-like object, default None\n If a string, it will be used as Root Directory path\n when writing a partitioned dataset. By file-like object,\n we refer to objects with a write() method, such as a file handle\n (e.g. via builtin open function) or io.BytesIO. The engine\n fastparquet does not accept file-like objects. If path is None,\n a bytes object is returned.\n\n .. versionchanged:: 1.2.0\n\n Previously this was \"fname\"\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'\n Name of the compression to use. 
Use ``None`` for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output.\n If ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n\n .. versionadded:: 0.24.0\n\n partition_cols : list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n\n .. versionadded:: 0.24.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Additional arguments passed to the parquet library. See\n :ref:`pandas io <io.parquet>` for more details.\n\n Returns\n -------\n bytes if no path argument is provided else None\n\n See Also\n --------\n read_parquet : Read a parquet file.\n DataFrame.to_csv : Write a csv file.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_hdf : Write to hdf.\n\n Notes\n -----\n This function requires either the `fastparquet\n <https://pypi.org/project/fastparquet>`_ or `pyarrow\n <https://arrow.apache.org/docs/python/>`_ library.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})\n >>> df.to_parquet('df.parquet.gzip',\n ... compression='gzip') # doctest: +SKIP\n >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP\n col1 col2\n 0 1 3\n 1 2 4\n\n If you want to get a buffer to the parquet content you can use a io.BytesIO\n object, as long as you don't use partition_cols, which creates multiple files.\n\n >>> import io\n >>> f = io.BytesIO()\n >>> df.to_parquet(f)\n >>> f.seek(0)\n 0\n >>> content = f.read()\n \"\"\"\n from pandas.io.parquet import to_parquet\n\n return to_parquet(\n self,\n path,\n engine,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n @Substitution(\n header_type=\"bool\",\n header=\"Whether to print column labels, default True\",\n col_space_type=\"str or int, list or dict of int or str\",\n col_space=\"The minimum width of each column in CSS length \"\n \"units. An int is assumed to be px units.\\n\\n\"\n \" .. 
versionadded:: 0.25.0\\n\"\n \" Ability to use str\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_html(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[ColspaceArgType] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[FormattersType] = None,\n float_format: Optional[FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: Union[bool, str] = False,\n decimal: str = \".\",\n bold_rows: bool = True,\n classes: Optional[Union[str, List, Tuple]] = None,\n escape: bool = True,\n notebook: bool = False,\n border: Optional[int] = None,\n table_id: Optional[str] = None,\n render_links: bool = False,\n encoding: Optional[str] = None,\n ):\n \"\"\"\n Render a DataFrame as an HTML table.\n %(shared_params)s\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.display.html.border``.\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. versionadded:: 1.0\n\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links.\n\n .. versionadded:: 0.24.0\n %(returns)s\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:\n raise ValueError(\"Invalid value for justify parameter\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n )\n # TODO: a generic formatter wld b in DataFrameFormatter\n return fmt.DataFrameRenderer(formatter).to_html(\n buf=buf,\n classes=classes,\n notebook=notebook,\n border=border,\n encoding=encoding,\n table_id=table_id,\n render_links=render_links,\n )\n\n # ----------------------------------------------------------------------\n @Substitution(\n klass=\"DataFrame\",\n type_sub=\" and columns\",\n max_cols_sub=dedent(\n \"\"\"\\\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used.\"\"\"\n ),\n show_counts_sub=dedent(\n \"\"\"\\\n show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts.\n null_counts : bool, optional\n .. 
deprecated:: 1.2.0\n Use show_counts instead.\"\"\"\n ),\n examples_sub=dedent(\n \"\"\"\\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({\"int_col\": int_values, \"text_col\": text_values,\n ... \"float_col\": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\"df_info.txt\", \"w\",\n ... encoding=\"utf-8\") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)\n ... })\n >>> df.info()\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage='deep')\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB\"\"\"\n ),\n see_also_sub=dedent(\n \"\"\"\\\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns.\"\"\"\n ),\n version_added_sub=\"\",\n )\n @doc(BaseInfo.render)\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n memory_usage: Optional[Union[bool, str]] = None,\n show_counts: Optional[bool] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n if null_counts is not None:\n if show_counts is not None:\n raise ValueError(\"null_counts used with show_counts. Use show_counts.\")\n warnings.warn(\n \"null_counts is deprecated. 
Use show_counts instead\",\n FutureWarning,\n stacklevel=2,\n )\n show_counts = null_counts\n info = DataFrameInfo(\n data=self,\n memory_usage=memory_usage,\n )\n info.render(\n buf=buf,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n\n def memory_usage(self, index=True, deep=False) -> Series:\n \"\"\"\n Return the memory usage of each column in bytes.\n\n The memory usage can optionally include the contribution of\n the index and elements of `object` dtype.\n\n This value is displayed in `DataFrame.info` by default. This can be\n suppressed by setting ``pandas.options.display.memory_usage`` to False.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the DataFrame's\n index in returned Series. If ``index=True``, the memory usage of\n the index is the first item in the output.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the original column names and whose values\n is the memory usage of each column in bytes.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of an\n ndarray.\n Series.memory_usage : Bytes consumed by a Series.\n Categorical : Memory-efficient array for string values with\n many repeated values.\n DataFrame.info : Concise summary of a DataFrame.\n\n Examples\n --------\n >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']\n >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))\n ... for t in dtypes])\n >>> df = pd.DataFrame(data)\n >>> df.head()\n int64 float64 complex128 object bool\n 0 1 1.0 1.0+0.0j 1 True\n 1 1 1.0 1.0+0.0j 1 True\n 2 1 1.0 1.0+0.0j 1 True\n 3 1 1.0 1.0+0.0j 1 True\n 4 1 1.0 1.0+0.0j 1 True\n\n >>> df.memory_usage()\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n >>> df.memory_usage(index=False)\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n The memory footprint of `object` dtype columns is ignored by default:\n\n >>> df.memory_usage(deep=True)\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 180000\n bool 5000\n dtype: int64\n\n Use a Categorical for efficient storage of an object-dtype column with\n many repeated values.\n\n >>> df['object'].astype('category').memory_usage(deep=True)\n 5244\n \"\"\"\n result = self._constructor_sliced(\n [c.memory_usage(index=False, deep=deep) for col, c in self.items()],\n index=self.columns,\n )\n if index:\n result = self._constructor_sliced(\n self.index.memory_usage(deep=deep), index=[\"Index\"]\n ).append(result)\n return result\n\n def transpose(self, *args, copy: bool = False) -> DataFrame:\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. 
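# --- Hedged illustration: shallow versus deep memory accounting, following the
# memory_usage docstring above. A sketch only; `frame` is a hypothetical
# DataFrame and exact byte counts vary by platform and pandas version.
import numpy as np
import pandas as pd

frame = pd.DataFrame({"ints": np.arange(1000), "strings": ["x"] * 1000})
shallow = frame.memory_usage()          # object column counted as pointers only
deep = frame.memory_usage(deep=True)    # interrogates the Python string objects too
assert deep["strings"] >= shallow["strings"]
# A repetitive object column typically shrinks when stored as a Categorical.
as_category = frame["strings"].astype("category").memory_usage(deep=True)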
The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n Parameters\n ----------\n *args : tuple, optional\n Accepted for compatibility with NumPy.\n copy : bool, default False\n Whether to copy the data after transposing, even for DataFrames\n with a single dtype.\n\n Note that a copy is always required for mixed dtype DataFrames,\n or for DataFrames with any extension types.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n See Also\n --------\n numpy.transpose : Permute the dimensions of a given array.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the `object` dtype. In such a case, a copy of the data\n is always made.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = pd.DataFrame(data=d1)\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T # or df1.transpose()\n >>> df1_transposed\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'name': ['Alice', 'Bob'],\n ... 'score': [9.5, 8],\n ... 'employed': [False, True],\n ... 'kids': [0, 0]}\n >>> df2 = pd.DataFrame(data=d2)\n >>> df2\n name score employed kids\n 0 Alice 9.5 False 0\n 1 Bob 8.0 True 0\n\n >>> df2_transposed = df2.T # or df2.transpose()\n >>> df2_transposed\n 0 1\n name Alice Bob\n score 9.5 8.0\n employed False True\n kids 0 0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the `object` dtype:\n\n >>> df2.dtypes\n name object\n score float64\n employed bool\n kids int64\n dtype: object\n >>> df2_transposed.dtypes\n 0 object\n 1 object\n dtype: object\n \"\"\"\n nv.validate_transpose(args, {})\n # construct the args\n\n dtypes = list(self.dtypes)\n if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):\n # We have EAs with the same dtype. 
We can preserve that dtype in transpose.\n dtype = dtypes[0]\n arr_type = dtype.construct_array_type()\n values = self.values\n\n new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]\n result = self._constructor(\n dict(zip(self.index, new_values)), index=self.columns\n )\n\n else:\n new_values = self.values.T\n if copy:\n new_values = new_values.copy()\n result = self._constructor(\n new_values, index=self.columns, columns=self.index\n )\n\n return result.__finalize__(self, method=\"transpose\")\n\n @property\n def T(self) -> DataFrame:\n return self.transpose()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Parameters\n ----------\n i : int\n axis : int\n\n Notes\n -----\n If slice passed, the resulting data will be a view.\n \"\"\"\n # irow\n if axis == 0:\n new_values = self._mgr.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values, np.ndarray) and new_values.base is None\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[i],\n dtype=new_values.dtype,\n )\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n label = self.columns[i]\n\n values = self._mgr.iget(i)\n result = self._box_col_values(values, i)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def _get_column_array(self, i: int) -> ArrayLike:\n \"\"\"\n Get the values of the i'th column (ndarray or ExtensionArray, as stored\n in the Block)\n \"\"\"\n return self._mgr.iget_values(i)\n\n def _iter_column_arrays(self) -> Iterator[ArrayLike]:\n \"\"\"\n Iterate over the arrays of all columns in order.\n This returns the values as stored in the Block (ndarray or ExtensionArray).\n \"\"\"\n for i in range(len(self.columns)):\n yield self._get_column_array(i)\n\n def __getitem__(self, key):\n key = lib.item_from_zerodim(key)\n key = com.apply_if_callable(key, self)\n\n if is_hashable(key):\n # shortcut if the key is in columns\n if self.columns.is_unique and key in self.columns:\n if isinstance(self.columns, MultiIndex):\n return self._getitem_multilevel(key)\n return self._get_item_cache(key)\n\n # Do we have a slicer (on rows)?\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n if isinstance(indexer, np.ndarray):\n indexer = lib.maybe_indices_to_slice(\n indexer.astype(np.intp, copy=False), len(self)\n )\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._slice(indexer, axis=0)\n\n # Do we have a (boolean) DataFrame?\n if isinstance(key, DataFrame):\n return self.where(key)\n\n # Do we have a (boolean) 1d indexer?\n if com.is_bool_indexer(key):\n return self._getitem_bool_array(key)\n\n # We are left with two options: a single key, and a collection of keys,\n # We interpret tuples as collections only for non-MultiIndex\n is_single_key = isinstance(key, tuple) or not is_list_like(key)\n\n if is_single_key:\n if self.columns.nlevels > 1:\n return self._getitem_multilevel(key)\n indexer = self.columns.get_loc(key)\n if is_integer(indexer):\n indexer = [indexer]\n else:\n if is_iterator(key):\n key = list(key)\n indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]\n\n # take() does not accept boolean indexers\n if getattr(indexer, \"dtype\", None) == bool:\n indexer = np.where(indexer)[0]\n\n data = self._take_with_is_copy(indexer, axis=1)\n\n if 
is_single_key:\n # What does looking for a single key in a non-unique index return?\n # The behavior is inconsistent. It returns a Series, except when\n # - the key itself is repeated (test on data.shape, #9519), or\n # - we have a MultiIndex on columns (test on self.columns, #21309)\n if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):\n # GH#26490 using data[key] can cause RecursionError\n data = data._get_item_cache(key)\n\n return data\n\n def _getitem_bool_array(self, key):\n # also raises Exception if object array with NA values\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\n \"Boolean Series key will be reindexed to match DataFrame index.\",\n UserWarning,\n stacklevel=3,\n )\n elif len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}.\"\n )\n\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self._take_with_is_copy(indexer, axis=0)\n\n def _getitem_multilevel(self, key):\n # self.columns is a MultiIndex\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = self._constructor(\n new_values, index=self.index, columns=result_columns\n )\n result = result.__finalize__(self)\n\n # If there is only one column being returned, and its name is\n # either an empty string, or a tuple with an empty string as its\n # first element, then treat the empty string as a placeholder\n # and return the column as if the user had provided that empty\n # string in the key. 
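# --- Hedged illustration: boolean-array selection, the case _getitem_bool_array
# above handles. Hypothetical data; the mask must match the length of the index,
# otherwise a ValueError is raised.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
mask = df["a"] > 1                 # boolean Series aligned on df.index
subset = df[mask]                  # keeps only the rows where the mask is True
assert list(subset.index) == [1, 2]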
If the result is a Series, exclude the\n # implied empty string from its name.\n if len(result.columns) == 1:\n top = result.columns[0]\n if isinstance(top, tuple):\n top = top[0]\n if top == \"\":\n result = result[\"\"]\n if isinstance(result, Series):\n result = self._constructor_sliced(\n result, index=self.index, name=key\n )\n\n result._set_is_copy(self)\n return result\n else:\n # loc is neither a slice nor ndarray, so must be an int\n return self._ixs(loc, axis=1)\n\n def _get_value(self, index, col, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n scalar\n \"\"\"\n if takeable:\n series = self._ixs(col, axis=1)\n return series._values[index]\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n\n try:\n loc = engine.get_loc(index)\n return series._values[loc]\n except KeyError:\n # GH 20629\n if self.index.nlevels > 1:\n # partial indexing forbidden\n raise\n\n # we cannot handle direct indexing\n # use positional\n col = self.columns.get_loc(col)\n index = self.index.get_loc(index)\n return self._get_value(index, col, takeable=True)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, DataFrame) or getattr(key, \"ndim\", None) == 2:\n self._setitem_frame(key, value)\n elif isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(value, DataFrame):\n self._set_item_frame_value(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key: slice, value):\n # NB: we can't just use self.loc[key] = value because that\n # operates on labels and we need to operate positional for\n # backwards-compat, xref GH#31469\n self._check_setitem_copy()\n self.iloc[key] = value\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}!\"\n )\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.iloc[indexer] = value\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError(\"Columns must be same length as key\")\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n self.loc._ensure_listlike_indexer(key, axis=1, value=value)\n indexer = self.loc._get_listlike_indexer(\n key, axis=1, raise_missing=False\n )[1]\n self._check_setitem_copy()\n self.iloc[:, indexer] = value\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if isinstance(key, np.ndarray):\n if key.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n key = self._constructor(key, **self._construct_axes_dict())\n\n if key.size and not is_bool_dtype(key.values):\n raise TypeError(\n \"Must pass DataFrame or 2-d ndarray with boolean values only\"\n )\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n 
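# --- Hedged illustration: setting values with a boolean DataFrame key, the case
# the _setitem_frame path above is handling (it delegates to _where with the
# inverted mask). Hypothetical data; a sketch, not part of this module.
import pandas as pd

df = pd.DataFrame({"a": [1, -2, 3], "b": [-4, 5, -6]})
df[df < 0] = 0                     # every element where the condition holds is replaced
assert (df.values >= 0).all()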
self._where(-key, value, inplace=True)\n\n def _set_item_frame_value(self, key, value: \"DataFrame\") -> None:\n self._ensure_valid_index(value)\n\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex(cols, axis=1)\n\n # now align rows\n value = _reindex_for_setitem(value, self.index)\n value = value.T\n self._set_item_mgr(key, value)\n\n def _iset_item_mgr(self, loc: int, value) -> None:\n self._mgr.iset(loc, value)\n self._clear_item_cache()\n\n def _set_item_mgr(self, key, value):\n value = _maybe_atleast_2d(value)\n\n try:\n loc = self._info_axis.get_loc(key)\n except KeyError:\n # This item wasn't present, just insert at end\n self._mgr.insert(len(self._info_axis), key, value)\n else:\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _iset_item(self, loc: int, value):\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n value = self._sanitize_column(value)\n\n if (\n key in self.columns\n and value.ndim == 1\n and not is_extension_array_dtype(value)\n ):\n # broadcast across multiple columns if necessary\n if not self.columns.is_unique or isinstance(self.columns, MultiIndex):\n existing_piece = self[key]\n if isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n self._set_item_mgr(key, value)\n\n def _set_value(self, index, col, value, takeable: bool = False):\n \"\"\"\n Put single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar\n takeable : interpret the index/col as indexers, default False\n \"\"\"\n try:\n if takeable is True:\n series = self._ixs(col, axis=1)\n series._set_value(index, value, takeable=True)\n return\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n loc = engine.get_loc(index)\n validate_numeric_casting(series.dtype, value)\n\n series._values[loc] = value\n # Note: trying to use series._set_value breaks tests in\n # tests.frame.indexing.test_indexing and tests.indexing.test_partial\n except (KeyError, TypeError):\n # set using a non-recursive method & reset the cache\n if takeable:\n self.iloc[index, col] = value\n else:\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n Ensure that if we don't have an index, that we can create one from the\n passed value.\n \"\"\"\n # GH5632, make sure that we are a Series convertible\n if not len(self.index) and is_list_like(value) and len(value):\n try:\n value = Series(value)\n except (ValueError, 
NotImplementedError, TypeError) as err:\n raise ValueError(\n \"Cannot set a frame with no defined index \"\n \"and a value that cannot be converted to a Series\"\n ) from err\n\n # GH31368 preserve name of index\n index_copy = value.index.copy()\n if self.index.name is not None:\n index_copy.name = self.index.name\n\n self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)\n\n def _box_col_values(self, values, loc: int) -> Series:\n \"\"\"\n Provide boxed values for a column.\n \"\"\"\n # Lookup in columns so that if e.g. a str datetime was passed\n # we attach the Timestamp object as the name.\n name = self.columns[loc]\n klass = self._constructor_sliced\n return klass(values, index=self.index, name=name, fastpath=True)\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def query(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n\n You can refer to column names that are not valid Python variable names\n by surrounding them in backticks. Thus, column names containing spaces\n or punctuations (besides underscores) or starting with digits must be\n surrounded by backticks. (For example, a column named \"Area (cm^2) would\n be referenced as `Area (cm^2)`). Column names which are Python keywords\n (like \"list\", \"for\", \"import\", etc) cannot be used.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n .. versionadded:: 0.25.0\n Backtick quoting introduced.\n\n .. versionadded:: 1.0.0\n Expanding functionality of backtick quoting for more than only spaces.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n DataFrame or None\n DataFrame resulting from the provided query expression or\n None if ``inplace=True``.\n\n See Also\n --------\n eval : Evaluate a string describing operations on\n DataFrame columns.\n DataFrame.eval : Evaluate a string describing operations on\n DataFrame columns.\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. 
This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query. Please note that\n Python keywords may not be used as identifiers.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n *Backtick quoted variables*\n\n Backtick quoted variables are parsed as literal Python code and\n are converted internally to a Python valid identifier.\n This can lead to the following problems.\n\n During parsing a number of disallowed characters inside the backtick\n quoted string are replaced by strings that are allowed as a Python identifier.\n These characters include all operators in Python, the space character, the\n question mark, the exclamation mark, the dollar sign, and the euro sign.\n For other characters that fall outside the ASCII range (U+0001..U+007F)\n and those that are not further specified in PEP 3131,\n the query parser will raise an error.\n This excludes whitespace different than the space character,\n but also the hashtag (as it is used for comments) and the backtick\n itself (backtick can also not be escaped).\n\n In a special case, quotes that make a pair around a backtick can\n confuse the parser.\n For example, ```it's` > `that's``` will raise an error,\n as it forms a quoted string (``'s > `that'``) with a backtick inside.\n\n See also the Python documentation about lexical analysis\n (https://docs.python.org/3/reference/lexical_analysis.html)\n in combination with the source code in :mod:`pandas.core.computation.parsing`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not isinstance(expr, str):\n msg = f\"expr must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n kwargs[\"target\"] = None\n res = self.eval(expr, **kwargs)\n\n try:\n result = self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n result = self[res]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def eval(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. 
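# --- Hedged illustration: DataFrame.query with an environment variable ('@')
# and a backtick-quoted column name, per the query docstring above.
# Hypothetical data; a sketch only.
import pandas as pd

df = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C C": range(10, 5, -1)})
threshold = 3
by_variable = df.query("A > @threshold")     # refer to a local variable with '@'
by_backtick = df.query("B == `C C`")         # backticks for a column name with a space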
This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ndarray, scalar, pandas object, or None\n The result of the evaluation or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Notes\n -----\n For more details see the API documentation for :func:`~eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n\n Multiple columns can be assigned to using multi-line expressions:\n\n >>> df.eval(\n ... '''\n ... C = A + B\n ... D = A - B\n ... '''\n ... )\n A B C D\n 0 1 10 11 -9\n 1 2 8 10 -6\n 2 3 6 9 -3\n 3 4 4 8 0\n 4 5 2 7 3\n \"\"\"\n from pandas.core.computation.eval import eval as _eval\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n resolvers = kwargs.pop(\"resolvers\", None)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n column_resolvers = self._get_cleaned_column_resolvers()\n resolvers = column_resolvers, index_resolvers\n if \"target\" not in kwargs:\n kwargs[\"target\"] = self\n kwargs[\"resolvers\"] = kwargs.get(\"resolvers\", ()) + tuple(resolvers)\n\n return _eval(expr, inplace=inplace, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None) -> DataFrame:\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. 
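# --- Hedged illustration: DataFrame.eval assigning new columns, following the
# eval docstring above. Hypothetical data; a sketch only.
import pandas as pd

df = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
summed = df.eval("C = A + B")           # returns a new frame carrying column C
df.eval("D = A - B", inplace=True)      # mutates df instead of returning a copy
assert "C" not in df.columns and "D" in df.columns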
At least\n one of these parameters must be supplied.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n\n See Also\n --------\n DataFrame.dtypes: Return Series with the data type of each column.\n\n Notes\n -----\n * To select all *numeric* types, use ``np.number`` or ``'number'``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or\n ``'timedelta64'``\n * To select Pandas categorical dtypes, use ``'category'``\n * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in\n 0.20.0) or ``'datetime64[ns, tz]'``\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int64'])\n b c\n 0 True 1.0\n 1 False 2.0\n 2 True 1.0\n 3 False 2.0\n 4 True 1.0\n 5 False 2.0\n \"\"\"\n if not is_list_like(include):\n include = (include,) if include is not None else ()\n if not is_list_like(exclude):\n exclude = (exclude,) if exclude is not None else ()\n\n selection = (frozenset(include), frozenset(exclude))\n\n if not any(selection):\n raise ValueError(\"at least one of include or exclude must be nonempty\")\n\n # convert the myriad valid dtypes object to a single representation\n include = frozenset(infer_dtype_from_object(x) for x in include)\n exclude = frozenset(infer_dtype_from_object(x) for x in exclude)\n for dtypes in (include, exclude):\n invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError(f\"include and exclude overlap on {(include & exclude)}\")\n\n # We raise when both include and exclude are empty\n # Hence, we can just shrink the columns we want to keep\n keep_these = np.full(self.shape[1], True)\n\n def extract_unique_dtypes_from_dtypes_set(\n dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray\n ) -> List[Dtype]:\n extracted_dtypes = [\n unique_dtype\n for unique_dtype in unique_dtypes\n if (\n issubclass(\n unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]\n )\n or (\n np.number in dtypes_set\n and getattr(unique_dtype, \"_is_numeric\", False)\n )\n )\n ]\n return extracted_dtypes\n\n unique_dtypes = self.dtypes.unique()\n\n if include:\n included_dtypes = extract_unique_dtypes_from_dtypes_set(\n include, unique_dtypes\n )\n keep_these &= self.dtypes.isin(included_dtypes)\n\n if exclude:\n excluded_dtypes = extract_unique_dtypes_from_dtypes_set(\n exclude, unique_dtypes\n )\n keep_these &= ~self.dtypes.isin(excluded_dtypes)\n\n return self.iloc[:, keep_these.values]\n\n def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:\n \"\"\"\n Insert column into DataFrame at 
specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n See Also\n --------\n Index.insert : Insert new item by index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n >>> df.insert(1, \"newcol\", [99, 99])\n >>> df\n col1 newcol col2\n 0 1 99 3\n 1 2 99 4\n >>> df.insert(0, \"col1\", [100, 100], allow_duplicates=True)\n >>> df\n col1 col1 newcol col2\n 0 100 1 99 3\n 1 100 2 99 4\n \"\"\"\n if allow_duplicates and not self.flags.allows_duplicate_labels:\n raise ValueError(\n \"Cannot specify 'allow_duplicates=True' when \"\n \"'self.flags.allows_duplicate_labels' is False.\"\n )\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs) -> DataFrame:\n r\"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n \"\"\"\n data = self.copy()\n\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data\n\n def _sanitize_column(self, value):\n \"\"\"\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n value : scalar, Series, or array-like\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n self._ensure_valid_index(value)\n\n # We should never get here with DataFrame value\n if isinstance(value, Series):\n value = _reindex_for_setitem(value, self.index)\n\n elif isinstance(value, ExtensionArray):\n # Explicitly copy here, instead of in sanitize_index,\n # as sanitize_index won't copy an EA, even with copy=True\n value = value.copy()\n value = sanitize_index(value, self.index)\n\n elif isinstance(value, Index) or is_sequence(value):\n\n # turn me into an ndarray\n value = sanitize_index(value, self.index)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = maybe_convert_platform(value)\n else:\n value = com.asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n elif isinstance(value, Index):\n value = value.copy(deep=True)\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = maybe_infer_to_datetimelike(value)\n\n else:\n value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)\n\n return value\n\n @property\n def _series(self):\n return {\n item: Series(\n self._mgr.iget(idx), index=self.index, name=item, fastpath=True\n )\n for idx, item in enumerate(self.columns)\n }\n\n def lookup(self, row_labels, col_labels) -> np.ndarray:\n \"\"\"\n Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n .. 
deprecated:: 1.2.0\n DataFrame.lookup is deprecated,\n use DataFrame.melt and DataFrame.loc instead.\n For an example see :meth:`~pandas.DataFrame.lookup`\n in the user guide.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup.\n col_labels : sequence\n The column labels to use for lookup.\n\n Returns\n -------\n numpy.ndarray\n The found values.\n \"\"\"\n msg = (\n \"The 'lookup' method is deprecated and will be\"\n \"removed in a future version.\"\n \"You can use DataFrame.melt and DataFrame.loc\"\n \"as a substitute.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError(\"Row labels must have same size as column labels\")\n if not (self.index.is_unique and self.columns.is_unique):\n # GH#33041\n raise ValueError(\"DataFrame.lookup requires unique index and columns\")\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError(\"One or more row labels was not found\")\n if (cidx == -1).any():\n raise KeyError(\"One or more column labels was not found\")\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype=\"O\")\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self._get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n # ----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):\n frame = self\n\n columns = axes[\"columns\"]\n if columns is not None:\n frame = frame._reindex_columns(\n columns, method, copy, level, fill_value, limit, tolerance\n )\n\n index = axes[\"index\"]\n if index is not None:\n frame = frame._reindex_index(\n index, method, copy, level, fill_value, limit, tolerance\n )\n\n return frame\n\n def _reindex_index(\n self,\n new_index,\n method,\n copy: bool,\n level: Level,\n fill_value=np.nan,\n limit=None,\n tolerance=None,\n ):\n new_index, indexer = self.index.reindex(\n new_index, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {0: [new_index, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_columns(\n self,\n new_columns,\n method,\n copy: bool,\n level: Level,\n fill_value=None,\n limit=None,\n tolerance=None,\n ):\n new_columns, indexer = self.columns.reindex(\n new_columns, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {1: [new_columns, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:\n \"\"\"\n We are guaranteed non-Nones in the axes.\n \"\"\"\n new_index, row_indexer = self.index.reindex(axes[\"index\"])\n new_columns, col_indexer = self.columns.reindex(axes[\"columns\"])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = algorithms.take_2d_multi(\n self.values, indexer, fill_value=fill_value\n )\n return self._constructor(new_values, index=new_index, columns=new_columns)\n else:\n return self._reindex_with_indexers(\n {0: [new_index, row_indexer], 1: [new_columns, col_indexer]},\n copy=copy,\n 
fill_value=fill_value,\n )\n\n @doc(NDFrame.align, **_shared_doc_kwargs)\n def align(\n self,\n other,\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n level: Optional[Level] = None,\n copy: bool = True,\n fill_value=None,\n method: Optional[str] = None,\n limit=None,\n fill_axis: Axis = 0,\n broadcast_axis: Optional[Axis] = None,\n ) -> DataFrame:\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\" column or\",\n axis_description_sub=\", and 1 identifies the columns\",\n see_also_sub=\" or columns\",\n )\n @Appender(NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.reindex.__doc__)\n @rewrite_axis_style_signature(\n \"labels\",\n [\n (\"method\", None),\n (\"copy\", True),\n (\"level\", None),\n (\"fill_value\", np.nan),\n (\"limit\", None),\n (\"tolerance\", None),\n ],\n )\n def reindex(self, *args, **kwargs) -> DataFrame:\n axes = validate_axis_style_args(self, args, kwargs, \"labels\", \"reindex\")\n kwargs.update(axes)\n # Pop these, since the values are in `kwargs` under different names\n kwargs.pop(\"axis\", None)\n kwargs.pop(\"labels\", None)\n return super().reindex(**kwargs)\n\n def drop(\n self,\n labels=None,\n axis: Axis = 0,\n index=None,\n columns=None,\n level: Optional[Level] = None,\n inplace: bool = False,\n errors: str = \"raise\",\n ):\n \"\"\"\n Drop specified labels from rows or columns.\n\n Remove rows or columns by specifying label names and corresponding\n axis, or by specifying directly index or column names. When using a\n multi-index, labels on different levels can be removed by specifying\n the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index or column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Whether to drop labels from the index (0 or 'index') or\n columns (1 or 'columns').\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is equivalent to ``index=labels``).\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If False, return a copy. 
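# --- Hedged illustration: dropping rows and columns by label, per the drop
# docstring above. Hypothetical data; a sketch only.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"])
no_cols = df.drop(columns=["B", "C"])      # equivalent to df.drop(["B", "C"], axis=1)
no_rows = df.drop(index=[0, 1])            # drop by row label
assert list(no_cols.columns) == ["A", "D"] and list(no_rows.index) == [2]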
Otherwise, do operation\n inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n DataFrame or None\n DataFrame without the removed index or column labels or\n None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis.\n\n See Also\n --------\n DataFrame.loc : Label-location based indexer for selection by label.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n DataFrame.drop_duplicates : Return DataFrame with duplicate rows\n removed, optionally only considering certain columns.\n Series.drop : Return Series with specified index labels removed.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n Drop columns and/or rows of MultiIndex DataFrame\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],\n ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],\n ... [250, 150], [1.5, 0.8], [320, 250],\n ... [1, 0.8], [0.3, 0.2]])\n >>> df\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n length 1.5 1.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n length 1.5 0.8\n falcon speed 320.0 250.0\n weight 1.0 0.8\n length 0.3 0.2\n\n >>> df.drop(index='cow', columns='small')\n big\n lama speed 45.0\n weight 200.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n\n >>> df.drop(index='length', level=1)\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n falcon speed 320.0 250.0\n weight 1.0 0.8\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @rewrite_axis_style_signature(\n \"mapper\",\n [(\"copy\", True), (\"inplace\", False), (\"level\", None), (\"errors\", \"ignore\")],\n )\n def rename(\n self,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Alter axes labels.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or function transformations to apply to\n that axis' values. Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index`` and\n ``columns``.\n index : dict-like or function\n Alternative to specifying axis (``mapper, axis=0``\n is equivalent to ``index=mapper``).\n columns : dict-like or function\n Alternative to specifying axis (``mapper, axis=1``\n is equivalent to ``columns=mapper``).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to target with ``mapper``. 
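# --- Hedged illustration: the two rename calling conventions described in the
# rename docstring above. Hypothetical data; a sketch only.
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
by_keyword = df.rename(columns={"A": "a", "B": "b"})   # keyword form (recommended)
by_axis = df.rename(str.lower, axis="columns")         # mapper-plus-axis form
assert list(by_keyword.columns) == list(by_axis.columns) == ["a", "b"]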
Can be either the axis name\n ('index', 'columns') or number (0, 1). The default is 'index'.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new DataFrame. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the renamed axis labels or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n DataFrame.rename_axis : Set the name of the axis.\n\n Examples\n --------\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Rename columns using a mapping:\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n Rename index using a mapping:\n\n >>> df.rename(index={0: \"x\", 1: \"y\", 2: \"z\"})\n A B\n x 1 4\n y 2 5\n z 3 6\n\n Cast index labels to a different type:\n\n >>> df.index\n RangeIndex(start=0, stop=3, step=1)\n >>> df.rename(index=str).index\n Index(['0', '1', '2'], dtype='object')\n\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"b\", \"C\": \"c\"}, errors=\"raise\")\n Traceback (most recent call last):\n KeyError: ['C'] not found in axis\n\n Using axis-style parameters:\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n \"\"\"\n return super().rename(\n mapper=mapper,\n index=index,\n columns=columns,\n axis=axis,\n copy=copy,\n inplace=inplace,\n level=level,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit=None,\n downcast=None,\n ) -> Optional[DataFrame]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Label) -> Series:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... 
columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n return super().pop(item=item)\n\n @doc(NDFrame.replace, **_shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace: bool = False,\n limit=None,\n regex: bool = False,\n method: str = \"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_columnwise(\n self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex\n ):\n \"\"\"\n Dispatch to Series.replace column-wise.\n\n\n Parameters\n ----------\n mapping : dict\n of the form {col: (target, value)}\n inplace : bool\n regex : bool or same types as `to_replace` in DataFrame.replace\n\n Returns\n -------\n DataFrame or None\n \"\"\"\n # Operate column-wise\n res = self if inplace else self.copy()\n ax = self.columns\n\n for i in range(len(ax)):\n if ax[i] in mapping:\n ser = self.iloc[:, i]\n\n target, value = mapping[ax[i]]\n newobj = ser.replace(target, value, regex=regex)\n\n res.iloc[:, i] = newobj\n\n if inplace:\n return\n return res.__finalize__(self)\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(\n self, periods=1, freq=None, axis: Axis = 0, fill_value=lib.no_default\n ) -> DataFrame:\n axis = self._get_axis_number(axis)\n\n ncols = len(self.columns)\n if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:\n # We will infer fill_value to match the closest column\n\n # Use a column that we know is valid for our column's dtype GH#38434\n label = self.columns[0]\n\n if periods > 0:\n result = self.iloc[:, :-periods]\n for col in range(min(ncols, abs(periods))):\n # TODO(EA2D): doing this in a loop unnecessary with 2D EAs\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, 0].shift(len(self))\n result.insert(0, label, filler, allow_duplicates=True)\n else:\n result = self.iloc[:, -periods:]\n for col in range(min(ncols, abs(periods))):\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, -1].shift(len(self))\n result.insert(\n len(result.columns), label, filler, allow_duplicates=True\n )\n\n result.columns = self.columns.copy()\n return result\n\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def set_index(\n self,\n keys,\n drop: bool = True,\n append: bool = False,\n inplace: bool = False,\n verify_integrity: bool = False,\n ):\n \"\"\"\n Set the DataFrame index using existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. 
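# --- Hedged illustration: the kinds of "keys" set_index accepts (a single
# column label, or a list mixing labels with array-likes of matching length),
# per the parameter description above. Hypothetical data; a sketch only.
import pandas as pd

df = pd.DataFrame({"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014],
                   "sale": [55, 40, 84, 31]})
by_column = df.set_index("month")                           # single column key
by_mixed = df.set_index([pd.Index([1, 2, 3, 4]), "year"])   # Index object plus a column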
Here, \"array\"\n encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and\n instances of :class:`~collections.abc.Iterator`.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n If True, modifies the DataFrame in place (do not create a new object).\n verify_integrity : bool, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. Setting to False will improve the performance of this\n method.\n\n Returns\n -------\n DataFrame or None\n Changed row labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]})\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month')\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month'])\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n\n Create a MultiIndex using an Index and a column:\n\n >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\n month sale\n year\n 1 2012 1 55\n 2 2014 4 40\n 3 2013 7 84\n 4 2014 10 31\n\n Create a MultiIndex using two Series:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> df.set_index([s, s**2])\n month year sale\n 1 1 1 2012 55\n 2 4 4 2014 40\n 3 9 7 2013 84\n 4 16 10 2014 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if not isinstance(keys, list):\n keys = [keys]\n\n err_msg = (\n 'The parameter \"keys\" may be a column key, one-dimensional '\n \"array, or a list containing only valid column keys and \"\n \"one-dimensional arrays.\"\n )\n\n missing: List[Label] = []\n for col in keys:\n if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):\n # arrays are fine as long as they are one-dimensional\n # iterators get converted to list below\n if getattr(col, \"ndim\", 1) != 1:\n raise ValueError(err_msg)\n else:\n # everything else gets tried as a key; see GH 24969\n try:\n found = col in self.columns\n except TypeError as err:\n raise TypeError(\n f\"{err_msg}. 
Received column of type {type(col)}\"\n ) from err\n else:\n if not found:\n missing.append(col)\n\n if missing:\n raise KeyError(f\"None of {missing} are in the columns\")\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names: List[Label] = []\n if append:\n names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index._get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove: List[Label] = []\n for col in keys:\n if isinstance(col, MultiIndex):\n for n in range(col.nlevels):\n arrays.append(col._get_level_values(n))\n names.extend(col.names)\n elif isinstance(col, (Index, Series)):\n # if Index then not MultiIndex (treated above)\n arrays.append(col)\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray)):\n arrays.append(col)\n names.append(None)\n elif isinstance(col, abc.Iterator):\n arrays.append(list(col))\n names.append(None)\n # from here, col can only be a column label\n else:\n arrays.append(frame[col]._values)\n names.append(col)\n if drop:\n to_remove.append(col)\n\n if len(arrays[-1]) != len(self):\n # check newest element against length of calling frame, since\n # ensure_index_from_sequences would not raise for append=False.\n raise ValueError(\n f\"Length mismatch: Expected {len(self)} rows, \"\n f\"received array of length {len(arrays[-1])}\"\n )\n\n index = ensure_index_from_sequences(arrays, names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index[index.duplicated()].unique()\n raise ValueError(f\"Index has duplicate keys: {duplicates}\")\n\n # use set to handle duplicate column names gracefully in case of drop\n for c in set(to_remove):\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n @overload\n # https://github.com/python/mypy/issues/6580\n # Overloaded function signatures 1 and 2 overlap with incompatible return types\n def reset_index( # type: ignore[misc]\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[False] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> DataFrame:\n ...\n\n @overload\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[True] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> None:\n ...\n\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: Hashable = 0,\n col_fill: Label = \"\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Reset the index, or a level of it.\n\n Reset the index of the DataFrame, and use the default one instead.\n If the DataFrame has a MultiIndex, this method can remove one or more\n levels.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. 
By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the new index or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column, and a\n new sequential index is used:\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = pd.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class')\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1)\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='species')\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='genus')\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n new_index = ibase.default_index(len(new_obj))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if not drop:\n to_insert: Iterable[Tuple[Any, Optional[Any]]]\n if isinstance(self.index, MultiIndex):\n names = [\n (n if n is not None else f\"level_{i}\")\n for i, n in enumerate(self.index.names)\n ]\n to_insert = zip(self.index.levels, self.index.codes)\n else:\n default = \"index\" if \"index\" not in self else \"level_0\"\n names = [default] if self.index.name is None else [self.index.name]\n to_insert = ((self.index, None),)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(to_insert))):\n if not (level is None or i in level):\n continue\n name = names[i]\n if multi_col:\n col_name = list(name) if isinstance(name, tuple) else [name]\n if col_fill is None:\n if len(col_name) not in (1, self.columns.nlevels):\n raise ValueError(\n \"col_fill=None is incompatible \"\n f\"with incomplete column name {name}\"\n )\n col_fill = col_name[0]\n\n lev_num = self.columns._get_level_number(col_level)\n name_lst = [col_fill] * lev_num + col_name\n missing = self.columns.nlevels - len(name_lst)\n name_lst += [col_fill] * missing\n name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = lev._values\n if level_values.dtype == np.object_:\n level_values = lib.maybe_convert_objects(level_values)\n\n if lab is not None:\n # if we have the codes, extract the values with a mask\n level_values = algorithms.take(\n level_values, lab, allow_fill=True, fill_value=lev._na_value\n )\n\n new_obj.insert(0, name, level_values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n return None\n\n # ----------------------------------------------------------------------\n # Reindex-based selection methods\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) -> DataFrame:\n result = self._constructor(self._mgr.isna(func=isna))\n return result.__finalize__(self, method=\"isna\")\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> DataFrame:\n return self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> DataFrame:\n return ~self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> DataFrame:\n return ~self.isna()\n\n def 
dropna(\n self,\n axis: Axis = 0,\n how: str = \"any\",\n thresh=None,\n subset=None,\n inplace: bool = False,\n ):\n \"\"\"\n Remove missing values.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n * 1, or 'columns' : Drop columns which contain missing value.\n\n .. versionchanged:: 1.0.0\n\n Pass tuple or list to drop on multiple axes.\n Only a single axis is allowed.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.isna: Indicate missing values.\n DataFrame.notna : Indicate existing (non-missing) values.\n DataFrame.fillna : Replace missing values.\n Series.dropna : Drop missing values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.nan, 'Batmobile', 'Bullwhip'],\n ... \"born\": [pd.NaT, pd.Timestamp(\"1940-04-25\"),\n ... 
pd.NaT]})\n >>> df\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(axis, (tuple, list)):\n # GH20987\n raise TypeError(\"supplying multiple axes to axis is no longer supported.\")\n\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n agg_obj = self.take(indices, axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == \"all\":\n mask = count > 0\n else:\n if how is not None:\n raise ValueError(f\"invalid how option: {how}\")\n else:\n raise TypeError(\"must specify how or thresh\")\n\n result = self.loc(axis=axis)[mask]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def drop_duplicates(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n inplace: bool = False,\n ignore_index: bool = False,\n ) -> Optional[DataFrame]:\n \"\"\"\n Return DataFrame with duplicate rows removed.\n\n Considering certain columns is optional. Indexes, including time indexes\n are ignored.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : bool, default False\n Whether to drop duplicates in place or to return a copy.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, โ€ฆ, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n DataFrame or None\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.value_counts: Count unique combinations of columns.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, it removes duplicate rows based on all columns.\n\n >>> df.drop_duplicates()\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n To remove duplicates on specific column(s), use ``subset``.\n\n >>> df.drop_duplicates(subset=['brand'])\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n\n To remove duplicates and keep last occurrences, use ``keep``.\n\n >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')\n brand style rating\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 4 Indomie pack 5.0\n \"\"\"\n if self.empty:\n return self.copy()\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ignore_index = validate_bool_kwarg(ignore_index, \"ignore_index\")\n duplicated = self.duplicated(subset, keep=keep)\n\n result = self[-duplicated]\n if ignore_index:\n result.index = ibase.default_index(len(result))\n\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n ) -> Series:\n \"\"\"\n Return boolean Series denoting duplicate rows.\n\n Considering certain columns is optional.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to mark.\n\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Boolean series for each duplicated rows.\n\n See Also\n --------\n Index.duplicated : Equivalent method on index.\n Series.duplicated : Equivalent method on Series.\n Series.drop_duplicates : Remove duplicate values from Series.\n DataFrame.drop_duplicates : Remove duplicate values from DataFrame.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, for each set of duplicated values, the first occurrence\n is set on False and all others on True.\n\n >>> df.duplicated()\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True.\n\n >>> df.duplicated(keep='last')\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By setting ``keep`` on False, all duplicates are True.\n\n >>> df.duplicated(keep=False)\n 0 True\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n To find duplicates on specific column(s), use ``subset``.\n\n >>> df.duplicated(subset=['brand'])\n 0 False\n 1 True\n 2 False\n 3 True\n 4 True\n dtype: bool\n \"\"\"\n from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64\n\n if self.empty:\n return self._constructor_sliced(dtype=bool)\n\n def f(vals):\n labels, shape = algorithms.factorize(\n vals, size_hint=min(len(self), SIZE_HINT_LIMIT)\n )\n return labels.astype(\"i8\", copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self.columns\n ):\n subset = (subset,)\n\n # needed for mypy since can't narrow types using np.iterable\n subset = cast(Iterable, subset)\n\n # Verify all columns in subset exist in the queried dataframe\n # Otherwise, raise a KeyError, same as if you try to __getitem__ with a\n # key that doesn't exist.\n diff = Index(subset).difference(self.columns)\n if not diff.empty:\n raise KeyError(diff)\n\n vals = (col.values for name, col in self.items() if name in subset)\n labels, shape = map(list, zip(*map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n # ----------------------------------------------------------------------\n # Sorting\n # TODO: Just move the sort_values doc here.\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.sort_values.__doc__)\n # error: Signature of \"sort_values\" incompatible with supertype \"NDFrame\"\n def sort_values( # type: ignore[override]\n self,\n by,\n axis: Axis = 0,\n ascending=True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = self._get_axis_number(axis)\n\n if not isinstance(by, list):\n by = [by]\n if is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) != length of by ({len(by)})\"\n )\n if len(by) > 1:\n\n keys = [self._get_label_or_level_values(x, axis=axis) for x in by]\n\n # need to rewrap columns in Series to apply key function\n if key is not None:\n keys = [Series(k, name=name) for (k, name) in zip(keys, by)]\n\n indexer = lexsort_indexer(\n keys, orders=ascending, na_position=na_position, key=key\n )\n indexer = ensure_platform_int(indexer)\n else:\n\n by = by[0]\n k = self._get_label_or_level_values(by, axis=axis)\n\n # need to rewrap column in Series to apply key function\n if key is not None:\n k = Series(k, name=by)\n\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = nargsort(\n k, 
kind=kind, ascending=ascending, na_position=na_position, key=key\n )\n\n new_data = self._mgr.take(\n indexer, axis=self._get_block_manager_axis(axis), verify=False\n )\n\n if ignore_index:\n new_data.axes[1] = ibase.default_index(len(indexer))\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Level] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort object by labels (along an axis).\n\n Returns a new DataFrame sorted by label if `inplace` argument is\n ``False``, otherwise updates the original DataFrame and returns None.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. `mergesort` and `stable` are the only stable algorithms. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, โ€ฆ, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape. For MultiIndex\n inputs, the key is applied *per level*.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame or None\n The original DataFrame sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort Series by the index.\n DataFrame.sort_values : Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],\n ... columns=['A'])\n >>> df.sort_index()\n A\n 1 4\n 29 2\n 100 1\n 150 5\n 234 3\n\n By default, it sorts in ascending order, to sort in descending order,\n use ``ascending=False``\n\n >>> df.sort_index(ascending=False)\n A\n 234 3\n 150 5\n 100 1\n 29 2\n 1 4\n\n A key function can be specified which is applied to the index before\n sorting. 
For a ``MultiIndex`` this is applied to each level separately.\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])\n >>> df.sort_index(key=lambda x: x.str.lower())\n a\n A 1\n b 2\n C 3\n d 4\n \"\"\"\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def value_counts(\n self,\n subset: Optional[Sequence[Label]] = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n ):\n \"\"\"\n Return a Series containing counts of unique rows in the DataFrame.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n\n Notes\n -----\n The returned Series will have a MultiIndex with one level per input\n column. By default, rows that contain any NA values are omitted from\n the result. By default, the resulting Series will be in descending\n order so that the first element is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],\n ... 'num_wings': [2, 0, 0, 0]},\n ... index=['falcon', 'dog', 'cat', 'ant'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 4 0\n ant 6 0\n\n >>> df.value_counts()\n num_legs num_wings\n 4 0 2\n 2 2 1\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(sort=False)\n num_legs num_wings\n 2 2 1\n 4 0 2\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(ascending=True)\n num_legs num_wings\n 2 2 1\n 6 0 1\n 4 0 2\n dtype: int64\n\n >>> df.value_counts(normalize=True)\n num_legs num_wings\n 4 0 0.50\n 2 2 0.25\n 6 0 0.25\n dtype: float64\n \"\"\"\n if subset is None:\n subset = self.columns.tolist()\n\n counts = self.groupby(subset).grouper.size()\n\n if sort:\n counts = counts.sort_values(ascending=ascending)\n if normalize:\n counts /= counts.sum()\n\n # Force MultiIndex for single column\n if len(subset) == 1:\n counts.index = MultiIndex.from_arrays(\n [counts.index], names=[counts.index.name]\n )\n\n return counts\n\n def nlargest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - `first` : prioritize the first occurrence(s)\n - `last` : prioritize the last occurrence(s)\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 11300,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 11300 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(3, 'population')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nlargest(3, 'population', keep='last')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nlargest(3, 'population', keep='all')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n\n To order by the largest values in column \"population\" and then \"GDP\",\n we can specify multiple columns like in the next example.\n\n >>> df.nlargest(3, ['population', 'GDP'])\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n \"\"\"\n return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()\n\n def nsmallest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 337000,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"population\".\n\n >>> df.nsmallest(3, 'population')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nsmallest(3, 'population', keep='last')\n population GDP alpha-2\n Anguilla 11300 311 AI\n Tuvalu 11300 38 TV\n Nauru 337000 182 NR\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nsmallest(3, 'population', keep='all')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n\n To order by the smallest values in column \"population\" and then \"GDP\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(3, ['population', 'GDP'])\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Nauru 337000 182 NR\n \"\"\"\n return algorithms.SelectNFrame(\n self, n=n, keep=keep, columns=columns\n ).nsmallest()\n\n def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n\n if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only swap levels on a hierarchical axis.\")\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.swaplevel(i, j)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Where to reorder levels.\n\n Returns\n -------\n DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.reorder_levels(order)\n return result\n\n # ----------------------------------------------------------------------\n # Arithmetic Methods\n\n def _cmp_method(self, other, op):\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)\n\n # See GH#4537 for discussion of scalar op behavior\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n def _arith_method(self, other, op):\n if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):\n return ops.frame_arith_method_with_reindex(self, other, op)\n\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)\n\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n _logical_method = _arith_method\n\n def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):\n \"\"\"\n Evaluate the frame operation func(left, right) by evaluating\n column-by-column, dispatching to the Series implementation.\n\n Parameters\n ----------\n right : scalar, Series, or DataFrame\n func : arithmetic or comparison operator\n axis : {None, 0, 1}\n\n Returns\n -------\n DataFrame\n \"\"\"\n # Get the appropriate array-op to apply to each column/block's values.\n array_op = ops.get_array_op(func)\n\n right = lib.item_from_zerodim(right)\n if not is_list_like(right):\n # i.e. 
scalar, faster than checking np.ndim(right) == 0\n bm = self._mgr.apply(array_op, right=right)\n return type(self)(bm)\n\n elif isinstance(right, DataFrame):\n assert self.index.equals(right.index)\n assert self.columns.equals(right.columns)\n # TODO: The previous assertion `assert right._indexed_same(self)`\n # fails in cases with empty columns reached via\n # _frame_arith_method_with_reindex\n\n bm = self._mgr.operate_blockwise(right._mgr, array_op)\n return type(self)(bm)\n\n elif isinstance(right, Series) and axis == 1:\n # axis=1 means we want to operate row-by-row\n assert right.index.equals(self.columns)\n\n right = right._values\n # maybe_align_as_frame ensures we do not have an ndarray here\n assert not isinstance(right, np.ndarray)\n\n arrays = [\n array_op(_left, _right)\n for _left, _right in zip(self._iter_column_arrays(), right)\n ]\n\n elif isinstance(right, Series):\n assert right.index.equals(self.index) # Handle other cases later\n right = right._values\n\n arrays = [array_op(left, right) for left in self._iter_column_arrays()]\n\n else:\n # Remaining cases have less-obvious dispatch rules\n raise NotImplementedError(right)\n\n return type(self)._from_arrays(\n arrays, self.columns, self.index, verify_integrity=False\n )\n\n def _combine_frame(self, other: DataFrame, func, fill_value=None):\n # at this point we have `self._indexed_same(other)`\n\n if fill_value is None:\n # since _arith_op may be called in a loop, avoid function call\n # overhead if possible by doing this check once\n _arith_op = func\n\n else:\n\n def _arith_op(left, right):\n # for the mixed_type case where we iterate over columns,\n # _arith_op(left, right) is equivalent to\n # left._binop(right, func, fill_value=fill_value)\n left, right = ops.fill_binop(left, right, fill_value)\n return func(left, right)\n\n new_data = self._dispatch_frame_op(other, _arith_op)\n return new_data\n\n def _construct_result(self, result) -> DataFrame:\n \"\"\"\n Wrap the result of an arithmetic, comparison, or logical operation.\n\n Parameters\n ----------\n result : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n out = self._constructor(result, copy=False)\n # Pin columns instead of passing to constructor for compat with\n # non-unique columns case\n out.columns = self.columns\n out.index = self.index\n return out\n\n def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = self // other\n mod = self - div * other\n return div, mod\n\n def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = other // self\n mod = other - div * self\n return div, mod\n\n # ----------------------------------------------------------------------\n # Combination-Related\n\n @doc(\n _shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nDataFrame\n DataFrame that shows the differences stacked side by side.\n\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\nRaises\n------\nValueError\n When the two DataFrames don't have identical labels or shape.\n\nSee Also\n--------\nSeries.compare : Compare with another Series and show differences.\nDataFrame.equals : Test whether two objects contain the same elements.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nCan only compare identically-labeled\n(i.e. same shape, identical row and column labels) DataFrames\n\nExamples\n--------\n>>> df = pd.DataFrame(\n... {{\n... 
\"col1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n... \"col2\": [1.0, 2.0, 3.0, np.nan, 5.0],\n... \"col3\": [1.0, 2.0, 3.0, 4.0, 5.0]\n... }},\n... columns=[\"col1\", \"col2\", \"col3\"],\n... )\n>>> df\n col1 col2 col3\n0 a 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 3.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\n>>> df2 = df.copy()\n>>> df2.loc[0, 'col1'] = 'c'\n>>> df2.loc[2, 'col3'] = 4.0\n>>> df2\n col1 col2 col3\n0 c 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 4.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\nAlign the differences on columns\n\n>>> df.compare(df2)\n col1 col3\n self other self other\n0 a c NaN NaN\n2 NaN NaN 3.0 4.0\n\nStack the differences on rows\n\n>>> df.compare(df2, align_axis=0)\n col1 col3\n0 self a NaN\n other c NaN\n2 self NaN 3.0\n other NaN 4.0\n\nKeep the equal values\n\n>>> df.compare(df2, keep_equal=True)\n col1 col3\n self other self other\n0 a c 1.0 1.0\n2 b b 3.0 4.0\n\nKeep all original rows and columns\n\n>>> df.compare(df2, keep_shape=True)\n col1 col2 col3\n self other self other self other\n0 a c NaN NaN NaN NaN\n1 NaN NaN NaN NaN NaN NaN\n2 NaN NaN NaN NaN 3.0 4.0\n3 NaN NaN NaN NaN NaN NaN\n4 NaN NaN NaN NaN NaN NaN\n\nKeep all original rows and columns and also all original values\n\n>>> df.compare(df2, keep_shape=True, keep_equal=True)\n col1 col2 col3\n self other self other self other\n0 a c 1.0 1.0 1.0 1.0\n1 a a 2.0 2.0 2.0 2.0\n2 b b 3.0 3.0 3.0 4.0\n3 b b NaN NaN 4.0 4.0\n4 a a 5.0 5.0 5.0 5.0\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: DataFrame,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> DataFrame:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(\n self, other: DataFrame, func, fill_value=None, overwrite: bool = True\n ) -> DataFrame:\n \"\"\"\n Perform column-wise combine with another DataFrame.\n\n Combines a DataFrame with `other` DataFrame using `func`\n to element-wise combine columns. The row and column indexes of the\n resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n The DataFrame to merge column-wise.\n func : function\n Function that takes two series as inputs and return a Series or a\n scalar. 
Used to merge the two dataframes column by columns.\n fill_value : scalar value, default None\n The value to fill NaNs with prior to passing any column to the\n merge func.\n overwrite : bool, default True\n If True, columns in `self` that do not exist in `other` will be\n overwritten with NaNs.\n\n Returns\n -------\n DataFrame\n Combination of the provided DataFrames.\n\n See Also\n --------\n DataFrame.combine_first : Combine two DataFrame objects and default to\n non-null values in frame calling the method.\n\n Examples\n --------\n Combine using a simple function that chooses the smaller column.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2\n >>> df1.combine(df2, take_smaller)\n A B\n 0 0 3\n 1 0 3\n\n Example using a true element-wise combine function.\n\n >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, np.minimum)\n A B\n 0 1 2\n 1 0 3\n\n Using `fill_value` fills Nones prior to passing the column to the\n merge function.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 4.0\n\n However, if the same element in both dataframes is None, that None\n is preserved\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 3.0\n\n Example that demonstrates the use of `overwrite` and behavior when\n the axis differ between the dataframes.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])\n >>> df1.combine(df2, take_smaller)\n A B C\n 0 NaN NaN NaN\n 1 NaN 3.0 -10.0\n 2 NaN 3.0 1.0\n\n >>> df1.combine(df2, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 -10.0\n 2 NaN 3.0 1.0\n\n Demonstrating the preference of the passed in dataframe.\n\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])\n >>> df2.combine(df1, take_smaller)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 NaN\n 2 NaN 3.0 NaN\n\n >>> df2.combine(df1, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isna(series)\n other_mask = isna(otherSeries)\n\n # don't overwrite columns unnecessarily\n # DO propagate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n if col not in self.columns:\n # If self DataFrame does not have col in other DataFrame,\n # try to promote series, which is all NaN, as other_dtype.\n new_dtype = other_dtype\n try:\n series = 
series.astype(new_dtype, copy=False)\n except ValueError:\n # e.g. new_dtype is integer types\n pass\n else:\n # if we have different dtypes, possibly promote\n new_dtype = find_common_type([this_dtype, other_dtype])\n if not is_dtype_equal(this_dtype, new_dtype):\n series = series.astype(new_dtype)\n if not is_dtype_equal(other_dtype, new_dtype):\n otherSeries = otherSeries.astype(new_dtype)\n\n arr = func(series, otherSeries)\n arr = maybe_downcast_to_dtype(arr, new_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result, index=new_index, columns=new_columns)\n\n def combine_first(self, other: DataFrame) -> DataFrame:\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.combine : Perform series-wise operation on two DataFrames\n using a given function.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine_first(df2)\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value\n does not exist in `other`\n\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n >>> df1.combine_first(df2)\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n def combiner(x, y):\n mask = extract_array(isna(x))\n\n x_values = extract_array(x, extract_numpy=True)\n y_values = extract_array(y, extract_numpy=True)\n\n # If the column y in other DataFrame is not in first DataFrame,\n # just return y_values.\n if y.name not in self.columns:\n return y_values\n\n return expressions.where(mask, y_values, x_values)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(\n self,\n other,\n join: str = \"left\",\n overwrite: bool = True,\n filter_func=None,\n errors: str = \"ignore\",\n ) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n Should have at least one matching index/column label\n with the original DataFrame. If a Series is passed,\n its name attribute must be set, and that will be\n used as the column name to align with the original DataFrame.\n join : {'left'}, default 'left'\n Only left join is implemented, keeping the index and columns of the\n original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values\n with values from `other`.\n * False: only update values that are NA in\n the original DataFrame.\n\n filter_func : callable(1d-array) -> bool 1d-array, optional\n Can choose to replace values other than NA. Return True for values\n that should be updated.\n errors : {'raise', 'ignore'}, default 'ignore'\n If 'raise', will raise a ValueError if the DataFrame and `other`\n both contain non-NA data in the same place.\n\n .. 
versionchanged:: 0.24.0\n Changed from `raise_conflict=False|True`\n to `errors='ignore'|'raise'`.\n\n Returns\n -------\n None : method directly changes calling object\n\n Raises\n ------\n ValueError\n * When `errors='raise'` and there's overlapping non-NA data.\n * When `errors` is not either `'ignore'` or `'raise'`\n NotImplementedError\n * If `join != 'left'`\n\n See Also\n --------\n dict.update : Similar method for dictionaries.\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, 5, 6],\n ... 'C': [7, 8, 9]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})\n >>> df.update(new_df)\n >>> df\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, its name attribute must be set.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df\n A B\n 0 a d\n 1 b y\n 2 c e\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])\n >>> df.update(new_df)\n >>> df\n A B\n 0 a x\n 1 b d\n 2 c e\n\n If `other` contains NaNs the corresponding values are not updated\n in the original dataframe.\n\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n # TODO: Support other joins\n if join != \"left\": # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n if errors not in [\"ignore\", \"raise\"]:\n raise ValueError(\"The parameter errors must be either 'ignore' or 'raise'\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col]._values\n that = other[col]._values\n if filter_func is not None:\n with np.errstate(all=\"ignore\"):\n mask = ~filter_func(this) | isna(that)\n else:\n if errors == \"raise\":\n mask_this = notna(that)\n mask_that = notna(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isna(that)\n else:\n mask = notna(this)\n\n # don't overwrite columns unnecessarily\n if mask.all():\n continue\n\n self[col] = expressions.where(mask, this, that)\n\n # ----------------------------------------------------------------------\n # Data reshaping\n @Appender(\n \"\"\"\nExamples\n--------\n>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n... 'Parrot', 'Parrot'],\n... 'Max Speed': [380., 370., 24., 26.]})\n>>> df\n Animal Max Speed\n0 Falcon 380.0\n1 Falcon 370.0\n2 Parrot 24.0\n3 Parrot 26.0\n>>> df.groupby(['Animal']).mean()\n Max Speed\nAnimal\nFalcon 375.0\nParrot 25.0\n\n**Hierarchical Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... 
['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n... index=index)\n>>> df\n Max Speed\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\n>>> df.groupby(level=0).mean()\n Max Speed\nAnimal\nFalcon 370.0\nParrot 25.0\n>>> df.groupby(level=\"Type\").mean()\n Max Speed\nType\nCaptive 210.0\nWild 185.0\n\nWe can also choose to include NA in group keys or not by setting\n`dropna` parameter, the default setting is `True`:\n\n>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=[\"b\"]).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\n\n>>> df.groupby(by=[\"b\"], dropna=False).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\nNaN 1 4\n\n>>> l = [[\"a\", 12, 12], [None, 12.3, 33.], [\"b\", 12.3, 123], [\"a\", 1, 1]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=\"a\").sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\n\n>>> df.groupby(by=\"a\", dropna=False).sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\nNaN 12.3 33.0\n\"\"\"\n )\n @Appender(_shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis: Axis = 0,\n level: Optional[Level] = None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> DataFrameGroupBy:\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return DataFrameGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n _shared_docs[\n \"pivot\"\n ] = \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation, multiple values will result in a MultiIndex in the\n columns. See the :ref:`User Guide <reshaping>` for more on reshaping.\n\n Parameters\n ----------%s\n index : str or object or a list of str, optional\n Column to use to make new frame's index. If None, uses\n existing index.\n\n .. versionchanged:: 1.1.0\n Also accept list of index names.\n\n columns : str or object or a list of str\n Column to use to make new frame's columns.\n\n .. versionchanged:: 1.1.0\n Also accept list of columns names.\n\n values : str, object or a list of the previous, optional\n Column(s) to use for populating new frame's values. If not\n specified, all remaining columns will be used and the result will\n have hierarchically indexed columns.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n Raises\n ------\n ValueError:\n When there are any `index`, `columns` combinations with multiple\n values. 
`DataFrame.pivot_table` when you need to aggregate.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods.\n\n Examples\n --------\n >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar')['baz']\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n baz zoo\n bar A B C A B C\n foo\n one 1 2 3 x y z\n two 4 5 6 q w t\n\n You could also assign a list of column names or a list of index names.\n\n >>> df = pd.DataFrame({\n ... \"lev1\": [1, 1, 1, 2, 2, 2],\n ... \"lev2\": [1, 1, 2, 1, 1, 2],\n ... \"lev3\": [1, 2, 1, 2, 1, 2],\n ... \"lev4\": [1, 2, 3, 4, 5, 6],\n ... \"values\": [0, 1, 2, 3, 4, 5]})\n >>> df\n lev1 lev2 lev3 lev4 values\n 0 1 1 1 1 0\n 1 1 1 2 2 1\n 2 1 2 1 3 2\n 3 2 1 2 4 3\n 4 2 1 1 5 4\n 5 2 2 2 6 5\n\n >>> df.pivot(index=\"lev1\", columns=[\"lev2\", \"lev3\"],values=\"values\")\n lev2 1 2\n lev3 1 2 1 2\n lev1\n 1 0.0 1.0 2.0 NaN\n 2 4.0 3.0 NaN 5.0\n\n >>> df.pivot(index=[\"lev1\", \"lev2\"], columns=[\"lev3\"],values=\"values\")\n lev3 1 2\n lev1 lev2\n 1 1 0.0 1.0\n 2 2.0 NaN\n 2 1 4.0 3.0\n 2 NaN 5.0\n\n A ValueError is raised if there are any duplicates.\n\n >>> df = pd.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]})\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n Notice that the first two rows are the same for our `index`\n and `columns` arguments.\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n Traceback (most recent call last):\n ...\n ValueError: Index contains duplicate entries, cannot reshape\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot\"])\n def pivot(self, index=None, columns=None, values=None) -> DataFrame:\n from pandas.core.reshape.pivot import pivot\n\n return pivot(self, index=index, columns=columns, values=values)\n\n _shared_docs[\n \"pivot_table\"\n ] = \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame.\n\n The levels in the pivot table will be stored in MultiIndex objects\n (hierarchical indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------%s\n values : column to aggregate, optional\n index : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table index. If an array is passed,\n it is being used as the same manner as column values.\n columns : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table column. 
If an array is passed,\n it is being used as the same manner as column values.\n aggfunc : function, list of functions, dict, default numpy.mean\n If list of functions passed, the resulting pivot table will have\n hierarchical columns whose top level are the function names\n (inferred from the function objects themselves)\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with (in the resulting pivot table,\n after aggregation).\n margins : bool, default False\n Add all row / columns (e.g. for subtotal / grand totals).\n dropna : bool, default True\n Do not include columns whose entries are all NaN.\n margins_name : str, default 'All'\n Name of the row / column that will contain the totals\n when margins is True.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 0.25.0\n\n Returns\n -------\n DataFrame\n An Excel style pivot table.\n\n See Also\n --------\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.melt: Unpivot a DataFrame from wide to long format,\n optionally leaving identifiers set.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum)\n >>> table\n C large small\n A B\n bar one 4.0 5.0\n two 7.0 6.0\n foo one 4.0 1.0\n two NaN 6.0\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum, fill_value=0)\n >>> table\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n The next example aggregates by taking the mean across multiple columns.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 'E': np.mean})\n >>> table\n D E\n A C\n bar large 5.500000 7.500000\n small 5.500000 8.500000\n foo large 2.000000 4.500000\n small 2.333333 4.333333\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 
'E': [min, max, np.mean]})\n >>> table\n D E\n mean max mean min\n A C\n bar large 5.500000 9.0 7.500000 6.0\n small 5.500000 9.0 8.500000 8.0\n foo large 2.000000 5.0 4.500000 4.0\n small 2.333333 6.0 4.333333 2.0\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot_table\"])\n def pivot_table(\n self,\n values=None,\n index=None,\n columns=None,\n aggfunc=\"mean\",\n fill_value=None,\n margins=False,\n dropna=True,\n margins_name=\"All\",\n observed=False,\n ) -> DataFrame:\n from pandas.core.reshape.pivot import pivot_table\n\n return pivot_table(\n self,\n values=values,\n index=index,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n )\n\n def stack(self, level: Level = -1, dropna: bool = True):\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n Parameters\n ----------\n level : int, str, list, default -1\n Level(s) to stack from the column axis onto the index\n axis, defined as one index or label, or a list of indices\n or labels.\n dropna : bool, default True\n Whether to drop rows in the resulting Frame/Series with\n missing values. Stacking a column level onto the index\n axis can create combinations of index and column values\n that are missing from the original dataframe. See Examples\n section.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack()\n cat weight 0\n height 1\n dog weight 2\n height 3\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... 
columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack()\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n\n **Prescribing the level(s) to be stacked**\n\n The first parameter controls which level or levels are stacked:\n\n >>> df_multi_level_cols2.stack(0)\n kg m\n cat height NaN 2.0\n weight 1.0 NaN\n dog height NaN 4.0\n weight 3.0 NaN\n >>> df_multi_level_cols2.stack([0, 1])\n cat height m 2.0\n weight kg 1.0\n dog height m 4.0\n weight kg 3.0\n dtype: float64\n\n **Dropping missing values**\n\n >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n Note that rows where all values are missing are dropped by\n default but this behaviour can be controlled via the dropna\n keyword parameter:\n\n >>> df_multi_level_cols3\n weight height\n kg m\n cat NaN 1.0\n dog 2.0 3.0\n >>> df_multi_level_cols3.stack(dropna=False)\n height weight\n cat kg NaN NaN\n m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n >>> df_multi_level_cols3.stack(dropna=True)\n height weight\n cat m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n \"\"\"\n from pandas.core.reshape.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n result = stack_multiple(self, level, dropna=dropna)\n else:\n result = stack(self, level, dropna=dropna)\n\n return result.__finalize__(self, method=\"stack\")\n\n def explode(\n self, column: Union[str, Tuple], ignore_index: bool = False\n ) -> DataFrame:\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, โ€ฆ, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n Raises\n ------\n ValueError :\n if columns of the frame are not unique.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n Series.explode : Explode a DataFrame from list-like columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
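# A standalone sketch of the explode() semantics spelled out in the notes
# above: list-likes fan out into one row per element, empty list-likes become
# NaN, and scalars pass through unchanged. Illustrative only.
import pandas as pd

df = pd.DataFrame({"A": [[1, 2, 3], "foo", [], [3, 4]], "B": 1})
print(df.explode("A"))                      # index values are repeated
print(df.explode("A", ignore_index=True))   # or relabelled 0..n-1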
In addition, the ordering of rows in the\n output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 foo 1\n 2 [] 1\n 3 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1 1\n 0 2 1\n 0 3 1\n 1 foo 1\n 2 NaN 1\n 3 3 1\n 3 4 1\n \"\"\"\n if not (is_scalar(column) or isinstance(column, tuple)):\n raise ValueError(\"column must be a scalar\")\n if not self.columns.is_unique:\n raise ValueError(\"columns must be unique\")\n\n df = self.reset_index(drop=True)\n result = df[column].explode()\n result = df.drop([column], axis=1).join(result)\n if ignore_index:\n result.index = ibase.default_index(len(result))\n else:\n result.index = self.index.take(result.index)\n result = result.reindex(columns=self.columns, copy=False)\n\n return result\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series\n (the analogue of stack when the columns are not a MultiIndex).\n\n Parameters\n ----------\n level : int, str, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name.\n fill_value : int, str or dict\n Replace NaN with this value if the unstack produces missing values.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1.0 2.0\n two 3.0 4.0\n\n >>> s.unstack(level=0)\n one two\n a 1.0 3.0\n b 2.0 4.0\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n result = unstack(self, level, fill_value)\n\n return result.__finalize__(self, method=\"unstack\")\n\n @Appender(_shared_docs[\"melt\"] % {\"caller\": \"df.melt(\", \"other\": \"melt\"})\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level: Optional[Level] = None,\n ignore_index=True,\n ) -> DataFrame:\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n ignore_index=ignore_index,\n )\n\n # ----------------------------------------------------------------------\n # Time series-related\n\n @doc(\n Series.diff,\n klass=\"Dataframe\",\n extra_params=\"axis : {0 or 'index', 1 or 'columns'}, default 0\\n \"\n \"Take difference over rows (0) or columns (1).\\n\",\n other_klass=\"Series\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 
'c': [1, 4, 9, 16, 25, 36]})\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(axis=1)\n a b c\n 0 NaN 0 0\n 1 NaN -1 3\n 2 NaN -1 7\n 3 NaN -1 13\n 4 NaN 0 20\n 5 NaN 2 28\n\n Difference with 3rd previous row\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n\n Overflow in input dtype\n\n >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)\n >>> df.diff()\n a\n 0 NaN\n 1 255.0\"\"\"\n ),\n )\n def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:\n if not isinstance(periods, int):\n if not (is_float(periods) and periods.is_integer()):\n raise ValueError(\"periods must be an integer\")\n periods = int(periods)\n\n bm_axis = self._get_block_manager_axis(axis)\n\n if bm_axis == 0 and periods != 0:\n return self - self.shift(periods, axis=axis)\n\n new_data = self._mgr.diff(n=periods, axis=bm_axis)\n return self._constructor(new_data).__finalize__(self, \"diff\")\n\n # ----------------------------------------------------------------------\n # Function application\n\n def _gotitem(\n self,\n key: Union[Label, List[Label]],\n ndim: int,\n subset: Optional[FrameOrSeriesUnion] = None,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n if subset is None:\n subset = self\n elif subset.ndim == 1: # is Series\n return subset\n\n # TODO: _shallow_copy(subset)?\n return subset[key]\n\n _agg_summary_and_see_also_doc = dedent(\n \"\"\"\n The aggregation operations are always performed over an axis, either the\n index (default) or the column axis. This behavior is different from\n `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,\n `var`), where the default is to compute the aggregation of the flattened\n array, e.g., ``numpy.mean(arr_2d)`` as opposed to\n ``numpy.mean(arr_2d, axis=0)``.\n\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Perform any type of operations.\n DataFrame.transform : Perform transformation type operations.\n core.groupby.GroupBy : Perform operations over groups.\n core.resample.Resampler : Perform operations over resampled bins.\n core.window.Rolling : Perform operations over rolling window.\n core.window.Expanding : Perform operations over expanding window.\n core.window.ExponentialMovingWindow : Perform operation over exponential weighted\n window.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])\n A B C\n sum 12.0 15.0 18.0\n min 1.0 2.0 3.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})\n A B\n sum 12.0 NaN\n min 1.0 2.0\n max NaN 8.0\n\n Aggregate different functions over the columns and rename the index of the resulting\n DataFrame.\n\n >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\n A B C\n x 7.0 NaN NaN\n y NaN 2.0 NaN\n z NaN NaN 6.0\n\n Aggregate over the columns.\n\n >>> df.agg(\"mean\", axis=\"columns\")\n 0 2.0\n 1 5.0\n 2 8.0\n 3 NaN\n dtype: float64\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_summary_and_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):\n axis = self._get_axis_number(axis)\n\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n\n result = None\n try:\n result, how = self._aggregate(func, axis, *args, **kwargs)\n except TypeError as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n if result is None:\n return self.apply(func, axis=axis, args=args, **kwargs)\n\n if relabeling:\n # This is to keep the order to columns occurrence unchanged, and also\n # keep the order of new columns occurrence unchanged\n\n # For the return values of reconstruct_func, if relabeling is\n # False, columns and order will be None.\n assert columns is not None\n assert order is not None\n\n result_in_dict = relabel_result(result, func, columns, order)\n result = DataFrame(result_in_dict, index=columns)\n\n return result\n\n def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):\n if axis == 1:\n # NDFrame.aggregate returns a tuple, and we need to transpose\n # only result\n result, how = aggregate(self.T, arg, *args, **kwargs)\n result = result.T if result is not None else result\n return result, how\n return aggregate(self, arg, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> DataFrame:\n result = transform(self, func, axis, *args, **kwargs)\n assert isinstance(result, DataFrame)\n return result\n\n def apply(\n self,\n func: AggFuncType,\n axis: Axis = 0,\n raw: bool = False,\n result_type=None,\n args=(),\n **kwds,\n ):\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``). By default (``result_type=None``), the final return type\n is inferred from the return type of the applied function. 
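# A compact, runnable recap of the aggregation examples above: a list of
# functions yields one row per function, a dict applies different functions
# per column, and named aggregation relabels the result. Illustrative only.
import numpy as np
import pandas as pd

df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])

print(df.agg(["sum", "min"]))                              # one row per function
print(df.agg({"A": ["sum", "min"], "B": ["min", "max"]}))  # per-column functions
print(df.agg(x=("A", "max"), y=("B", "min"), z=("C", np.mean)))  # named aggregation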
Otherwise,\n it depends on the `result_type` argument.\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n\n raw : bool, default False\n Determines if row or column is passed as a Series or ndarray object:\n\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray objects\n instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n result_type : {'expand', 'reduce', 'broadcast', None}, default None\n These only act when ``axis=1`` (columns):\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : returns a Series if possible rather than expanding\n list-like results. This is the opposite of 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the DataFrame, the original index and columns will be\n retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap: For elementwise operations.\n DataFrame.aggregate: Only perform aggregating type operations.\n DataFrame.transform: Only perform transforming type operations.\n\n Examples\n --------\n >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> df.apply(np.sqrt)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n Using a reducing function on either axis\n\n >>> df.apply(np.sum, axis=0)\n A 12\n B 27\n dtype: int64\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n Passing ``result_type='expand'`` will expand list-like results\n to columns of a Dataframe\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')\n 0 1\n 0 1 2\n 1 1 2\n 2 1 2\n\n Returning a Series inside the function is similar to passing\n ``result_type='expand'``. The resulting column names\n will be the Series index.\n\n >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)\n foo bar\n 0 1 2\n 1 1 2\n 2 1 2\n\n Passing ``result_type='broadcast'`` will ensure the same shape\n result, whether list-like or scalar is returned by the function,\n and broadcast it along the axis. 
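# A self-contained sketch of how ``result_type`` shapes the output of apply(),
# as described above: 'expand' turns list-like results into columns and
# 'broadcast' stretches the result back to the original shape. Illustrative only.
import pandas as pd

df = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"])

print(df.apply(lambda x: [1, 2], axis=1))                          # Series of lists
print(df.apply(lambda x: [1, 2], axis=1, result_type="expand"))    # -> columns 0, 1
print(df.apply(lambda x: [1, 2], axis=1, result_type="broadcast")) # -> original shape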
The resulting column names will\n be the originals.\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')\n A B\n 0 1 2\n 1 1 2\n 2 1 2\n \"\"\"\n from pandas.core.apply import frame_apply\n\n op = frame_apply(\n self,\n func=func,\n axis=axis,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n return op.get_result()\n\n def applymap(\n self, func: PythonFuncType, na_action: Optional[str] = None\n ) -> DataFrame:\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n na_action : {None, 'ignore'}, default None\n If โ€˜ignoreโ€™, propagate NaN values, without passing them to func.\n\n .. versionadded:: 1.2\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> df.applymap(lambda x: len(str(x)))\n 0 1\n 0 3 4\n 1 5 5\n\n Like Series.map, NA values can be ignored:\n\n >>> df_copy = df.copy()\n >>> df_copy.iloc[0, 0] = pd.NA\n >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\n 0 1\n 0 <NA> 4\n 1 5 5\n\n Note that a vectorized version of `func` often exists, which will\n be much faster. You could square each number elementwise.\n\n >>> df.applymap(lambda x: x**2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n But it's better to avoid applymap in that case.\n\n >>> df ** 2\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n if na_action not in {\"ignore\", None}:\n raise ValueError(\n f\"na_action must be 'ignore' or None. Got {repr(na_action)}\"\n )\n ignore_na = na_action == \"ignore\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if x.empty:\n return lib.map_infer(x, func, ignore_na=ignore_na)\n return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)\n\n return self.apply(infer).__finalize__(self, \"applymap\")\n\n # ----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(\n self,\n other,\n ignore_index: bool = False,\n verify_integrity: bool = False,\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Append rows of `other` to the end of caller, returning a new object.\n\n Columns in `other` that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, โ€ฆ, n - 1.\n verify_integrity : bool, default False\n If True, raise ValueError on creating index with duplicates.\n sort : bool, default False\n Sort columns if the columns of `self` and `other` are not aligned.\n\n .. versionchanged:: 1.0.0\n\n Changed to not sort by default.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in\n the DataFrame's index, the order of the columns in the resulting\n DataFrame will be unchanged.\n\n Iteratively appending rows to a DataFrame can be more computationally\n intensive than a single concatenate. 
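# A minimal sketch of the element-wise applymap() documented above, including
# ``na_action='ignore'`` to leave missing values untouched. Illustrative only;
# a vectorised expression is usually faster than applymap.
import pandas as pd

df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
print(df.applymap(lambda x: len(str(x))))                      # scalar -> scalar, elementwise

df.iloc[0, 0] = pd.NA
print(df.applymap(lambda x: len(str(x)), na_action="ignore"))  # NA passed through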
A better solution is to append\n those rows to a list and then concatenate the list with the original\n DataFrame all at once.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n The following, while not recommended methods for generating DataFrames,\n show two ways to generate a DataFrame from multiple data sources.\n\n Less efficient:\n\n >>> df = pd.DataFrame(columns=['A'])\n >>> for i in range(5):\n ... df = df.append({'A': i}, ignore_index=True)\n >>> df\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n More efficient:\n\n >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n ... ignore_index=True)\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n if not ignore_index:\n raise TypeError(\"Can only append a dict if ignore_index=True\")\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError(\n \"Can only append a Series if ignore_index=True \"\n \"or if the Series has a name\"\n )\n\n index = Index([other.name], name=self.index.name)\n idx_diff = other.index.difference(self.columns)\n try:\n combined_columns = self.columns.append(idx_diff)\n except TypeError:\n combined_columns = self.columns.astype(object).append(idx_diff)\n other = (\n other.reindex(combined_columns, copy=False)\n .to_frame()\n .T.infer_objects()\n .rename_axis(index.names, copy=False)\n )\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list):\n if not other:\n pass\n elif not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.reindex(columns=self.columns)\n\n from pandas.core.reshape.concat import concat\n\n if isinstance(other, (list, tuple)):\n to_concat = [self, *other]\n else:\n to_concat = [self, other]\n return (\n concat(\n to_concat,\n ignore_index=ignore_index,\n verify_integrity=verify_integrity,\n sort=sort,\n )\n ).__finalize__(self, method=\"append\")\n\n def join(\n self,\n other: FrameOrSeriesUnion,\n on: Optional[IndexLabel] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `other` DataFrame either on index or on a key\n column. Efficiently join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame.\n on : str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index\n in `other`, otherwise joins index-on-index. If multiple\n values given, the `other` DataFrame must have a MultiIndex. Can\n pass an array as the join key if it is not already contained in\n the calling DataFrame. 
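# A runnable sketch of the efficiency note above: appending row by row
# re-allocates on every call, so collect the pieces in a list and call
# pd.concat once instead. Illustrative only.
import pandas as pd

# one concat over a list of one-row frames (preferred)
pieces = [pd.DataFrame([i], columns=["A"]) for i in range(5)]
print(pd.concat(pieces, ignore_index=True))

# appending a dict requires ignore_index=True, as enforced in the code above
base = pd.DataFrame(columns=["A"])
print(base.append({"A": 99}, ignore_index=True))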
Like an Excel VLOOKUP operation.\n how : {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use calling frame's index (or column if on is specified)\n * right: use `other`'s index.\n * outer: form union of calling frame's index (or column if on is\n specified) with `other`'s index, and sort it.\n lexicographically.\n * inner: form intersection of calling frame's index (or column if\n on is specified) with `other`'s index, preserving the order\n of the calling's one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from right frame's overlapping columns.\n sort : bool, default False\n Order result DataFrame lexicographically by the join key. If False,\n the order of the join key depends on the join type (how keyword).\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the caller and `other`.\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Notes\n -----\n Parameters `on`, `lsuffix`, and `rsuffix` are not supported when\n passing a list of `DataFrame` objects.\n\n Support for specifying index levels as the `on` parameter was added\n in version 0.23.0.\n\n Examples\n --------\n >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n\n >>> df\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n 4 K4 A4\n 5 K5 A5\n\n >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']})\n\n >>> other\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> df.join(other, lsuffix='_caller', rsuffix='_other')\n key_caller A key_other B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 NaN NaN\n 4 K4 A4 NaN NaN\n 5 K5 A5 NaN NaN\n\n If we want to join using the key columns, we need to set key to be\n the index in both `df` and `other`. The joined DataFrame will have\n key as its index.\n\n >>> df.set_index('key').join(other.set_index('key'))\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 NaN\n K4 A4 NaN\n K5 A5 NaN\n\n Another option to join using the key columns is to use the `on`\n parameter. DataFrame.join always uses `other`'s index but we can use\n any column in `df`. 
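# A standalone version of the key-column join described above: set the key as
# the index on ``other`` and pass ``on='key'`` so the caller keeps its original
# index. Illustrative only.
import pandas as pd

df = pd.DataFrame({"key": ["K0", "K1", "K2", "K3"],
                   "A": ["A0", "A1", "A2", "A3"]})
other = pd.DataFrame({"key": ["K0", "K1"], "B": ["B0", "B1"]})

print(df.join(other.set_index("key"), on="key"))   # left join, caller index kept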
This method preserves the original DataFrame's\n index in the result.\n\n >>> df.join(other.set_index('key'), on='key')\n key A B\n 0 K0 A0 B0\n 1 K1 A1 B1\n 2 K2 A2 B2\n 3 K3 A3 NaN\n 4 K4 A4 NaN\n 5 K5 A5 NaN\n \"\"\"\n return self._join_compat(\n other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort\n )\n\n def _join_compat(\n self,\n other: FrameOrSeriesUnion,\n on: Optional[IndexLabel] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n sort: bool = False,\n ):\n from pandas.core.reshape.concat import concat\n from pandas.core.reshape.merge import merge\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError(\"Other Series must have a name\")\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n if how == \"cross\":\n return merge(\n self,\n other,\n how=how,\n on=on,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n return merge(\n self,\n other,\n left_on=on,\n how=how,\n left_index=on is None,\n right_index=True,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n else:\n if on is not None:\n raise ValueError(\n \"Joining multiple DataFrames only supported for joining on index\"\n )\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n # join indexes only using concat\n if can_concat:\n if how == \"left\":\n res = concat(\n frames, axis=1, join=\"outer\", verify_integrity=True, sort=sort\n )\n return res.reindex(self.index, copy=False)\n else:\n return concat(\n frames, axis=1, join=how, verify_integrity=True, sort=sort\n )\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(\n joined, frame, how=how, left_index=True, right_index=True\n )\n\n return joined\n\n @Substitution(\"\")\n @Appender(_merge_doc, indents=2)\n def merge(\n self,\n right: FrameOrSeriesUnion,\n how: str = \"inner\",\n on: Optional[IndexLabel] = None,\n left_on: Optional[IndexLabel] = None,\n right_on: Optional[IndexLabel] = None,\n left_index: bool = False,\n right_index: bool = False,\n sort: bool = False,\n suffixes: Suffixes = (\"_x\", \"_y\"),\n copy: bool = True,\n indicator: bool = False,\n validate: Optional[str] = None,\n ) -> DataFrame:\n from pandas.core.reshape.merge import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n sort=sort,\n suffixes=suffixes,\n copy=copy,\n indicator=indicator,\n validate=validate,\n )\n\n def round(self, decimals=0, *args, **kwargs) -> DataFrame:\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. 
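# A minimal sketch of the merge() wrapper defined above: a column-on-column
# join where ``how`` controls unmatched rows, ``suffixes`` disambiguates
# overlapping names, and ``indicator`` records each row's origin. Illustrative only.
import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "c"], "val": [1, 2, 3]})
right = pd.DataFrame({"key": ["a", "b", "d"], "val": [10, 20, 40]})

print(left.merge(right, on="key", how="left", suffixes=("_l", "_r")))
print(left.merge(right, on="key", how="outer", indicator=True))  # adds a _merge column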
Elements\n of `decimals` which are not columns of the input will be\n ignored.\n *args\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n\n Returns\n -------\n DataFrame\n A DataFrame with the affected columns rounded to the specified\n number of decimal places.\n\n See Also\n --------\n numpy.around : Round a numpy array to the given number of decimals.\n Series.round : Round a Series to the given number of decimals.\n\n Examples\n --------\n >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],\n ... columns=['dogs', 'cats'])\n >>> df\n dogs cats\n 0 0.21 0.32\n 1 0.01 0.67\n 2 0.66 0.03\n 3 0.21 0.18\n\n By providing an integer each column is rounded to the same number\n of decimal places\n\n >>> df.round(1)\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.7\n 2 0.7 0.0\n 3 0.2 0.2\n\n With a dict, the number of places for specific columns can be\n specified with the column names as key and the number of decimal\n places as value\n\n >>> df.round({'dogs': 1, 'cats': 0})\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n\n Using a Series, the number of places for specific columns can be\n specified with the column names as index and the number of\n decimal places as value\n\n >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])\n >>> df.round(decimals)\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n def _dict_round(df, decimals):\n for col, vals in df.items():\n try:\n yield _series_round(vals, decimals[col])\n except KeyError:\n yield vals\n\n def _series_round(s, decimals):\n if is_integer_dtype(s) or is_float_dtype(s):\n return s.round(decimals)\n return s\n\n nv.validate_round(args, kwargs)\n\n if isinstance(decimals, (dict, Series)):\n if isinstance(decimals, Series):\n if not decimals.index.is_unique:\n raise ValueError(\"Index of decimals must be unique\")\n new_cols = list(_dict_round(self, decimals))\n elif is_integer(decimals):\n # Dispatch to Series.round\n new_cols = [_series_round(v, decimals) for _, v in self.items()]\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n if len(new_cols) > 0:\n return self._constructor(\n concat(new_cols, axis=1), index=self.index, columns=self.columns\n )\n else:\n return self\n\n # ----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method=\"pearson\", min_periods=1) -> DataFrame:\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float. Note that the returned matrix from corr\n will have 1 along the diagonals and will be symmetric\n regardless of the callable's behavior.\n\n .. versionadded:: 0.24.0\n\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. 
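# A runnable recap of the rounding rules implemented above: an int rounds every
# column the same way, while a dict or Series rounds per column and leaves
# unlisted columns untouched. Illustrative only.
import pandas as pd

df = pd.DataFrame([(0.21, 0.32), (0.01, 0.67)], columns=["dogs", "cats"])

print(df.round(1))                               # same precision everywhere
print(df.round({"dogs": 1}))                     # only 'dogs' is rounded
print(df.round(pd.Series([0], index=["cats"])))  # Series keyed by column name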
Currently only available for Pearson\n and Spearman correlation.\n\n Returns\n -------\n DataFrame\n Correlation matrix.\n\n See Also\n --------\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n Series.corr : Compute the correlation between two Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr(method=histogram_intersection)\n dogs cats\n dogs 1.0 0.3\n cats 0.3 1.0\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if method == \"pearson\":\n correl = libalgos.nancorr(mat, minp=min_periods)\n elif method == \"spearman\":\n correl = libalgos.nancorr_spearman(mat, minp=min_periods)\n elif method == \"kendall\" or callable(method):\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n if i > j:\n continue\n\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = np.nan\n elif i == j:\n c = 1.0\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n else:\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n return self._constructor(correl, index=idx, columns=cols)\n\n def cov(\n self, min_periods: Optional[int] = None, ddof: Optional[int] = 1\n ) -> DataFrame:\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.\n core.window.Expanding.cov : Expanding sample covariance.\n core.window.Rolling.cov : Rolling sample covariance.\n\n Notes\n -----\n Returns the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-ddof.\n\n For DataFrames that have Series that are missing data (assuming that\n data is `missing at random\n <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)\n the returned covariance matrix will be an unbiased estimate\n of the variance and covariance between the member Series.\n\n However, for many applications this estimate may not be acceptable\n because the estimate covariance matrix is not guaranteed to be positive\n semi-definite. This could lead to estimate correlations having\n absolute values which are greater than one, and/or a non-invertible\n covariance matrix. See `Estimation of covariance matrices\n <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_\n matrices>`__ for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> df.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if notna(mat).all():\n if min_periods is not None and min_periods > len(mat):\n base_cov = np.empty((mat.shape[1], mat.shape[1]))\n base_cov.fill(np.nan)\n else:\n base_cov = np.cov(mat.T, ddof=ddof)\n base_cov = base_cov.reshape((len(cols), len(cols)))\n else:\n base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)\n\n return self._constructor(base_cov, index=idx, columns=cols)\n\n def corrwith(self, other, axis: Axis = 0, drop=False, method=\"pearson\") -> Series:\n \"\"\"\n Compute pairwise correlation.\n\n Pairwise correlation is computed between rows or columns of\n DataFrame with rows or columns of Series or DataFrame. DataFrames\n are first aligned along both axes before computing the\n correlations.\n\n Parameters\n ----------\n other : DataFrame, Series\n Object with which to compute correlations.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
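# A self-contained sketch of the pairwise covariance described above, including
# ``min_periods`` masking pairs with too few shared observations. Illustrative only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.standard_normal((20, 3)), columns=["a", "b", "c"])
df.loc[df.index[:12], "a"] = np.nan             # 'a' keeps only 8 valid rows

print(df.cov())                                 # pairwise, NaNs excluded
print(df.cov(min_periods=12))                   # pairs involving 'a' become NaN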
0 or 'index' to compute column-wise, 1 or 'columns' for\n row-wise.\n drop : bool, default False\n Drop missing indices from result.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Pairwise correlations.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation of columns.\n \"\"\"\n axis = self._get_axis_number(axis)\n this = self._get_numeric_data()\n\n if isinstance(other, Series):\n return this.apply(lambda x: other.corr(x, method=method), axis=axis)\n\n other = other._get_numeric_data()\n left, right = this.align(other, join=\"inner\", copy=False)\n\n if axis == 1:\n left = left.T\n right = right.T\n\n if method == \"pearson\":\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n elif method in [\"kendall\", \"spearman\"] or callable(method):\n\n def c(x):\n return nanops.nancorr(x[0], x[1], method=method)\n\n correl = self._constructor_sliced(\n map(c, zip(left.values.T, right.values.T)), index=left.columns\n )\n\n else:\n raise ValueError(\n f\"Invalid method {method} was passed, \"\n \"valid methods are: 'pearson', 'kendall', \"\n \"'spearman', or callable\"\n )\n\n if not drop:\n # Find non-matching labels along the given axis\n # and append missing correlations (GH 22375)\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n idx_diff = result_index.difference(correl.index)\n\n if len(idx_diff) > 0:\n correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))\n\n return correl\n\n # ----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(\n self, axis: Axis = 0, level: Optional[Level] = None, numeric_only: bool = False\n ):\n \"\"\"\n Count non-NA cells for each column or row.\n\n The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending\n on `pandas.options.mode.use_inf_as_na`) are considered NA.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index' counts are generated for each column.\n If 1 or 'columns' counts are generated for each row.\n level : int or str, optional\n If the axis is a `MultiIndex` (hierarchical), count along a\n particular `level`, collapsing into a `DataFrame`.\n A `str` specifies the level name.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n Returns\n -------\n Series or DataFrame\n For each column/row the number of non-NA/null entries.\n If `level` is specified returns a `DataFrame`.\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.value_counts: Count unique combinations of columns.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = pd.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... 
\"Single\": [False, True, True, True, False]})\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n Counts for each **row**:\n\n >>> df.count(axis='columns')\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n Counts for one level of a `MultiIndex`:\n\n >>> df.set_index([\"Person\", \"Single\"]).count(level=\"Person\")\n Age\n Person\n John 2\n Lewis 1\n Myla 1\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis, numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type or frame._mgr.any_extension_types:\n # the or any_extension_types is really only hit for single-\n # column frames with an extension array\n result = notna(frame).sum(axis=axis)\n else:\n # GH13407\n series_counts = notna(frame).sum(axis=axis)\n counts = series_counts.values\n result = self._constructor_sliced(\n counts, index=frame._get_agg_axis(axis)\n )\n\n return result.astype(\"int64\")\n\n def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\n f\"Can only count levels on hierarchical {self._get_axis_name(axis)}.\"\n )\n\n # Mask NaNs: Mask rows or columns where the index level is NaN, and all\n # values in the DataFrame that are NaN\n if frame._is_mixed_type:\n # Since we have mixed types, calling notna(frame.values) might\n # upcast everything to object\n values_mask = notna(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n values_mask = notna(frame.values)\n\n index_mask = notna(count_axis.get_level_values(level=level))\n if axis == 1:\n mask = index_mask & values_mask\n else:\n mask = index_mask.reshape(-1, 1) & values_mask\n\n if isinstance(level, str):\n level = count_axis._get_level_number(level)\n\n level_name = count_axis._names[level]\n level_index = count_axis.levels[level]._shallow_copy(name=level_name)\n level_codes = ensure_int64(count_axis.codes[level])\n counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)\n\n if axis == 1:\n result = self._constructor(counts, index=agg_axis, columns=level_index)\n else:\n result = self._constructor(counts, index=level_index, columns=agg_axis)\n\n return result\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis: Axis = 0,\n skipna: bool = True,\n numeric_only: Optional[bool] = None,\n filter_type=None,\n **kwds,\n ):\n\n assert filter_type is None or filter_type == \"bool\", filter_type\n out_dtype = \"bool\" if filter_type == \"bool\" else None\n\n own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]\n\n dtype_is_dt = np.array(\n [is_datetime64_any_dtype(dtype) for dtype in own_dtypes],\n dtype=bool,\n )\n if numeric_only is None and name in [\"mean\", \"median\"] and dtype_is_dt.any():\n warnings.warn(\n \"DataFrame.mean and DataFrame.median with numeric_only=None \"\n \"will include datetime64 and datetime64tz columns in a \"\n \"future version.\",\n FutureWarning,\n stacklevel=5,\n )\n cols = self.columns[~dtype_is_dt]\n self = 
self[cols]\n\n # TODO: Make other agg func handle axis=None properly GH#21597\n axis = self._get_axis_number(axis)\n labels = self._get_agg_axis(axis)\n assert axis in [0, 1]\n\n def func(values: np.ndarray):\n # We only use this in the case that operates on self.values\n return op(values, axis=axis, skipna=skipna, **kwds)\n\n def blk_func(values):\n if isinstance(values, ExtensionArray):\n return values._reduce(name, skipna=skipna, **kwds)\n else:\n return op(values, axis=1, skipna=skipna, **kwds)\n\n def _get_data() -> DataFrame:\n if filter_type is None:\n data = self._get_numeric_data()\n else:\n # GH#25101, GH#24434\n assert filter_type == \"bool\"\n data = self._get_bool_data()\n return data\n\n if numeric_only is not None or axis == 0:\n # For numeric_only non-None and axis non-None, we know\n # which blocks to use and no try/except is needed.\n # For numeric_only=None only the case with axis==0 and no object\n # dtypes are unambiguous can be handled with BlockManager.reduce\n # Case with EAs see GH#35881\n df = self\n if numeric_only is True:\n df = _get_data()\n if axis == 1:\n df = df.T\n axis = 0\n\n ignore_failures = numeric_only is None\n\n # After possibly _get_data and transposing, we are now in the\n # simple case where we can use BlockManager.reduce\n res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)\n out = df._constructor(res).iloc[0]\n if out_dtype is not None:\n out = out.astype(out_dtype)\n if axis == 0 and len(self) == 0 and name in [\"sum\", \"prod\"]:\n # Even if we are object dtype, follow numpy and return\n # float64, see test_apply_funcs_over_empty\n out = out.astype(np.float64)\n return out\n\n assert numeric_only is None\n\n data = self\n values = data.values\n\n try:\n result = func(values)\n\n except TypeError:\n # e.g. in nanops trying to convert strs to float\n\n data = _get_data()\n labels = data._get_agg_axis(axis)\n\n values = data.values\n with np.errstate(all=\"ignore\"):\n result = func(values)\n\n if filter_type == \"bool\" and notna(result).all():\n result = result.astype(np.bool_)\n elif filter_type is None and is_object_dtype(result.dtype):\n try:\n result = result.astype(np.float64)\n except (ValueError, TypeError):\n # try to coerce to the original dtypes item by item if we can\n pass\n\n result = self._constructor_sliced(result, index=labels)\n return result\n\n def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:\n \"\"\"\n Count distinct observations over requested axis.\n\n Return Series with number of distinct observations. Can ignore NaN\n values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for\n column-wise.\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.nunique: Method nunique for Series.\n DataFrame.count: Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(axis=1)\n 0 1\n 1 2\n 2 2\n dtype: int64\n \"\"\"\n return self.apply(Series.nunique, axis=axis, dropna=dropna)\n\n def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of minima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmin : Return index of the minimum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the minimum value in each column.\n\n >>> df.idxmin()\n consumption Pork\n co2_emissions Wheat Products\n dtype: object\n\n To return the index for the minimum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmin(axis=\"columns\")\n Pork consumption\n Wheat Products co2_emissions\n Beef consumption\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmin, \"argmin\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of maxima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmax : Return index of the maximum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... 
index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the maximum value in each column.\n\n >>> df.idxmax()\n consumption Wheat Products\n co2_emissions Beef\n dtype: object\n\n To return the index for the maximum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmax(axis=\"columns\")\n Pork co2_emissions\n Wheat Products consumption\n Beef co2_emissions\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmax, \"argmax\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num: int) -> Index:\n \"\"\"\n Let's be explicit about this.\n \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError(f\"Axis must be 0 or 1 (got {repr(axis_num)})\")\n\n def mode(\n self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True\n ) -> DataFrame:\n \"\"\"\n Get the mode(s) of each element along the selected axis.\n\n The mode of a set of values is the value that appears most often.\n It can be multiple values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to iterate over while searching for the mode:\n\n * 0 or 'index' : get mode of each column\n * 1 or 'columns' : get mode of each row.\n\n numeric_only : bool, default False\n If True, only apply to numeric columns.\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The modes of each column or row.\n\n See Also\n --------\n Series.mode : Return the highest frequency value in a Series.\n Series.value_counts : Return the counts of values in a Series.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 2, 2),\n ... ('mammal', 4, np.nan),\n ... ('arthropod', 8, 0),\n ... ('bird', 2, np.nan)],\n ... index=('falcon', 'horse', 'spider', 'ostrich'),\n ... columns=('species', 'legs', 'wings'))\n >>> df\n species legs wings\n falcon bird 2 2.0\n horse mammal 4 NaN\n spider arthropod 8 0.0\n ostrich bird 2 NaN\n\n By default, missing values are not considered, and the mode of wings\n are both 0 and 2. 
Because the resulting DataFrame has two rows,\n the second row of ``species`` and ``legs`` contains ``NaN``.\n\n >>> df.mode()\n species legs wings\n 0 bird 2.0 0.0\n 1 NaN NaN 2.0\n\n Setting ``dropna=False`` ``NaN`` values are considered and they can be\n the mode (like for wings).\n\n >>> df.mode(dropna=False)\n species legs wings\n 0 bird 2 NaN\n\n Setting ``numeric_only=True``, only the mode of numeric columns is\n computed, and columns of other types are ignored.\n\n >>> df.mode(numeric_only=True)\n legs wings\n 0 2.0 0.0\n 1 NaN 2.0\n\n To compute the mode over columns and not rows, use the axis parameter:\n\n >>> df.mode(axis='columns', numeric_only=True)\n 0 1\n falcon 2.0 NaN\n horse 4.0 NaN\n spider 0.0 8.0\n ostrich 2.0 NaN\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n\n def f(s):\n return s.mode(dropna=dropna)\n\n data = data.apply(f, axis=axis)\n # Ensure index is type stable (should always use int index)\n if data.empty:\n data.index = ibase.default_index(0)\n\n return data\n\n def quantile(\n self,\n q=0.5,\n axis: Axis = 0,\n numeric_only: bool = True,\n interpolation: str = \"linear\",\n ):\n \"\"\"\n Return values at the given quantile over requested axis.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n Value between 0 <= q <= 1, the quantile(s) to compute.\n axis : {0, 1, 'index', 'columns'}, default 0\n Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be\n computed as well.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n Series or DataFrame\n\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n See Also\n --------\n core.window.Rolling.quantile: Rolling quantile.\n numpy.percentile: Numpy function to compute the percentile.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n ... columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n Name: 0.1, dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n\n Specifying `numeric_only=False` will also compute the quantile of\n datetime and timedelta data.\n\n >>> df = pd.DataFrame({'A': [1, 2],\n ... 'B': [pd.Timestamp('2010'),\n ... pd.Timestamp('2011')],\n ... 'C': [pd.Timedelta('1 days'),\n ... 
pd.Timedelta('2 days')]})\n >>> df.quantile(0.5, numeric_only=False)\n A 1.5\n B 2010-07-02 12:00:00\n C 1 days 12:00:00\n Name: 0.5, dtype: object\n \"\"\"\n validate_percentile(q)\n\n data = self._get_numeric_data() if numeric_only else self\n axis = self._get_axis_number(axis)\n is_transposed = axis == 1\n\n if is_transposed:\n data = data.T\n\n if len(data.columns) == 0:\n # GH#23925 _get_numeric_data may have dropped all columns\n cols = Index([], name=self.columns.name)\n if is_list_like(q):\n return self._constructor([], index=q, columns=cols)\n return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)\n\n result = data._mgr.quantile(\n qs=q, axis=1, interpolation=interpolation, transposed=is_transposed\n )\n\n if result.ndim == 2:\n result = self._constructor(result)\n else:\n result = self._constructor_sliced(result, name=q)\n\n if is_transposed:\n result = result.T\n\n return result\n\n @doc(NDFrame.asfreq, **_shared_doc_kwargs)\n def asfreq(\n self,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool = False,\n fill_value=None,\n ) -> \"DataFrame\":\n return super().asfreq(\n freq=freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n @doc(NDFrame.resample, **_shared_doc_kwargs)\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: Optional[int] = None,\n on=None,\n level=None,\n origin: Union[str, \"TimestampConvertibleTypes\"] = \"start_day\",\n offset: Optional[\"TimedeltaConvertibleTypes\"] = None,\n ) -> \"Resampler\":\n return super().resample(\n rule=rule,\n axis=axis,\n closed=closed,\n label=label,\n convention=convention,\n kind=kind,\n loffset=loffset,\n base=base,\n on=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def to_timestamp(\n self, freq=None, how: str = \"start\", axis: Axis = 0, copy: bool = True\n ) -> DataFrame:\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. 
end.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with DatetimeIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_timestamp(freq=freq, how=how)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex.\n\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed).\n\n Parameters\n ----------\n freq : str, default\n Frequency of the PeriodIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with PeriodIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_period(freq=freq)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def isin(self, values) -> DataFrame:\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dict, the keys must be the column names,\n which must match. If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n See Also\n --------\n DataFrame.eq: Equality test for DataFrame.\n Series.isin: Equivalent method on Series.\n Series.str.contains: Test if pattern or regex is contained within a\n string of a Series or Index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n\n When ``values`` is a Series or DataFrame the index and column must\n match. Note that 'falcon' does not match based on the number of legs\n in df2.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... 
index=['spider', 'falcon'])\n >>> df.isin(other)\n num_legs num_wings\n falcon True True\n dog False False\n \"\"\"\n if isinstance(values, dict):\n from pandas.core.reshape.concat import concat\n\n values = collections.defaultdict(list, values)\n return concat(\n (\n self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)\n ),\n axis=1,\n )\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis=\"index\")\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\n \"only list-like or dict-like objects are allowed \"\n \"to be passed to DataFrame.isin(), \"\n f\"you passed a '{type(values).__name__}'\"\n )\n return self._constructor(\n algorithms.isin(self.values.ravel(), values).reshape(self.shape),\n self.index,\n self.columns,\n )\n\n # ----------------------------------------------------------------------\n # Add index and columns\n _AXIS_ORDERS = [\"index\", \"columns\"]\n _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {\n **NDFrame._AXIS_TO_AXIS_NUMBER,\n 1: 1,\n \"columns\": 1,\n }\n _AXIS_REVERSED = True\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 1\n _info_axis_name = \"columns\"\n\n index: Index = properties.AxisProperty(\n axis=1, doc=\"The index (row labels) of the DataFrame.\"\n )\n columns: Index = properties.AxisProperty(\n axis=0, doc=\"The column labels of the DataFrame.\"\n )\n\n @property\n def _AXIS_NUMBERS(self) -> Dict[str, int]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NUMBERS\n return {\"index\": 0, \"columns\": 1}\n\n @property\n def _AXIS_NAMES(self) -> Dict[int, str]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NAMES\n return {0: \"index\", 1: \"columns\"}\n\n # ----------------------------------------------------------------------\n # Add plotting methods to DataFrame\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n hist = pandas.plotting.hist_frame\n boxplot = pandas.plotting.boxplot_frame\n sparse = CachedAccessor(\"sparse\", SparseFrameAccessor)\n\n\nDataFrame._add_numeric_operations()\n\nops.add_flex_arithmetic_methods(DataFrame)\n\n\ndef _from_nested_dict(data) -> collections.defaultdict:\n new_data: collections.defaultdict = collections.defaultdict(dict)\n for index, s in data.items():\n for col, v in s.items():\n new_data[col][index] = v\n return new_data\n\n\ndef _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike:\n # reindex if necessary\n\n if value.index.equals(index) or not len(index):\n return value._values.copy()\n\n # GH#4107\n try:\n reindexed_value = value.reindex(index)._values\n except ValueError as err:\n # raised in MultiIndex.from_tuples, see test_insert_error_msmgs\n if not value.index.is_unique:\n # duplicate axis\n raise err\n\n raise TypeError(\n \"incompatible index of inserted column with frame index\"\n ) from err\n return reindexed_value\n\n\ndef _maybe_atleast_2d(value):\n # TODO(EA2D): not needed with 2D EAs\n\n if is_extension_array_dtype(value):\n return value\n\n return np.atleast_2d(np.asarray(value))\n" ]
[ [ "pandas.core.ops.add_flex_arithmetic_methods", "pandas.core.common.asarray_tuplesafe", "pandas.core.construction.extract_array", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.aggregation.aggregate", "pandas.io.formats.format.DataFrameRenderer", "pandas.core.dtypes.cast.maybe_convert_platform", "numpy.empty", "pandas.io.formats.style.Styler", "pandas.core.computation.expressions.where", "pandas.core.apply.frame_apply", "pandas.core.dtypes.common.pandas_dtype", "pandas.io.formats.console.get_console_size", "pandas.core.reshape.melt.melt", "pandas._libs.algos.nancorr", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.indexes.base.default_index", "pandas.util._decorators.rewrite_axis_style_signature", "pandas.core.aggregation.transform", "pandas.core.internals.construction.nested_data_to_arrays", "pandas.core.sorting.nargsort", "pandas.util._decorators.doc", "pandas.util._decorators.deprecate_kwarg", "pandas.core.indexing.check_bool_indexer", "pandas.core.indexes.multi.maybe_droplevels", "pandas.compat.numpy.function.validate_transpose", "pandas.core.dtypes.common.is_hashable", "numpy.cov", "numpy.iterable", "numpy.errstate", "pandas.core.internals.construction.to_arrays", "pandas.compat._optional.import_optional_dependency", "pandas.core.accessor.CachedAccessor", "pandas.io.stata.StataWriterUTF8", "pandas._config.get_option", "pandas.core.reshape.merge.merge", "pandas.io.parquet.to_parquet", "pandas.core.internals.construction.init_ndarray", "numpy.dot", "pandas.io.feather_format.to_feather", "pandas.core.indexes.api.ensure_index", "pandas.core.internals.construction.sanitize_index", "pandas.core.ops.align_method_FRAME", "pandas.core.dtypes.missing.isna", "pandas.core.algorithms.take", "pandas.core.indexes.multi.MultiIndex.from_arrays", "pandas.core.indexes.api.Index", "pandas.core.common.standardize_mapping", "pandas.core.common.is_bool_indexer", "pandas.core.internals.construction.masked_rec_array_to_mgr", "pandas.core.indexing.convert_to_index_sliceable", "numpy.array", "pandas.core.dtypes.cast.maybe_box_datetimelike", "pandas.core.internals.construction.dataclasses_to_dicts", "pandas.core.internals.construction.reorder_arrays", "pandas.core.dtypes.common.is_iterator", "pandas.io.common.get_handle", "pandas.core.reshape.reshape.unstack", "pandas.core.series.Series", "pandas.core.groupby.generic.DataFrameGroupBy", "pandas.core.algorithms.SelectNFrame", "pandas.core.ops.frame_arith_method_with_reindex", "pandas.option_context", "pandas.core.reshape.reshape.stack", "pandas.core.sorting.lexsort_indexer", "pandas.core.ops.should_reindex_frame_op", "pandas.core.dtypes.common.is_bool_dtype", "numpy.compress", "pandas.core.dtypes.cast.invalidate_string_dtypes", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas.core.reshape.reshape.stack_multiple", "numpy.where", "pandas.core.dtypes.cast.find_common_type", "pandas.core.aggregation.relabel_result", "pandas.core.dtypes.common.ensure_platform_int", "pandas.io.formats.info.DataFrameInfo", "pandas.core.internals.construction.arrays_to_mgr", "pandas.core.common.apply_if_callable", "pandas.core.dtypes.common.infer_dtype_from_object", "pandas.core.internals.construction.treat_as_nested", "pandas.core.dtypes.common.is_dict_like", "pandas.core.dtypes.cast.validate_numeric_casting", "numpy.transpose", "pandas.core.nanops.nancorr", "pandas.core.dtypes.common.is_object_dtype", "pandas._libs.lib.item_from_zerodim", "pandas.util._decorators.Substitution", 
"pandas._libs.algos.nancorr_spearman", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_integer", "pandas.core.ops.get_array_op", "pandas.io.formats.format.DataFrameFormatter", "pandas.core.dtypes.common.ensure_int64", "pandas.core.dtypes.common.is_list_like", "numpy.rec.fromarrays", "pandas.io.gbq.to_gbq", "pandas._libs.lib.map_infer", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "pandas.core.construction.sanitize_masked_array", "numpy.asarray", "pandas.core.dtypes.cast.maybe_infer_to_datetimelike", "pandas.core.reshape.pivot.pivot_table", "pandas.core.reshape.pivot.pivot", "pandas.core.dtypes.common.is_float", "pandas.core.nanops.get_corr_func", "pandas.core.dtypes.common.is_dataclass", "pandas.core.sorting.get_group_index", "pandas.core.internals.construction.init_dict", "pandas.io.formats.console.in_interactive_session", "numpy.full", "pandas.core.dtypes.missing.notna", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.generic.NDFrame.__init__", "pandas.core.algorithms.take_2d_multi", "pandas.util._validators.validate_bool_kwarg", "pandas.io.formats.console.in_ipython_frontend", "numpy.isfinite", "pandas.core.indexes.api.ensure_index_from_sequences", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_sequence", "pandas.util._decorators.Appender", "pandas._libs.lib.maybe_convert_objects", "pandas._libs.hashtable.duplicated_int64", "numpy.shape", "pandas.core.aggregation.reconstruct_func", "pandas.core.computation.eval.eval", "pandas.core.ops.fill_binop", "pandas._libs.properties.AxisProperty", "pandas.util._validators.validate_percentile", "pandas.compat.numpy.function.validate_round", "pandas.util._validators.validate_axis_style_args", "pandas.core.reshape.concat.concat" ] ]
oronnir/CAST
[ "c2b095a516e5ad0cdfec8b13196045549cbd3f4c" ]
[ "Featurizer/classifier.py" ]
[ "import collections\nimport torch\n\n\nclass Classifier(torch.nn.Module):\n def __init__(self, num_classes, feature_planes):\n assert isinstance(num_classes, int) or isinstance(num_classes, collections.abc.Iterable)\n super(Classifier, self).__init__()\n self.feature_planes = feature_planes\n if isinstance(num_classes, int):\n num_classes = (num_classes,)\n self.num_classes = num_classes\n self.features = None # Need to be initialized by a child class.\n self.fcs = torch.nn.ModuleList([torch.nn.Linear(feature_planes, c) for c in num_classes])\n\n self.outputs = []\n\n def forward(self, x, classifier_index=0):\n if isinstance(x, list):\n sizes = [y.size()[0] for y in x]\n x = torch.cat(x, 0)\n out = self.features(x)\n out = out.view(-1, self.feature_planes)\n outs = torch.split(out, sizes, dim=0)\n return [self.fcs[i](out) for i, out in enumerate(outs)]\n else:\n assert classifier_index < len(self.fcs)\n out = self.features(x)\n out = out.view(-1, self.feature_planes)\n return self.fcs[classifier_index](out)\n\n def _forward_hook(self, module, input, output):\n self.outputs.append(output)\n \n def get_outputs(self, x, layer_names):\n for layer_name in layer_names:\n layer = self.find_layer_by_name(layer_name)\n layer.register_forward_hook(self.forward_hook)\n \n self.outputs = []\n self.forward(x)\n return self.outputs\n\n def find_layer_by_name(self, name):\n paths = name.split('.')\n current = self\n for p in paths:\n children = current.named_children()\n current = children[p]\n return current\n\n def make_feature_extractor(self):\n for param in self.features.parameters():\n param.requires_grad = False\n\n def load_feature_extractor_state(self, state_dict):\n filtered_dict = {}\n features_state_dict = self.features.state_dict()\n for name in features_state_dict.keys():\n name = 'features.' + name\n if name in state_dict:\n filtered_dict[name] = state_dict[name]\n else:\n print(\"Warning: {} is missing\".format(name))\n \n self.load_state_dict(filtered_dict, strict=False)\n \n \nclass SimplisticClassifier(Classifier):\n def __init__(self, num_classes, feature_planes):\n super(SimplisticClassifier, self).__init__(num_classes, feature_planes)\n self.features = lambda x: x\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.split" ] ]
Kreijeck/learning
[ "eaffee08e61f2a34e01eb8f9f04519aac633f48c" ]
[ "Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/solutions/ex01_even_odd_ordering.py" ]
[ "# Beispielprogramm fรผr das Buch \"Python Challenge\"\n#\n# Copyright 2020 by Michael Inden\n\n\nimport numpy as np\n\nfrom ch02_math.solutions.ex01_basics import is_odd, is_even\nfrom ch06_arrays.intro.intro import swap\n\n\ndef order_even_before_odd(numbers):\n i = 0\n while i < len(numbers):\n value = numbers[i]\n\n if is_even(value):\n # gerade Zahl, also weiter mit nรคchster Zahl\n i += 1\n else:\n # ungerade Zahl, springe รผber alle ungeraden, bis zur ersten geraden\n j = i + 1\n while j < len(numbers) and not is_even(numbers[j]):\n j += 1\n\n if j < len(numbers):\n swap(numbers, i, j)\n else:\n break # keine weiteren Zahlen\n\n i += 1\n\n\ndef order_even_before_odd_optimized(numbers):\n next_even = 0\n next_odd = len(numbers) - 1\n\n while next_even < next_odd:\n current_value = numbers[next_even]\n if is_even(current_value):\n next_even += 1\n else:\n swap(numbers, next_even, next_odd)\n\n next_odd -= 1\n\n\ndef order_even_before_odd_optimized_v2(numbers):\n left = 0\n right = len(numbers) - 1\n\n while left < right:\n # laufe bis zur ersten ungeraden Zahl oder zum Array-Ende\n while left < len(numbers) and is_even(numbers[left]):\n left += 1\n\n # laufe bis zur ersten geraden Zahl oder zum Array-Anfang\n while right >= 0 and is_odd(numbers[right]):\n right -= 1\n\n if left < right:\n swap(numbers, left, right)\n left += 1\n right -= 1\n\n\ndef main():\n values = [1, 2, 3, 4, 5, 6, 7]\n order_even_before_odd(values)\n print(values)\n\n values2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n order_even_before_odd(values2)\n print(values2)\n\n values3 = [2, 4, 6, 1, 8]\n print(order_even_before_odd(values3))\n values4 = [2, 4, 6, 8, 1]\n print(order_even_before_odd(values4))\n print(order_even_before_odd([]))\n\n def myprint(item):\n print(item, end=' ')\n\n one_to_seven = [1, 2, 3, 4, 5, 6, 7]\n order_even_before_odd_optimized(one_to_seven)\n print(one_to_seven)\n print(order_even_before_odd_optimized([1, 2, 3, 4, 5, 6, 7, 8, 9]))\n\n print(order_even_before_odd_optimized([2, 4, 6, 1, 8]))\n print(order_even_before_odd_optimized([2, 4, 6, 8, 1]))\n print(order_even_before_odd_optimized([]))\n\n values_one_to_seven = [1, 2, 3, 4, 5, 6, 7]\n order_even_before_odd_optimized_v2(values_one_to_seven)\n print(values_one_to_seven)\n print(order_even_before_odd_optimized_v2([1, 2, 3, 4, 5, 6, 7, 8, 9]))\n\n print(order_even_before_odd_optimized_v2([2, 4, 6, 1, 8]))\n print(order_even_before_odd_optimized_v2([2, 4, 6, 8, 1]))\n print(order_even_before_odd_optimized_v2([]))\n\n print(\"-------------------------\")\n values = np.array([1, 2, 3, 4, 5, 6, 7])\n order_even_before_odd(values)\n print(values)\n\n values = np.array([1, 2, 3, 4, 5, 6, 7])\n order_even_before_odd_optimized(values)\n print(values)\n\n values = np.array([1, 2, 3, 4, 5, 6, 7])\n order_even_before_odd_optimized_v2(values)\n print(values)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array" ] ]
frank1010111/pywaterflood
[ "70c6d39e1a979548db0835296d54b7bb392870f0" ]
[ "pywaterflood/crm.py" ]
[ "\"\"\"Analyze waterfloods with capacitance-resistance models. # noqa: D401,D400\n\nClasses\n-------\nCRM : standard capacitance resistance modeling\nCrmCompensated : including pressure\n\nMethods\n-------\nq_primary : primary production\nq_CRM_perpair : production due to injection (injector-producer pairs)\nq_CRM_perproducer : production due to injection (one producer, many injectors)\nq_bhp : production from changing bottomhole pressures of producers\n\"\"\"\nfrom __future__ import annotations\n\nimport pickle\nfrom typing import Any, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom numba import njit\nfrom numpy import ndarray\nfrom scipy import optimize\n\n\n@njit\ndef q_primary(\n production: ndarray, time: ndarray, gain_producer: ndarray, tau_producer: ndarray\n) -> ndarray:\n \"\"\"Calculate primary production contribution.\n\n Uses Arps equation with b=0\n .. math::\n q_{p}(t) = q_i e^{-bt}\n\n Args\n ----------\n production : ndarray\n Production, size: Number of time steps\n time : ndarray\n Producing times to forecast, size: Number of time steps\n gain_producer : ndarray\n Arps q_i factor\n tau_producer : ndarray\n Arps time constant\n\n Returns\n ----------\n q_hat : ndarray\n Calculated production, size: Number of time steps\n \"\"\"\n time_decay = np.exp(-time / tau_producer)\n q_hat = time_decay * production[0] * gain_producer\n return q_hat\n\n\n@njit\ndef q_CRM_perpair(injection: ndarray, time: ndarray, gains: ndarray, taus: ndarray) -> ndarray:\n \"\"\"Calculate per injector-producer pair production.\n\n Runs for influences of each injector on one producer, assuming\n individual `gain`s and `tau`s for each pair\n\n Args\n ----------\n injection : ndarray\n Injected fluid, size: Number of time steps\n time : ndarray\n Producing times to forecast, size: Number of time steps\n gains : ndarray\n Connectivities between each injector and the producer,\n size: Number of injectors\n taus : ndarray\n Time constants between each injector and the producer,\n size: Number of injectors\n\n Returns\n ----------\n q_hat : ndarray\n Calculated production, size: Number of time steps\n \"\"\"\n n = len(time)\n q_hat = np.zeros(n)\n conv_injected = np.zeros((n, injection.shape[1]))\n\n # Compute convolved injection rates\n for j in range(injection.shape[1]):\n conv_injected[0, j] += (1 - np.exp((time[0] - time[1]) / taus[j])) * injection[0, j]\n for k in range(1, n):\n for m in range(1, k + 1):\n time_decay = (1 - np.exp((time[m - 1] - time[m]) / taus[j])) * np.exp(\n (time[m] - time[k]) / taus[j]\n )\n conv_injected[k, j] += time_decay * injection[m, j]\n\n # Calculate waterflood rates\n for k in range(n):\n for j in range(injection.shape[1]):\n q_hat[k] += gains[j] * conv_injected[k, j]\n return q_hat\n\n\n@njit\ndef q_CRM_perproducer(injection: ndarray, time: ndarray, gain: ndarray, tau: float) -> ndarray:\n \"\"\"Calculate per injector-producer pair production (simplified tank).\n\n Uses simplified CRMp model that assumes a single tau for each producer\n\n Args\n ----------\n injection : ndarray\n injected fluid in reservoir volumes, size: Number of time steps\n time : ndarray\n Producing times to forecast, size: Number of time steps\n gains : ndarray\n Connectivities between each injector and the producer\n size: Number of injectors\n tau : float\n Time constants all injectors and the producer\n\n Returns\n ----------\n q_hat : ndarray\n Calculated production, size: Number of time steps\n \"\"\"\n tau2 = tau * 
np.ones(injection.shape[1])\n return q_CRM_perpair(injection, time, gain, tau2)\n\n\n@njit\ndef _pressure_diff(pressure_local: ndarray, pressure: ndarray) -> ndarray:\n \"\"\"Pressure differences from local to each producer each timestep.\"\"\"\n n_t, n_p = pressure.shape\n pressure_diff = np.zeros((n_p, n_t))\n for j in range(n_p):\n for t in range(1, n_t):\n pressure_diff[j, t] = pressure_local[t - 1] - pressure[t, j]\n return pressure_diff\n\n\ndef q_bhp(pressure_local: ndarray, pressure: ndarray, v_matrix: ndarray) -> ndarray:\n r\"\"\"Calculate the production effect from bottom-hole pressure variation.\n\n This looks like\n .. math::\n q_{BHP,j}(t_i) = \\sum_{k} v_{kj}\\left[ p_j(t_{i-1}) - p_k(t_i) \\right]\n\n Args\n ----\n pressure_local : ndarray\n pressure for the well in question, shape: n_time\n pressure : ndarray\n bottomhole pressure, shape: n_time, n_producers\n v_matrix : ndarray\n connectivity between one producer and all producers, shape: n_producers\n\n Returns\n -------\n q : ndarray\n production from changing BHP\n shape: n_time\n \"\"\"\n pressure_diff = _pressure_diff(pressure_local, pressure)\n q = np.einsum(\"j,jt->t\", v_matrix, pressure_diff)\n return q\n\n\ndef random_weights(n_i: int, n_j: int, axis: int = 0, seed: int | None = None) -> ndarray:\n \"\"\"Generate random weights for producer-injector gains.\n\n Args\n ----\n n_i : int\n n_j : int\n axis : int, default is 0\n seed : int, default is None\n\n Returns\n -------\n gains_guess: ndarray\n \"\"\"\n rng = np.random.default_rng(seed)\n limit = 10 * (n_i if axis == 0 else n_j)\n vec = rng.integers(0, limit, (n_i, n_j))\n axis_sum = vec.sum(axis, keepdims=True)\n return vec / axis_sum\n\n\nclass CRM:\n \"\"\"A Capacitance Resistance Model history matcher.\n\n CRM uses a physics-inspired mass balance approach to explain production for \\\n waterfloods. It treats each injector-producer well pair as a system \\\n with mass input, output, and pressure related to the mass balance. \\\n Several versions exist. 
Select them from the arguments.\n\n Args\n ----------\n primary : bool\n Whether to model primary production (strongly recommended)\n tau_selection : str\n How many tau values to select\n - If 'per-pair', fit tau for each producer-injector pair\n - If 'per-producer', fit tau for each producer (CRMp model)\n constraints : str\n How to constrain the gains\n * If 'up-to one' (default), let gains vary from 0 (no connection) to 1 \\\n (all injection goes to producer)\n * If 'positive', require each gain to be positive \\\n (It is unlikely to go negative in real life)\n * If 'sum-to-one', require the gains for each injector to sum to one \\\n (all production accounted for)\n * If 'sum-to-one injector' (not implemented), require each injector's \\\n gains to sum to one (all injection accounted for)\n\n Examples\n ----------\n crm = CRM(True, \"per-pair\", \"up-to one\")\n\n References\n ----------\n \"A State-of-the-Art Literature Review on Capacitance Resistance Models for\n Reservoir Characterization and Performance Forecasting\" - Holanda et al., 2018.\n \"\"\"\n\n def __init__(\n self,\n primary: bool = True,\n tau_selection: str = \"per-pair\",\n constraints: str = \"positive\",\n ):\n \"\"\"Initialize CRM with appropriate settings.\"\"\"\n if type(primary) != bool:\n raise TypeError(\"primary must be a boolean\")\n self.primary = primary\n if constraints not in (\n \"positive\",\n \"up-to one\",\n \"sum-to-one\",\n \"sum-to-one injector\",\n ):\n raise ValueError(\"Invalid constraints\")\n self.constraints = constraints\n self.tau_selection = tau_selection\n if tau_selection == \"per-pair\":\n self.q_CRM = q_CRM_perpair\n elif tau_selection == \"per-producer\":\n self.q_CRM = q_CRM_perproducer\n else:\n raise ValueError(\n \"tau_selection must be one of\"\n + '(\"per-pair\",\"per-producer\")'\n + f\", not {tau_selection}\"\n )\n\n def fit(\n self,\n production: ndarray,\n injection: ndarray,\n time: ndarray,\n initial_guess: ndarray = None,\n num_cores: int = 1,\n random: bool = False,\n **kwargs,\n ):\n \"\"\"Build a CRM model from the production and injection data.\n\n Args\n ----------\n production : ndarray\n production rates for each time period,\n shape: (n_time, n_producers)\n injection : ndarray\n injection rates for each time period,\n shape: (n_time, n_injectors)\n time : ndarray\n relative time for each rate measurement, starting from 0,\n shape: (n_time)\n initial_guess : ndarray\n initial guesses for gains, taus, primary production contribution\n shape: (len(guess), n_producers)\n num_cores (int): number of cores to run fitting procedure on, defaults to 1\n random : bool\n whether to randomly initialize the gains\n **kwargs:\n keyword arguments to pass to scipy.optimize fitting routine\n\n Returns\n ----------\n self: trained model\n \"\"\"\n _validate_inputs(production, injection, time)\n self.production = production\n self.injection = injection\n self.time = time\n\n if not initial_guess:\n initial_guess = self._get_initial_guess(random=random)\n bounds, constraints = self._get_bounds()\n num_cores = kwargs.pop(\"num_cores\", 1)\n\n def fit_well(production, x0):\n # residual is an L2 norm\n def residual(x, production):\n return sum(\n (production - self._calculate_qhat(x, production, injection, time)) ** 2\n )\n\n result = optimize.minimize(\n residual,\n x0,\n bounds=bounds,\n constraints=constraints,\n args=(production,),\n **kwargs,\n )\n return result\n\n if num_cores == 1:\n results = map(fit_well, self.production.T, initial_guess)\n else:\n results = 
Parallel(n_jobs=num_cores)(\n delayed(fit_well)(p, x0) for p, x0 in zip(self.production.T, initial_guess)\n )\n\n opts_perwell = [self._split_opts(r[\"x\"]) for r in results]\n gains_perwell, tau_perwell, gains_producer, tau_producer = map(list, zip(*opts_perwell))\n\n self.gains = np.vstack(gains_perwell)\n self.tau = np.vstack(tau_perwell)\n self.gains_producer = np.array(gains_producer)\n self.tau_producer = np.array(tau_producer)\n return self\n\n def predict(self, injection=None, time=None, connections=None):\n \"\"\"Predict production for a trained model.\n\n If the injection and time are not provided, this will use the training values\n\n Args\n ----------\n injection : ndarray\n The injection rates to input to the system, shape (n_time, n_inj)\n time : ndarray\n The timesteps to predict\n connections : dict\n if present, the gains, tau, gains_producer, tau_producer\n matrices\n\n Returns\n ----------\n q_hat :ndarray\n The predicted values, shape (n_time, n_producers)\n \"\"\"\n if connections is not None:\n gains = connections.get(\"gains\", self.gains)\n tau = connections.get(\"tau\", self.tau)\n gains_producer = connections.get(\"gains_producer\", self.gains_producer)\n tau_producer = connections.get(\"tau_producer\", self.tau_producer)\n else:\n gains = self.gains\n tau = self.tau\n gains_producer = self.gains_producer\n tau_producer = self.tau_producer\n production = self.production\n n_producers = production.shape[1]\n\n if int(injection is None) + int(time is None) == 1:\n raise TypeError(\"predict() takes 1 or 3 arguments, 2 given\")\n if injection is None:\n injection = self.injection\n if time is None:\n time = self.time\n if time.shape[0] != injection.shape[0]:\n raise ValueError(\"injection and time need same number of steps\")\n\n q_hat = np.zeros((len(time), n_producers))\n for i in range(n_producers):\n q_hat[:, i] += q_primary(production[:, i], time, gains_producer[i], tau_producer[i])\n q_hat[:, i] += self.q_CRM(injection, time, gains[i, :], tau[i])\n return q_hat\n\n def set_rates(self, production=None, injection=None, time=None):\n \"\"\"Set production and injection rates and time array.\n\n Args\n -----\n production : ndarray\n production rates with shape (n_time, n_producers)\n injection : ndarray\n injection rates with shape (n_time, n_injectors)\n time : ndarray\n timesteps with shape n_time\n \"\"\"\n _validate_inputs(production, injection, time)\n if production is not None:\n self.production = production\n if injection is not None:\n self.injection = injection\n if time is not None:\n self.time = time\n\n def set_connections(self, gains=None, tau=None, gains_producer=None, tau_producer=None):\n \"\"\"Set waterflood properties.\n\n Args\n -----\n gains : ndarray\n connectivity between injector and producer\n shape: n_gains, n_producers\n tau : ndarray\n time-constant for injection to be felt by production\n shape: either n_producers or (n_gains, n_producers)\n gains_producer : ndarray\n gain on primary production, shape: n_producers\n tau_producer : ndarray\n Arps time constant for primary production, shape: n_producers\n \"\"\"\n if gains is not None:\n self.gains = gains\n if tau is not None:\n self.tau = tau\n if gains_producer is not None:\n self.gains_producer = gains_producer\n if tau_producer is not None:\n self.tau_producer = tau_producer\n\n def residual(self, production=None, injection=None, time=None):\n \"\"\"Calculate the production minus the predicted production for a trained model.\n\n If the production, injection, and time are not provided, 
this will use the\n training values\n\n Args\n ----------\n production : ndarray\n The production rates observed, shape: (n_timesteps, n_producers)\n injection : ndarray\n The injection rates to input to the system,\n shape: (n_timesteps, n_injectors)\n time : ndarray\n The timesteps to predict\n\n Returns\n ----------\n residual : ndarray\n The true production data minus the predictions, shape (n_time, n_producers)\n \"\"\"\n q_hat = self.predict(injection, time)\n if production is None:\n production = self.production\n return production - q_hat\n\n def to_excel(self, fname: str):\n \"\"\"Write trained model to an Excel file.\n\n Args\n ----\n fname : str\n Excel file to write out\n\n \"\"\"\n for x in (\"gains\", \"tau\", \"gains_producer\", \"tau_producer\"):\n if x not in self.__dict__.keys():\n raise (ValueError(\"Model has not been trained\"))\n with pd.ExcelWriter(fname) as f:\n pd.DataFrame(self.gains).to_excel(f, sheet_name=\"Gains\")\n pd.DataFrame(self.tau).to_excel(f, sheet_name=\"Taus\")\n pd.DataFrame(\n {\n \"Producer gains\": self.gains_producer,\n \"Producer taus\": self.tau_producer,\n }\n ).to_excel(f, sheet_name=\"Primary production\")\n\n def to_pickle(self, fname: str):\n \"\"\"Write trained model to a pickle file.\n\n Args\n -----\n fname : str\n pickle file to write out\n \"\"\"\n with open(fname, \"wb\") as f:\n pickle.dump(self, f)\n\n def _get_initial_guess(self, tau_selection: str | None = None, random=False):\n \"\"\"Create initial guesses for the CRM model parameters.\n\n :meta private:\n\n Args\n ----------\n tau_selection : str, one of 'per-pair' or 'per-producer'\n sets whether to use CRM (per-pair) or CRMp model\n random : bool\n whether initial gains are randomly (true) or proportionally assigned\n Returns\n ----------\n x0 : ndarray\n Initial primary production gain, time constant and waterflood gains\n and time constants, as one long 1-d array\n \"\"\"\n if tau_selection is not None:\n self.tau_selection = tau_selection\n\n n_inj = self.injection.shape[1]\n n_prod = self.production.shape[1]\n d_t = self.time[1] - self.time[0]\n n_gains, n_tau, n_primary = self._opt_numbers()[:3]\n\n axis = 1 if (self.constraints == \"sum-to-one injector\") else 0\n if random:\n rng = np.random.default_rng()\n gains_producer_guess1 = rng.random(n_prod)\n gains_guess1 = random_weights(n_prod, n_inj, axis)\n else:\n gains_unnormed = np.ones((n_prod, n_inj))\n gains_guess1 = gains_unnormed / np.sum(gains_unnormed, axis, keepdims=True)\n gains_producer_guess1 = np.ones(n_prod)\n tau_producer_guess1 = d_t * np.ones(n_prod)\n if self.tau_selection == \"per-pair\":\n tau_guess1 = d_t * np.ones((n_prod, n_inj))\n else: # 'per-producer'\n tau_guess1 = d_t * np.ones((n_prod, 1))\n\n if self.primary:\n x0 = [\n np.concatenate(\n [\n gains_guess1[i, :],\n tau_guess1[i, :],\n gains_producer_guess1[[i]],\n tau_producer_guess1[[i]],\n ]\n )\n for i in range(n_prod)\n ]\n else:\n x0 = [np.concatenate([gains_guess1[i, :], tau_guess1[i, :]]) for i in range(n_prod)]\n return x0\n\n def _opt_numbers(self) -> tuple[int, int, int]:\n \"\"\"Return the number of gains, taus, and primary production parameters to fit.\"\"\"\n n_gains = self.injection.shape[1]\n if self.tau_selection == \"per-pair\":\n n_tau = n_gains\n else:\n n_tau = 1\n if self.primary:\n n_primary = 2\n else:\n n_primary = 0\n return n_gains, n_tau, n_primary\n\n def _get_bounds(self, constraints: str = \"\") -> tuple[tuple, tuple | dict]:\n \"\"\"Create bounds for the model from initialized constraints.\"\"\"\n if 
constraints:\n self.constraints = constraints\n\n n_inj = self.injection.shape[1]\n n = sum(self._opt_numbers())\n\n if self.constraints == \"positive\":\n bounds = ((0, np.inf),) * n\n constraints_optimizer = () # type: Union[Tuple, dict]\n elif self.constraints == \"sum-to-one\":\n bounds = ((0, np.inf),) * n\n\n def constrain(x):\n x = x[:n_inj]\n return np.sum(x) - 1\n\n constraints_optimizer = {\"type\": \"eq\", \"fun\": constrain}\n elif self.constraints == \"sum-to-one injector\":\n raise NotImplementedError(\"sum-to-one injector is not implemented\")\n elif self.constraints == \"up-to one\":\n lb = np.full(n, 0)\n ub = np.full(n, np.inf)\n ub[:n_inj] = 1\n bounds = tuple(zip(lb, ub))\n constraints_optimizer = ()\n else:\n bounds = ((0, np.inf),) * n\n constraints_optimizer = ()\n return bounds, constraints_optimizer\n\n def _calculate_qhat(\n self,\n x: np.ndarray,\n production: np.ndarray,\n injection: np.ndarray,\n time: np.ndarray,\n ):\n gains, tau, gain_producer, tau_producer = self._split_opts(x)\n if self.primary:\n q_hat = q_primary(production, time, gain_producer, tau_producer)\n else:\n q_hat = np.zeros(len(time))\n\n q_hat += self.q_CRM(injection, time, gains, tau)\n return q_hat\n\n def _split_opts(self, x: np.ndarray):\n n_inj = self.injection.shape[1]\n # n_prod = self.production.shape[1]\n n_gains, n_tau, n_primary = self._opt_numbers()\n\n gains = x[:n_inj]\n if self.tau_selection == \"per-pair\":\n tau = x[n_inj : n_inj * 2]\n else:\n tau = x[n_inj]\n if self.primary:\n gain_producer = x[-2]\n tau_producer = x[-1]\n else:\n gain_producer = 0\n tau_producer = 1\n if self.tau_selection == \"per-pair\":\n tau[tau < 1e-10] = 1e-10\n elif tau < 1e-10:\n tau = 1e-10\n if tau_producer < 1e-10:\n tau_producer = 1e-10\n return gains, tau, gain_producer, tau_producer\n\n\nclass CrmCompensated(CRM):\n \"\"\"Bottom-hole pressure compensated CRM.\"\"\"\n\n def fit(\n self,\n production: ndarray,\n pressure: ndarray,\n injection: ndarray,\n time: ndarray,\n initial_guess: ndarray = None,\n num_cores: int = 1,\n random: bool = False,\n **kwargs,\n ):\n \"\"\"Fit a CRM model from the production, pressure, and injection data.\n\n Args\n ----------\n production : ndarray\n production rates for each time period,\n shape: (n_time, n_producers)\n pressure : ndarray\n average pressure for each producer for each time period,\n shape: (n_time, n_producers)\n injection : ndarray\n injection rates for each time period,\n shape: (n_time, n_injectors)\n time : ndarray\n relative time for each rate measurement, starting from 0,\n shape: (n_time)\n initial_guess : ndarray\n initial guesses for gains, taus, primary production\n contribution\n shape: (len(guess), n_producers)\n num_cores (int): number of cores to run fitting procedure on, defaults to 1\n random : bool\n whether to randomly initialize the gains\n **kwargs:\n keyword arguments to pass to scipy.optimize fitting routine\n\n Returns\n ----------\n self: trained model\n \"\"\"\n _validate_inputs(production, injection, time, pressure)\n self.production = production\n self.injection = injection\n self.time = time\n self.pressure = pressure\n\n if not initial_guess:\n initial_guess = self._get_initial_guess(random=random)\n bounds, constraints = self._get_bounds()\n\n def fit_well(production, pressure_local, x0):\n # residual is an L2 norm\n def residual(x, production):\n return sum(\n (\n production\n - self._calculate_qhat(\n x, production, injection, time, pressure_local, pressure\n )\n )\n ** 2\n )\n\n result = optimize.minimize(\n 
residual,\n x0,\n bounds=bounds,\n constraints=constraints,\n args=(production,),\n **kwargs,\n )\n return result\n\n if num_cores == 1:\n results = map(fit_well, self.production.T, pressure.T, initial_guess)\n else:\n results = Parallel(n_jobs=num_cores)(\n delayed(fit_well)(prod, pressure, x0)\n for prod, pressure, x0 in zip(self.production.T, pressure.T, initial_guess)\n )\n\n opts_perwell = [self._split_opts(r[\"x\"]) for r in results]\n gains_perwell, tau_perwell, gains_producer, tau_producer, gain_pressure = map(\n list, zip(*opts_perwell)\n )\n\n self.gains = np.vstack(gains_perwell)\n self.tau = np.vstack(tau_perwell)\n self.gains_producer = np.array(gains_producer)\n self.tau_producer = np.array(tau_producer)\n self.gain_pressure = np.vstack(gain_pressure)\n return self\n\n def _calculate_qhat( # TODO: start here\n self,\n x: np.ndarray,\n production: np.ndarray,\n injection: np.ndarray,\n time: np.ndarray,\n pressure_local: np.ndarray,\n pressure: np.ndarray,\n ):\n gains, tau, gain_producer, tau_producer, gain_pressure = self._split_opts(x)\n if self.primary:\n q_hat = q_primary(production, time, gain_producer, tau_producer)\n else:\n q_hat = np.zeros(len(time))\n\n q_hat += self.q_CRM(injection, time, gains, tau)\n q_hat += q_bhp(pressure_local, pressure, gain_pressure)\n return q_hat\n\n def _opt_numbers(self) -> tuple[int, int, int, int]:\n n_gain, n_tau, n_primary = super()._opt_numbers()\n return n_gain, n_tau, n_primary, self.production.shape[1]\n\n def _split_opts(self, x: np.ndarray) -> tuple[ndarray, ndarray, Any, Any, ndarray]:\n n_gains, n_tau, n_primary = self._opt_numbers()[:3]\n n_connectivity = n_gains + n_tau\n\n gains = x[:n_gains]\n tau = x[n_gains:n_connectivity]\n if self.primary:\n gain_producer = x[n_connectivity:][0]\n tau_producer = x[n_connectivity:][1]\n else:\n gain_producer = 0\n tau_producer = 1\n gain_pressure = x[n_connectivity + n_primary :]\n\n # boundary setting\n if self.tau_selection == \"per-pair\":\n tau[tau < 1e-10] = 1e-10\n elif tau < 1e-10:\n tau = 1e-10\n if tau_producer < 1e-10:\n tau_producer = 1e-10\n return gains, tau, gain_producer, tau_producer, gain_pressure\n\n def _get_initial_guess(self, tau_selection: str | None = None, random=False):\n \"\"\"Make the initial guesses for the CRM model parameters.\n\n :meta private:\n\n Args\n ----------\n tau_selection : str, one of 'per-pair' or 'per-producer'\n sets whether to use CRM (per-pair) or CRMp model\n\n Returns\n ----------\n x0 : ndarray\n Initial primary production gain, time constant and waterflood gains\n and time constants, as one long 1-d array\n \"\"\"\n guess = super()._get_initial_guess(tau_selection=tau_selection, random=random)\n _, _, _, n_pressure = self._opt_numbers()\n pressure_guess = np.ones(n_pressure)\n guess = [np.concatenate([guess[i], pressure_guess]) for i in range(len(guess))]\n return guess\n\n\ndef _validate_inputs(\n production: ndarray | None = None,\n injection: ndarray | None = None,\n time: ndarray | None = None,\n pressure: ndarray | None = None,\n) -> None:\n \"\"\"Validate shapes and values of inputs.\n\n Args\n ----\n production : ndarray, optional\n injection : ndarray, optional\n time : ndarray, optional\n pressure : ndarray, optional\n\n Raises\n ------\n ValueError if timesteps don't match or production and pressure don't match\n \"\"\"\n inputs = {\n \"production\": production,\n \"injection\": injection,\n \"time\": time,\n \"pressure\": pressure,\n }\n inputs = {key: val for key, val in inputs.items() if val is not None}\n # Shapes\n 
test_prod_inj_timesteps = production is not None and injection is not None\n if test_prod_inj_timesteps and (production.shape[0] != injection.shape[0]):\n raise ValueError(\"production and injection do not have the same number of timesteps\")\n if time is not None:\n for timeseries in inputs:\n if inputs[timeseries].shape[0] != time.shape[0]:\n raise ValueError(f\"{timeseries} and time do not have the same number of timesteps\")\n if production is not None:\n if (injection is not None) and (production.shape[0] != injection.shape[0]):\n raise ValueError(\"production and injection do not have the same number of timesteps\")\n if (pressure is not None) and (production.shape != pressure.shape):\n raise ValueError(\"production and pressure are not of the same shape\")\n if (\n (injection is not None)\n and (pressure is not None)\n and (injection.shape[0] != pressure.shape[0])\n ):\n raise ValueError(\"injection and pressure do not have the same number of timesteps\")\n # Values\n for timeseries in inputs:\n if np.any(np.isnan(inputs[timeseries])):\n raise ValueError(f\"{timeseries} cannot have NaNs\")\n if np.any(inputs[timeseries] < 0.0):\n raise ValueError(f\"{timeseries} cannot be negative\")\n" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.array", "numpy.isnan", "numpy.zeros", "numpy.sum", "pandas.DataFrame", "numpy.ones", "numpy.random.default_rng", "numpy.exp", "numpy.any", "numpy.einsum", "pandas.ExcelWriter", "scipy.optimize.minimize", "numpy.vstack" ] ]
bbalegere/MILP-Interview-Scheduler
[ "8f8e584c8022e45a363aa001d5eda08cf4a74d89" ]
[ "InterviewScheduler.py" ]
[ "\"\"\"\n Author: Bharat Balegere\n Date created: 10-Oct-2017\n Date last modified: 23-Jan-2018\n Python Version: 3.6\n\"\"\"\nimport argparse\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nfrom gurobipy import *\n\n\ndef read_input_csv(filename, typ=None):\n sldf = pd.read_csv(filename, header=0, dtype=typ)\n sldf.columns = sldf.columns.str.strip()\n sldf[sldf.columns[0]] = sldf[sldf.columns[0]].astype(str).str.strip()\n sldf.set_index(sldf.columns[0], inplace=True)\n return sldf.to_dict('index'), sorted(sldf.columns.values), list(sldf.index.values)\n\n\ndef read_slots_interviews(filename):\n sidf = pd.read_csv(filename, dtype=object)\n sidf.columns = sidf.columns.str.strip()\n sidict = sidf.to_dict('list')\n return dict((key, int(v[0])) for key, v in sidict.items())\n\n\ndef read_shortlists(filename):\n sldf = pd.read_csv(filename, dtype=object)\n sldf.columns = sldf.columns.str.strip()\n comps = list(sldf.columns.values)\n comtupl = [(c, str(n).strip()) for c in comps for n in list(sldf[c].dropna().values)]\n return dict((x, 1) for x in comtupl), sorted(comps), sorted(set([x[1] for x in comtupl]))\n\n\ndef read_lp(filename):\n exnames = []\n with open(filename) as f:\n for csvline in f:\n exnames = exnames + [str(x).strip() for x in csvline.strip().split(',') if len(str(x).strip()) > 0]\n\n return sorted(set(exnames))\n\n\ndef generateSchedule(companies, fixedints, allnames, panels, prefs, shortlists, slots, slots_int, out):\n print(datetime.datetime.now().time())\n # Find out max number of panels\n maxpanels = dict((c, max(panels[s][c] for s in slots)) for c in companies)\n # Generate cost of slots\n costs = dict((slots[s], s + 1) for s in range(len(slots)))\n # Calculate number shortlists for each students\n crit = dict((n, sum(shortlists.get((c, n), 0) for c in companies)) for n in allnames)\n # Remove names who dont have any shortlists\n names = [key for key, value in crit.items() if value > 0]\n # Calculate number shortlists per company\n compshortlists = dict((c, sum(shortlists.get((c, n), 0) for n in names)) for c in companies)\n # Calculate total number of panels per company\n comppanels = dict((c, int(sum(panels[s][c] for s in slots) / slots_int.get(c, 1))) for c in companies)\n\n for c in companies:\n if compshortlists[c] > comppanels[c]:\n print(c + \" has shortlists greater than no of panels \" + str(compshortlists[c]) + \" > \" + str(comppanels[c]))\n\n fibonacii = [2, 3]\n for i in range(2, 1 + int(max(crit.values()))):\n fibonacii.append(fibonacii[i - 1] + fibonacii[i - 2])\n\n # Create Objective Coefficients\n prefsnew = dict()\n objcoeff = dict()\n\n if len(prefs):\n for n in names:\n actpref = dict((c, prefs[n][c] * shortlists.get((c, n), 0)) for c in companies if shortlists.get((c, n), 0) > 0)\n scaledpref = {key: rank for rank, key in enumerate(sorted(actpref, key=actpref.get), 1)}\n\n for c, rank in scaledpref.items():\n prefsnew[n, c] = rank\n for s in slots:\n if compshortlists[c] > comppanels[c]:\n objcoeff[s, c, n] = (rank / (crit[n] + 1)) * (len(slots) + 1 - costs[s])\n else:\n objcoeff[s, c, n] = (1 - rank / (crit[n] + 1)) * costs[s]\n\n print('Creating IPLP')\n model = Model('interviews')\n compnames = tuplelist([(c, n) for c, n in shortlists.keys() if n in names and c in companies])\n choices = model.addVars(slots, compnames, vtype=GRB.BINARY, name='G')\n # Objective - allocate max students to the initial few slots\n model.setObjective(quicksum(choices[s, c, n] * objcoeff.get((s, c, n), costs[s]) for s in slots for c, n in compnames), 
GRB.MINIMIZE)\n # Constraint - maximum number in a slot for a club is limited by panels\n model.addConstrs((choices.sum(s, c, '*') <= panels[s][c] for s in slots for c in companies))\n # Constraint - allocate student only if he has a shortlist\n model.addConstrs((choices.sum('*', c, n) <= shortlists.get((c, n), 0) * slots_int.get(c, 1) for n in names for c in companies))\n # Constraint - slots should not conflict for a student\n model.addConstrs((choices.sum(s, '*', n) <= 1 for s in slots for n in names))\n # Constraint - allocate all students or number of interviews possible\n model.addConstrs((choices.sum('*', c, '*') == min(compshortlists[c], comppanels[c]) * slots_int.get(c, 1) for c in companies))\n # Constraint - for multiple slots per interview, same candidate should be allocated\n for c, si in slots_int.items():\n\n start_slot = 0\n while panels[slots[start_slot]][c] == 0:\n start_slot += 1\n\n if si > 1:\n for i in range(si - 1 + start_slot, len(slots), si):\n for x, n in compnames.select(c, '*'):\n for j in range(i - si + 1, i):\n model.addConstr((choices[slots[i], c, n] - choices[slots[j], c, n]), GRB.EQUAL, 0)\n\n # Constraint - Fix manually given schedule\n flist = [(s, c, n) for s, vals in fixedints.items() for c, n in vals.items() if (c, n) in compnames]\n model.addConstrs((choices[s, c, n] == 1 for s, c, n in flist))\n\n print('Optimising')\n model.optimize()\n solution = model.getAttr('X', choices)\n\n sche = [['Slot'] + [c + str(j + 1) for c in companies for j in range(int(maxpanels[c]))]]\n\n for s in slots:\n temp = [s]\n for c in companies:\n row = [''] * int(maxpanels[c])\n i = 0\n for n in [name for com, name in compnames if com == c]:\n if solution.get((s, c, n), 0):\n row[i] = n\n i = i + 1\n temp = temp + row\n sche.append(temp)\n\n schedf = pd.DataFrame(sche)\n schedf.to_csv(out + '\\\\sche.csv', index=False, header=False)\n\n namesdf = pd.DataFrame.from_dict(dict((s, {n: c for c in companies for n in names if solution.get((s, c, n), 0)}) for s in slots), orient='index')\n namesdf.sort_index(axis=1).to_csv(out + '\\\\names.csv')\n\n print(model.status)\n print(datetime.datetime.now().time())\n\n if prefsnew:\n unordn = set()\n for n in names:\n init = 1\n for s in slots:\n stop = False\n for c in companies:\n if solution.get((s, c, n), 0) == 1:\n if prefsnew[n, c] < init:\n unordn.add(n)\n stop = True\n break\n else:\n init = prefsnew[n, c]\n\n if stop:\n break\n print('The following candidates preference order has been violated')\n print(unordn)\n print(len(unordn))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('shortlists', help='Shortlists File per company as CSV', metavar='Shortlists.csv')\n parser.add_argument('slotspanels', help='Slots and Panels per company as CSV', metavar='SlotsPanels.csv')\n parser.add_argument('-s', '--slotsint', help='Number of Slots required per Interview for each company', metavar='SlotsInterview.csv')\n parser.add_argument('-p', '--prefs', help='CSV with a matrix containing names and companies', metavar='prefs.csv')\n parser.add_argument('-l', '--leftprocess', help='CSV with a list of candidates who have left the process', metavar='lp.csv')\n parser.add_argument('-f', '--fixed', help='CSV of the schedule with pre fixed candidates. 
Should satisfy constraints', metavar='fixed.csv')\n parser.add_argument('-o', '--output', help='Output directory', default='out')\n\n args = parser.parse_args()\n shortlists, shcompanies, names = read_shortlists(args.shortlists)\n\n panels, companies, slots = read_input_csv(args.slotspanels)\n print('Number of Companies')\n print(len(companies))\n print('Number of Candidates')\n print(len(names))\n print('Number of Slots')\n print(len(slots))\n print(set(companies) ^ set(shcompanies))\n if not set(companies).issubset(set(shcompanies)):\n raise ValueError('Shortlists are not present for all companies')\n\n if len([x for vals in panels.values() for x in vals.values() if not np.issubdtype(x, int) or x < 0]):\n raise ValueError('The number of panels must be a positive integer ')\n\n slots_int = dict()\n if args.slotsint:\n slots_int = read_slots_interviews(args.slotsint)\n assert (sorted(slots_int.keys()) == sorted(companies))\n\n lp = list()\n if args.leftprocess:\n lp = read_lp(args.leftprocess)\n names = [n for n in names if n not in lp]\n\n prefs = dict()\n\n if args.prefs:\n prefs, comps3, names2 = read_input_csv(args.prefs)\n for vals in prefs.values():\n for val in vals.values():\n if val not in range(1, len(shcompanies) + 1):\n raise ValueError('Incorrect preference ' + str(val) + '. It should be between 1 and ' + str(len(shcompanies)))\n assert (set(companies).issubset(set(comps3)))\n assert (shcompanies == comps3)\n\n missing = set(names) - set(names2)\n if len(missing):\n print('Preferences are missing for below names')\n print(missing)\n raise ValueError('Some names are mssing')\n\n fixedints = dict()\n if args.fixed:\n fixedints, comps4, slots2 = read_input_csv(args.fixed, typ=object)\n\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n generateSchedule(companies, fixedints, names, panels, prefs, shortlists, slots, slots_int, args.output)\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "numpy.issubdtype" ] ]
dougbrion/ModernGL
[ "6de8938ccd0042c1389a32b697af5f9c9d279e41" ]
[ "examples/experimental/julia_fractal.py" ]
[ "import moderngl.experimental as mgl\n\nimport numpy as np\n\nfrom example_window import Example, run_example\n\n\nclass Fractal(Example):\n def __init__(self):\n self.ctx = mgl.create_context()\n\n self.prog = self.ctx.program(\n vertex_shader='''\n #version 330\n\n in vec2 in_vert;\n out vec2 v_text;\n\n void main() {\n gl_Position = vec4(in_vert, 0.0, 1.0);\n v_text = in_vert / 2.0 + 0.5;\n }\n ''',\n fragment_shader='''\n #version 330\n\n in vec2 v_text;\n out vec4 f_color;\n\n uniform vec2 Center;\n uniform int Iter;\n\n void main() {\n vec2 z = vec2(5.0 * (v_text.x - 0.5), 3.0 * (v_text.y - 0.5));\n vec2 c = Center;\n\n int i;\n for(i = 0; i < Iter; i++) {\n vec2 v = vec2(\n (z.x * z.x - z.y * z.y) + c.x,\n (z.y * z.x + z.x * z.y) + c.y\n );\n if (dot(v, v) > 4.0) break;\n z = v;\n }\n\n float cm = fract((i == Iter ? 0.0 : float(i)) * 10 / Iter);\n f_color = vec4(\n fract(cm + 0.0 / 3.0),\n fract(cm + 1.0 / 3.0),\n fract(cm + 2.0 / 3.0),\n 1.0\n );\n }\n ''',\n )\n\n vertices = np.array([-1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0])\n\n self.vbo = self.ctx.buffer(vertices.astype('f4').tobytes())\n self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert')\n\n def render(self):\n self.ctx.screen.viewport = self.wnd.viewport\n self.ctx.clear(1.0, 1.0, 1.0)\n\n self.prog['Center'] = (0.49, 0.32)\n self.prog['Iter'] = 100\n\n self.vao.render(mgl.TRIANGLE_STRIP)\n\n\nrun_example(Fractal)\n" ]
[ [ "numpy.array" ] ]
openmednlp/shipyard
[ "a325bb4aeb838dad53b76aa75a815a02357c752a" ]
[ "ris/cranial/embeddings.py" ]
[ "import pandas as pd\nfrom configparser import ConfigParser\nimport re\nfrom gensim.models.word2vec import Word2Vec\n\nconfig = ConfigParser()\nconfig.read('config.ini')\n\n\ndef vectorize_dataset(x, x_val, y, y_val, stratify=False):\n from bedrock.feature import train_tfidf_vectorizer\n from bedrock.collection import balance_df\n\n train_df = pd.DataFrame({'x': x, 'y': y})\n\n if stratify:\n balanced_train_df = balance_df(train_df, 'y')\n x = balanced_train_df['x']\n y = balanced_train_df['y']\n\n vectorizer = train_tfidf_vectorizer(\n x,\n config['DEFAULT']['vectorizer']\n )\n\n x_balanced_vec = vectorizer.transform(x)\n x_val_vec = vectorizer.transform(x_val)\n\n return x_balanced_vec, x_val_vec, y, y_val\n\n\ndef regex_label_sentences(sentences, pattern_dict):\n labels = []\n for sentence in sentences:\n label = None\n for key in pattern_dict.keys():\n if re.search(pattern_dict[key], sentence):\n label = key\n break\n labels.append(label)\n return labels\n\n\ndef word2vec(sentences):\n from gensim.models.word2vec import Word2Vec\n\n print('doing w2v')\n model = Word2Vec(sentences, workers=6, size=200, min_count=1, window=15, sample=1e-3)\n words = model.wv.vocab\n vectors = model[words]\n # df = pd.DataFrame(data=vectors.transpose(), columns=words)\n return words, vectors\n" ]
[ [ "pandas.DataFrame" ] ]
xc-kiwiberry/TRICE
[ "5b37e68fd1111fdc8db7e8544080dcd83c805b35" ]
[ "thumt/models/modeling_bart.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BART model, ported from the fairseq repo.\"\"\"\nimport math\nimport random\nimport warnings\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom transformers.activations import ACT2FN\nfrom transformers.configuration_bart import BartConfig\nfrom transformers.file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom transformers.modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPast,\n Seq2SeqLMOutput,\n Seq2SeqModelOutput,\n Seq2SeqQuestionAnsweringModelOutput,\n Seq2SeqSequenceClassifierOutput,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BartConfig\"\n_TOKENIZER_FOR_DOC = \"BartTokenizer\"\n\n\nBART_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/bart-base\",\n \"facebook/bart-large\",\n \"facebook/bart-large-mnli\",\n \"facebook/bart-large-cnn\",\n \"facebook/bart-large-xsum\",\n \"facebook/mbart-large-en-ro\",\n]\n# This list is incomplete. See all BART models at https://huggingface.co/models?filter=bart\n\n\nBART_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\n\"\"\"\n\nBART_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig\n\n >>> # see ``examples/summarization/bart/run_eval.py`` for a longer example\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')\n\n >>> ARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs.\"\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\n\"\"\"\n\nBART_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`.\n See :meth:`transformers.PreTrainedTokenizer.encode` and\n :meth:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for translation and summarization training. By default, the model will create this tensor by\n shifting the :obj:`input_ids` to the right, following the paper.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will\n also be used by default.\n\n If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and\n modify to your needs. 
See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more\n information on the default strategy.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`)\n :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a\n sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of\n the decoder.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape\n :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\ndef invert_mask(attention_mask):\n \"\"\"Turns 1->0, 0->1, False->True, True-> False\"\"\"\n assert attention_mask.dim() == 2\n return attention_mask.eq(0)\n\n\ndef _prepare_bart_decoder_inputs(\n config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32\n):\n \"\"\"Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if\n none are provided. This mimics the default behavior in fairseq. 
To override it pass in masks.\n Note: this is not called during generation\n \"\"\"\n pad_token_id = config.pad_token_id\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)\n bsz, tgt_len = decoder_input_ids.size()\n if decoder_padding_mask is None:\n decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)\n else:\n decoder_padding_mask = invert_mask(decoder_padding_mask)\n if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:\n # never mask leading token, even if it is pad\n decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]\n tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))\n mask = torch.arange(tmp.size(-1))\n tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)\n causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)\n return decoder_input_ids, decoder_padding_mask, causal_mask\n\n\nclass PretrainedBartModel(PreTrainedModel):\n config_class = BartConfig\n base_model_prefix = \"model\"\n\n def _init_weights(self, module):\n std = self.config.init_std\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, SinusoidalPositionalEmbedding):\n pass\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n @property\n def dummy_inputs(self):\n pad_token = self.config.pad_token_id\n input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)\n dummy_inputs = {\n \"attention_mask\": input_ids.ne(pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n\ndef _make_linear_from_emb(emb):\n vocab_size, emb_size = emb.weight.shape\n lin_layer = nn.Linear(vocab_size, emb_size, bias=False)\n lin_layer.weight.data = emb.weight.data\n return lin_layer\n\n\n# Helper Functions, mostly for making masks\ndef _check_shapes(shape_1, shape2):\n if shape_1 != shape2:\n raise AssertionError(\"shape mismatch: {} != {}\".format(shape_1, shape2))\n\n\ndef shift_tokens_right(input_ids, pad_token_id):\n \"\"\"Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).\"\"\"\n prev_output_tokens = input_ids.clone()\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\n return prev_output_tokens\n\n\ndef make_padding_mask(input_ids, padding_idx=1):\n \"\"\"True for pad tokens\"\"\"\n padding_mask = input_ids.eq(padding_idx)\n if not padding_mask.any():\n padding_mask = None\n return padding_mask\n\n\n# Helper Modules\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)\n self.normalize_before = config.normalize_before\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def forward(self, x, encoder_padding_mask, output_attentions=False, 
attn_mask=None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n for t_tgt, t_src is excluded (or masked out), =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, attn_weights = self.self_attn(\n query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions,\n attn_mask=attn_mask\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if torch.isinf(x).any() or torch.isnan(x).any():\n clamp_value = torch.finfo(x.dtype).max - 1000\n x = torch.clamp(x, min=-clamp_value, max=clamp_value)\n return x, attn_weights\n\n\nclass BartEncoder(nn.Module):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer\n is a :class:`EncoderLayer`.\n\n Args:\n config: BartConfig\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens):\n super().__init__()\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = embed_tokens.embedding_dim\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = config.max_position_embeddings\n\n self.embed_tokens = embed_tokens\n if config.static_position_embeddings:\n self.embed_positions = SinusoidalPositionalEmbedding(\n config.max_position_embeddings, embed_dim, self.padding_idx\n )\n else:\n self.embed_positions = LearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n self.padding_idx,\n config.extra_pos_embeddings,\n )\n self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])\n self.layernorm_embedding = LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()\n # mbart has one extra layer_norm\n self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None\n\n def forward(\n self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False,\n return_dict=False, segment_embedding=None\n ):\n \"\"\"\n Args:\n input_ids (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n attention_mask (torch.LongTensor): indicating which indices are padding tokens.\n Returns:\n BaseModelOutput or Tuple comprised of:\n - **x** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_states** (tuple(torch.FloatTensor)): all intermediate\n hidden states of shape `(src_len, batch, embed_dim)`.\n Only populated if *output_hidden_states:* is True.\n - **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.\n During training might not be of length n_layers because of layer dropout.\n \"\"\"\n # check attention mask and invert\n if attention_mask is not None:\n attention_mask = invert_mask(attention_mask)\n\n if isinstance(input_ids, 
tuple):\n src_len, hyp_len = input_ids[0].size(1), input_ids[1].size(1)\n\n if segment_embedding is not None:\n embed_pos0 = self.embed_positions(input_ids[0])\n embed_pos0 = embed_pos0 + segment_embedding[0,:].to(embed_pos0)\n embed_pos1 = self.embed_positions(input_ids[1])\n embed_pos1 = embed_pos1 + segment_embedding[1,:].to(embed_pos1)\n embed_pos = torch.cat([embed_pos0, embed_pos1], dim=0)\n else:\n embed_pos = torch.cat([self.embed_positions(input_ids[0]), \n self.embed_positions(input_ids[1])], dim=0)\n input_ids = torch.cat(input_ids, dim=1)\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n else:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n embed_pos = self.embed_positions(input_ids)\n\n x = inputs_embeds + embed_pos\n x = self.layernorm_embedding(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n encoder_states = [] if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i, encoder_layer in enumerate(self.layers):\n\n if output_hidden_states:\n encoder_states.append(x)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop): # skip the layer\n attn = None\n else:\n x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)\n\n if output_attentions:\n all_attentions = all_attentions + (attn,)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n if output_hidden_states:\n encoder_states.append(x)\n # T x B x C -> B x T x C\n encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if not return_dict:\n return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n\n self.self_attn = Attention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.normalize_before = config.normalize_before\n\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.encoder_attn = Attention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n encoder_decoder_attention=True,\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)\n self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def forward(\n self,\n x,\n encoder_hidden_states,\n encoder_attn_mask=None,\n layer_state=None,\n causal_mask=None,\n decoder_padding_mask=None,\n output_attentions=False,\n ):\n residual = x\n\n if layer_state is None:\n layer_state = {}\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n # Self Attention\n\n x, self_attn_weights = self.self_attn(\n query=x,\n key=x,\n layer_state=layer_state, # adds keys to layer state\n key_padding_mask=decoder_padding_mask,\n attn_mask=causal_mask,\n output_attentions=output_attentions,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if 
not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n # Cross attention\n residual = x\n assert self.encoder_attn.cache_key != self.self_attn.cache_key\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n if isinstance(encoder_hidden_states, tuple):\n x0, _ = self.encoder_attn(\n query=x,\n key=encoder_hidden_states[0],\n key_padding_mask=encoder_attn_mask[0],\n layer_state=layer_state, # mutates layer state\n idx=\"0\",\n )\n x1, _ = self.encoder_attn(\n query=x,\n key=encoder_hidden_states[1],\n key_padding_mask=encoder_attn_mask[1],\n layer_state=layer_state, # mutates layer state\n idx=\"1\",\n )\n x = (x0 + x1) / 2\n else:\n x, _ = self.encoder_attn(\n query=x,\n key=encoder_hidden_states,\n key_padding_mask=encoder_attn_mask,\n layer_state=layer_state, # mutates layer state\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n # Fully Connected\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n return (\n x,\n self_attn_weights,\n layer_state,\n ) # just self_attn weights for now, following t5, layer_state = cache for decoding\n\n\nclass BartDecoder(nn.Module):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer\n is a :class:`DecoderLayer`.\n Args:\n config: BartConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):\n super().__init__()\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm # layernorm variant\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.embed_tokens = embed_tokens\n if config.static_position_embeddings:\n self.embed_positions = SinusoidalPositionalEmbedding(\n config.max_position_embeddings, config.d_model, config.pad_token_id\n )\n else:\n self.embed_positions = LearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n self.padding_idx,\n config.extra_pos_embeddings,\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config) for _ in range(config.decoder_layers)]\n ) # type: List[DecoderLayer]\n self.layernorm_embedding = LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()\n self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None\n\n def forward(\n self,\n input_ids,\n encoder_hidden_states,\n encoder_padding_mask,\n decoder_padding_mask,\n decoder_causal_mask,\n past_key_values=None,\n use_cache=False,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n **unused,\n ):\n \"\"\"\n Includes several features from \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n input_ids (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_hidden_states: output from the encoder, used for\n encoder-side attention\n encoder_padding_mask: for ignoring pad tokens\n past_key_values (dict or None): dictionary 
used for storing state during generation\n\n Returns:\n BaseModelOutputWithPast or tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - the cache\n - hidden states\n - attentions\n \"\"\"\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_cached_states\")\n if \"decoder_past_key_values\" in unused:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_past_key_values\")\n\n # check attention mask and invert\n if encoder_padding_mask is not None:\n if isinstance(encoder_padding_mask, tuple):\n encoder_padding_mask = (invert_mask(encoder_padding_mask[0]), invert_mask(encoder_padding_mask[1]))\n else:\n encoder_padding_mask = invert_mask(encoder_padding_mask)\n\n # embed positions\n positions = self.embed_positions(input_ids, use_cache=use_cache)\n\n if use_cache:\n input_ids = input_ids[:, -1:]\n positions = positions[:, -1:]\n\n x = self.embed_tokens(input_ids) * self.embed_scale\n if self.do_blenderbot_90_layernorm:\n x = self.layernorm_embedding(x)\n x += positions\n else:\n x += positions\n x = self.layernorm_embedding(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\n x = x.transpose(0, 1)\n if isinstance(encoder_hidden_states, tuple):\n encoder_hidden_states = (encoder_hidden_states[0].transpose(0, 1), encoder_hidden_states[1].transpose(0, 1))\n else:\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = []\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (x,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n layer_state = past_key_values[idx] if past_key_values is not None else None\n\n x, layer_self_attn, layer_past = decoder_layer(\n x,\n encoder_hidden_states,\n encoder_attn_mask=encoder_padding_mask,\n decoder_padding_mask=decoder_padding_mask,\n layer_state=layer_state,\n causal_mask=decoder_causal_mask,\n output_attentions=output_attentions,\n )\n\n if use_cache:\n next_decoder_cache.append(layer_past.copy())\n\n if output_attentions:\n all_self_attns += (layer_self_attn,)\n\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\n x = self.layer_norm(x)\n\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\n if output_hidden_states:\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\n x = x.transpose(0, 1)\n if isinstance(encoder_hidden_states, tuple):\n encoder_hidden_states = (encoder_hidden_states[0].transpose(0, 1), encoder_hidden_states[1].transpose(0, 1))\n else:\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\n\n next_cache = next_decoder_cache if use_cache else None\n\n if not return_dict:\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\n return BaseModelOutputWithPast(\n 
last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\n )\n\n\ndef _reorder_buffer(attn_cache, new_order):\n for k, input_buffer_k in attn_cache.items():\n if input_buffer_k is not None:\n attn_cache[k] = input_buffer_k.index_select(0, new_order)\n return attn_cache\n\n\nclass Attention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n bias=True,\n encoder_decoder_attention=False, # otherwise self_attention\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.encoder_decoder_attention = encoder_decoder_attention\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.cache_key = \"encoder_decoder\" if self.encoder_decoder_attention else \"self\"\n\n def _shape(self, tensor, seq_len, bsz):\n return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n layer_state: Optional[Dict[str, Optional[Tensor]]] = None,\n attn_mask: Optional[Tensor] = None,\n output_attentions=False,\n idx=\"\",\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time(SeqLen) x Batch x Channel\"\"\"\n static_kv: bool = self.encoder_decoder_attention\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n # get here for encoder decoder cause of static_kv\n if layer_state is not None: # reuse k,v and encoder_padding_mask\n saved_state = layer_state.get(self.cache_key+idx, {})\n if \"prev_key\" in saved_state and static_kv:\n # previous time steps are cached - no need to recompute key and value if they are static\n key = None\n else:\n saved_state = None\n layer_state = {}\n\n q = self.q_proj(query) * self.scaling\n if static_kv:\n if key is None:\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n else:\n k = self.k_proj(query)\n v = self.v_proj(query)\n\n q = self._shape(q, tgt_len, bsz)\n if k is not None:\n k = self._shape(k, -1, bsz)\n if v is not None:\n v = self._shape(v, -1, bsz)\n\n if saved_state is not None:\n k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)\n\n # Update cache\n layer_state[self.cache_key+idx] = {\n \"prev_key\": k.view(bsz, self.num_heads, -1, self.head_dim),\n \"prev_value\": v.view(bsz, self.num_heads, -1, self.head_dim),\n \"prev_key_padding_mask\": key_padding_mask if not static_kv else None,\n }\n\n assert k is not None\n src_len = k.size(1)\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)\n\n if attn_mask is not None:\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n # This is part of a workaround to get around fork/join parallelism not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 
0:\n key_padding_mask = None\n assert key_padding_mask is None or key_padding_mask.size()[:2] == (\n bsz,\n src_len,\n )\n\n if key_padding_mask is not None: # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)\n attn_weights = attn_weights.masked_fill(reshaped, float(\"-inf\"))\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n attn_weights = F.softmax(attn_weights, dim=-1)\n attn_probs = F.dropout(\n attn_weights,\n p=self.dropout,\n training=self.training,\n )\n\n assert v is not None\n attn_output = torch.bmm(attn_probs, v)\n assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if output_attentions:\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n else:\n attn_weights = None\n return attn_output, attn_weights\n\n def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = torch.reshape(_prev_key, [bsz * self.num_heads, -1, self.head_dim])\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = torch.reshape(_prev_value, [bsz * self.num_heads, -1, self.head_dim])\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n assert k is not None and v is not None\n prev_key_padding_mask: Optional[Tensor] = saved_state.get(\"prev_key_padding_mask\", None)\n if prev_key_padding_mask is not None:\n if static_kv:\n new_key_padding_mask = prev_key_padding_mask\n else:\n new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)\n else:\n new_key_padding_mask = key_padding_mask\n return k, v, new_key_padding_mask\n\n\nclass BartClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n # This can trivially be shared with RobertaClassificationHead\n\n def __init__(\n self,\n input_dim,\n inner_dim,\n num_classes,\n pooler_dropout,\n ):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, x):\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\nclass LearnedPositionalEmbedding(nn.Embedding):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size.\n Padding ids are ignored by either offsetting based on padding_idx\n or by setting padding_idx to None and ensuring that the appropriate\n position ids are passed to the forward function.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset):\n # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2\n # and adjust num_embeddings appropriately. 
Other models dont have this hack\n self.offset = offset\n assert padding_idx is not None\n num_embeddings += offset\n super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)\n\n def forward(self, input_ids, use_cache=False):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids.shape[:2]\n if use_cache:\n positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing\n else:\n # starts at 0, ends at 1-seq_len\n positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)\n return super().forward(positions + self.offset)\n\n\ndef LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):\n if torch.cuda.is_available():\n try:\n from apex.normalization import FusedLayerNorm\n\n return FusedLayerNorm(normalized_shape, eps, elementwise_affine)\n except ImportError:\n pass\n return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)\n\n\ndef fill_with_neg_inf(t):\n \"\"\"FP16-compatible function that fills a input_ids with -inf.\"\"\"\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\n\n# Public API\ndef _get_shape(t):\n return getattr(t, \"shape\", None)\n\n\n@add_start_docstrings(\n \"The bare BART Model outputting raw hidden-states without any specific head on top.\",\n BART_START_DOCSTRING,\n)\nclass BartModel(PretrainedBartModel):\n def __init__(self, config: BartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = BartEncoder(config, self.shared)\n self.decoder = BartDecoder(config, self.shared)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs: Optional[Tuple] = None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n\n if decoder_input_ids is None:\n use_cache = False\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # make masks if user doesn't supply\n if not use_cache:\n decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(\n self.config,\n input_ids,\n decoder_input_ids=decoder_input_ids,\n decoder_padding_mask=decoder_attention_mask,\n causal_mask_dtype=self.shared.weight.dtype,\n )\n else:\n decoder_padding_mask, causal_mask = None, None\n\n assert decoder_input_ids is not None\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n 
output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOuput when return_dict=False\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n decoder_input_ids,\n encoder_outputs[0],\n attention_mask,\n decoder_padding_mask,\n decoder_causal_mask=causal_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_output_embeddings(self):\n return _make_linear_from_emb(self.shared) # make it on the fly\n\n\n@add_start_docstrings(\n \"The BART Model with a language modeling head. Can be used for summarization.\", BART_START_DOCSTRING\n)\nclass BartForConditionalGeneration(PretrainedBartModel):\n base_model_prefix = \"model\"\n authorized_missing_keys = [r\"final_logits_bias\", r\"encoder\\.version\", r\"decoder\\.version\"]\n\n def __init__(self, config: BartConfig):\n super().__init__(config)\n base_model = BartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n old_num_tokens = self.model.shared.num_embeddings\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self.model.shared = new_embeddings\n self._resize_final_logits_bias(new_num_tokens, old_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **unused,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, 
`optional`):\n Labels for computing the masked language modeling loss.\n Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens\n with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n Conditional generation example::\n\n >>> # Mask filling only works for bart-large\n >>> from transformers import BartTokenizer, BartForConditionalGeneration\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n >>> # ['good', 'great', 'all', 'really', 'very']\n \"\"\"\n if \"lm_labels\" in unused:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = unused.pop(\"lm_labels\")\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_cached_states\")\n if \"decoder_past_key_values\" in unused:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_past_key_values\")\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n use_cache = False\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # TODO(SS): do we need to ignore pad tokens in labels?\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs\n ):\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def adjust_logits_during_generation(self, logits, cur_len, max_length):\n if cur_len == 1 and self.config.force_bos_token_to_be_generated:\n self._force_token_ids_generation(logits, self.config.bos_token_id)\n elif cur_len == max_length - 1 and self.config.eos_token_id is not None:\n self._force_token_ids_generation(logits, self.config.eos_token_id)\n return logits\n\n def _force_token_ids_generation(self, scores, token_id) -> None:\n \"\"\"force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float(\"inf\"))\"\"\"\n scores[:, [x for x in range(self.config.vocab_size) if x != token_id]] = -float(\"inf\")\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = []\n for layer_past in past:\n # get the correct batch idx from decoder layer's batch dim for cross and self-attn\n layer_past_new = {\n attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()\n }\n reordered_past.append(layer_past_new)\n return reordered_past\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_output_embeddings(self):\n return _make_linear_from_emb(self.model.shared) # make it on the fly\n\n\n@add_start_docstrings(\n \"\"\"Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForSequenceClassification(PretrainedBartModel):\n def __init__(self, config: BartConfig, **kwargs):\n super().__init__(config, **kwargs)\n self.model = BartModel(config)\n self.classification_head = BartClassificationHead(\n config.d_model,\n config.d_model,\n config.num_labels,\n config.classifier_dropout,\n )\n self.model._init_weights(self.classification_head.dense)\n self.model._init_weights(self.classification_head.out_proj)\n\n @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n x = outputs[0] # last hidden state\n eos_mask = input_ids.eq(self.config.eos_token_id)\n if len(torch.unique(eos_mask.sum(1))) > 1:\n raise ValueError(\"All examples must have the 
same number of <eos> tokens.\")\n sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]\n logits = self.classification_head(sentence_representation)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForQuestionAnswering(PretrainedBartModel):\n def __init__(self, config):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = config.num_labels\n\n self.model = BartModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.model._init_weights(self.qa_outputs)\n\n @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n start_positions=None,\n end_positions=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if start_positions is not None and end_positions is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a 
dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (\n start_logits,\n end_logits,\n ) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return Seq2SeqQuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\nclass SinusoidalPositionalEmbedding(nn.Embedding):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions, embedding_dim, padding_idx=None):\n super().__init__(num_positions, embedding_dim)\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n self.weight = self._init_weight(self.weight)\n\n @staticmethod\n def _init_weight(out: nn.Parameter):\n \"\"\"Identical to the XLM create_sinusoidal_embeddings except features are not interleaved.\n The cos features are in the 2nd half of the vector. [dim // 2:]\n \"\"\"\n n_pos, dim = out.shape\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n out[:, 0 : dim // 2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) # This line breaks for odd n_pos\n out[:, dim // 2 :] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n return out\n\n @torch.no_grad()\n def forward(self, input_ids, use_cache=False):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids.shape[:2]\n if use_cache:\n positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing\n else:\n # starts at 0, ends at 1-seq_len\n positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)\n return super().forward(positions)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.isnan", "torch.finfo", "torch.bmm", "torch.cuda.is_available", "numpy.cos", "torch.nn.CrossEntropyLoss", "torch.reshape", "numpy.sin", "torch.nn.LayerNorm", "torch.tensor", "torch.zeros", "torch.nn.Identity", "torch.nn.functional.dropout", "torch.clamp", "torch.nn.functional.linear", "torch.isinf", "numpy.power", "torch.nn.functional.softmax", "torch.nn.Dropout", "torch.arange", "torch.no_grad", "torch.tanh", "torch.nn.Embedding" ] ]
Cyril9227/EfficientMixNet
[ "34d50152d3894c0c7b5175d43e42c72a04c49f19" ]
[ "keras_efficientmixnets/custom_optimizers.py" ]
[ "#! -*- coding: utf-8 -*-\n\nfrom tensorflow.keras import backend as K ### REM: idem\nfrom tensorflow.keras.optimizers import (SGD, Adadelta, Adagrad, Adam, Adamax, Nadam,\n Optimizer, RMSprop)\nfrom tensorflow.keras.utils import (deserialize_keras_object,\n serialize_keras_object)\n\n\n# Ported from https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam.py\nclass RectifiedAdam(Optimizer):\n \"\"\"RectifiedAdam optimizer.\n Default parameters follow those provided in the original paper.\n # Arguments\n lr: float >= 0. Learning rate.\n final_lr: float >= 0. Final learning rate.\n beta_1: float, 0 < beta < 1. Generally close to 1.\n beta_2: float, 0 < beta < 1. Generally close to 1.\n gamma: float >= 0. Convergence speed of the bound function.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n weight_decay: Weight decay weight.\n amsbound: boolean. Whether to apply the AMSBound variant of this\n algorithm.\n # References\n - [On the Variance of the Adaptive Learning Rate and Beyond]\n (https://arxiv.org/abs/1908.03265)\n - [Adam - A Method for Stochastic Optimization]\n (https://arxiv.org/abs/1412.6980v8)\n - [On the Convergence of Adam and Beyond]\n (https://openreview.net/forum?id=ryQu7f-RZ)\n \"\"\"\n\n def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,\n epsilon=None, decay=0., weight_decay=0.0, **kwargs):\n super(RectifiedAdam, self).__init__(**kwargs)\n\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n self.lr = K.variable(lr, name='lr')\n self.beta_1 = K.variable(beta_1, name='beta_1')\n self.beta_2 = K.variable(beta_2, name='beta_2')\n self.decay = K.variable(decay, name='decay')\n\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n self.weight_decay = float(weight_decay)\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = [K.update_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,\n K.dtype(self.decay))))\n\n t = K.cast(self.iterations, K.floatx()) + 1\n\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n self.weights = [self.iterations] + ms + vs\n\n for p, g, m, v in zip(params, grads, ms, vs):\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n\n beta2_t = self.beta_2 ** t\n N_sma_max = 2 / (1 - self.beta_2) - 1\n N_sma = N_sma_max - 2 * t * beta2_t / (1 - beta2_t)\n\n # apply weight decay\n if self.weight_decay != 0.:\n p_wd = p - self.weight_decay * lr * p\n else:\n p_wd = None\n\n if p_wd is None:\n p_ = p\n else:\n p_ = p_wd\n\n def gt_path():\n step_size = lr * K.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max /\n (N_sma_max - 2)) / (1 - self.beta_1 ** t)\n\n denom = K.sqrt(v_t) + self.epsilon\n p_t = p_ - step_size * (m_t / denom)\n\n return p_t\n\n def lt_path():\n step_size = lr / (1 - self.beta_1 ** t)\n p_t = p_ - step_size * m_t\n\n return p_t\n\n p_t = K.switch(N_sma > 5, gt_path, lt_path)\n\n self.updates.append(K.update(m, m_t))\n self.updates.append(K.update(v, v_t))\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(K.update(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {'lr': float(K.get_value(self.lr)),\n 'beta_1': float(K.get_value(self.beta_1)),\n 'beta_2': float(K.get_value(self.beta_2)),\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon,\n 'weight_decay': self.weight_decay}\n base_config = super(RectifiedAdam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n ### REM : Toujours aussi lait !\n \n\n\nclass Lookahead(object):\n \"\"\"Add the [Lookahead Optimizer](https://arxiv.org/abs/1907.08610) functionality for [keras](https://keras.io/).\n \"\"\"\n\n def __init__(self, k=5, alpha=0.5):\n self.k = k\n self.alpha = alpha\n self.count = 0\n\n def inject(self, model):\n \"\"\"Inject the Lookahead algorithm for the given model.\n The following code is modified from keras's _make_train_function method.\n See: https://github.com/keras-team/keras/blob/master/keras/engine/training.py#L497\n \"\"\"\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. 
Updates weights at each call.\n            fast_train_function = K.function(\n                inputs,\n                [model.total_loss] + model.metrics_tensors,\n                updates=fast_updates,\n                name='fast_train_function',\n                **model._function_kwargs)\n\n            def F(inputs):\n                self.count += 1\n                R = fast_train_function(inputs)\n                if self.count % self.k == 0:\n                    K.batch_get_value(slow_updates)\n                    K.batch_get_value(copy_updates)\n                return R\n\n            #### REM: This is not a very clean way of doing things\n            #### It breaks the encapsulation of the class \n            model.train_function = F\n\n\n\ndef deserialize(config, custom_objects=None):\n    \"\"\"Inverse of the `serialize` function.\n    # Arguments\n        config: Optimizer configuration dictionary.\n        custom_objects: Optional dictionary mapping\n            names (strings) to custom objects\n            (classes and functions)\n            to be considered during deserialization.\n    # Returns\n        A Keras Optimizer instance.\n    \"\"\"\n    all_classes = {\n        'sgd': SGD,\n        'rmsprop': RMSprop,\n        'adagrad': Adagrad,\n        'adadelta': Adadelta,\n        'adam': Adam,\n        'adamax': Adamax,\n        'nadam': Nadam,\n        'radam' : RectifiedAdam\n    }\n    # Make deserialization case-insensitive for built-in optimizers.\n    if config['class_name'].lower() in all_classes:\n        config['class_name'] = config['class_name'].lower()\n    return deserialize_keras_object(config,\n                                    module_objects=all_classes,\n                                    custom_objects=custom_objects,\n                                    printable_module_name='optimizer')\n\n\ndef get(identifier): ### REM: This can get anything; the function name should be more explicit\n    \"\"\"Retrieves a Keras Optimizer instance.\n    # Arguments\n        identifier: Optimizer identifier, one of\n            - String: name of an optimizer\n            - Dictionary: configuration dictionary.\n            - Keras Optimizer instance (it will be returned unchanged).\n            - TensorFlow Optimizer instance\n                (it will be wrapped as a Keras Optimizer).\n    # Returns\n        A Keras Optimizer instance.\n    # Raises\n        ValueError: If `identifier` cannot be interpreted.\n    \"\"\"\n\n    if isinstance(identifier, dict):\n        return deserialize(identifier)\n    elif isinstance(identifier, str):\n        config = {'class_name': str(identifier), 'config': {}}\n        return deserialize(config)\n    if isinstance(identifier, Optimizer):\n        return identifier\n    else:\n        raise ValueError('Could not interpret optimizer identifier: ' +\n                         str(identifier))\n" ]
[ [ "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.dtype", "tensorflow.keras.backend.variable", "tensorflow.keras.backend.batch_get_value", "tensorflow.keras.backend.switch", "tensorflow.keras.backend.square", "tensorflow.keras.backend.learning_phase", "tensorflow.keras.backend.name_scope", "tensorflow.keras.backend.epsilon", "tensorflow.keras.backend.update", "tensorflow.keras.backend.function", "tensorflow.keras.backend.get_value", "tensorflow.keras.backend.floatx", "tensorflow.keras.utils.deserialize_keras_object", "tensorflow.keras.backend.sqrt", "tensorflow.keras.backend.update_add" ] ]
Carreau/yt
[ "d7e1cf22a8349b8a62b9c569017643ee233d9c4f" ]
[ "yt/frontends/moab/data_structures.py" ]
[ "import os\nimport weakref\n\nimport numpy as np\n\nfrom yt.data_objects.index_subobjects.unstructured_mesh import SemiStructuredMesh\nfrom yt.data_objects.static_output import Dataset\nfrom yt.funcs import setdefaultattr\nfrom yt.geometry.unstructured_mesh_handler import UnstructuredIndex\nfrom yt.utilities.file_handler import HDF5FileHandler\nfrom yt.utilities.on_demand_imports import _h5py as h5py\n\nfrom .fields import MoabFieldInfo, PyneFieldInfo\n\n\nclass MoabHex8Mesh(SemiStructuredMesh):\n _connectivity_length = 8\n _index_offset = 1\n\n\nclass MoabHex8Hierarchy(UnstructuredIndex):\n def __init__(self, ds, dataset_type=\"h5m\"):\n self.dataset = weakref.proxy(ds)\n self.dataset_type = dataset_type\n self.index_filename = self.dataset.parameter_filename\n self.directory = os.path.dirname(self.index_filename)\n self._fhandle = h5py.File(self.index_filename, mode=\"r\")\n\n UnstructuredIndex.__init__(self, ds, dataset_type)\n\n self._fhandle.close()\n\n def _initialize_mesh(self):\n con = self._fhandle[\"/tstt/elements/Hex8/connectivity\"][:]\n con = np.asarray(con, dtype=\"int64\")\n coords = self._fhandle[\"/tstt/nodes/coordinates\"][:]\n coords = np.asarray(coords, dtype=\"float64\")\n self.meshes = [MoabHex8Mesh(0, self.index_filename, con, coords, self)]\n\n def _detect_output_fields(self):\n self.field_list = [\n (\"moab\", f) for f in self._fhandle[\"/tstt/elements/Hex8/tags\"].keys()\n ]\n\n def _count_grids(self):\n self.num_grids = 1\n\n\nclass MoabHex8Dataset(Dataset):\n _index_class = MoabHex8Hierarchy\n _field_info_class = MoabFieldInfo\n periodicity = (False, False, False)\n\n def __init__(\n self,\n filename,\n dataset_type=\"moab_hex8\",\n storage_filename=None,\n units_override=None,\n unit_system=\"cgs\",\n ):\n self.fluid_types += (\"moab\",)\n Dataset.__init__(\n self,\n filename,\n dataset_type,\n units_override=units_override,\n unit_system=unit_system,\n )\n self.storage_filename = storage_filename\n self.filename = filename\n self._handle = HDF5FileHandler(filename)\n\n def _set_code_unit_attributes(self):\n # Almost everything is regarded as dimensionless in MOAB, so these will\n # not be used very much or at all.\n setdefaultattr(self, \"length_unit\", self.quan(1.0, \"cm\"))\n setdefaultattr(self, \"time_unit\", self.quan(1.0, \"s\"))\n setdefaultattr(self, \"mass_unit\", self.quan(1.0, \"g\"))\n\n def _parse_parameter_file(self):\n self._handle = h5py.File(self.parameter_filename, mode=\"r\")\n coords = self._handle[\"/tstt/nodes/coordinates\"]\n self.domain_left_edge = coords[0]\n self.domain_right_edge = coords[-1]\n self.domain_dimensions = self.domain_right_edge - self.domain_left_edge\n self.refine_by = 2\n self.dimensionality = len(self.domain_dimensions)\n self.current_time = 0.0\n self.unique_identifier = self.parameter_filename\n self.cosmological_simulation = False\n self.num_ghost_zones = 0\n self.current_redshift = 0.0\n self.omega_lambda = 0.0\n self.omega_matter = 0.0\n self.hubble_constant = 0.0\n self.cosmological_simulation = 0\n\n @classmethod\n def _is_valid(cls, filename, *args, **kwargs):\n return filename.endswith(\".h5m\")\n\n def __repr__(self):\n return self.basename.rsplit(\".\", 1)[0]\n\n\nclass PyneHex8Mesh(SemiStructuredMesh):\n _connectivity_length = 8\n _index_offset = 0\n\n\nclass PyneMeshHex8Hierarchy(UnstructuredIndex):\n def __init__(self, ds, dataset_type=\"moab_hex8_pyne\"):\n self.dataset = weakref.proxy(ds)\n self.dataset_type = dataset_type\n self.index_filename = self.dataset.parameter_filename\n self.directory = 
os.getcwd()\n self.pyne_mesh = ds.pyne_mesh\n\n super().__init__(ds, dataset_type)\n\n def _initialize_mesh(self):\n from pymoab import types\n\n ents = list(self.pyne_mesh.structured_iterate_vertex())\n coords = self.pyne_mesh.mesh.get_coords(ents).astype(\"float64\")\n coords = coords.reshape(len(coords) // 3, 3)\n hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX)\n vind = []\n for h in hexes:\n vind.append(\n self.pyne_mesh.mesh.get_adjacencies(\n h, 0, create_if_missing=True, op_type=types.UNION\n )\n )\n vind = np.asarray(vind, dtype=np.int64)\n vind = vind.reshape(len(vind) // 8, 8)\n self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)]\n\n def _detect_output_fields(self):\n self.field_list = [(\"pyne\", f) for f in self.pyne_mesh.tags.keys()]\n\n def _count_grids(self):\n self.num_grids = 1\n\n\nclass PyneMoabHex8Dataset(Dataset):\n _index_class = PyneMeshHex8Hierarchy\n _fieldinfo_fallback = MoabFieldInfo\n _field_info_class = PyneFieldInfo\n periodicity = (False, False, False)\n\n def __init__(\n self,\n pyne_mesh,\n dataset_type=\"moab_hex8_pyne\",\n storage_filename=None,\n units_override=None,\n unit_system=\"cgs\",\n ):\n self.fluid_types += (\"pyne\",)\n filename = f\"pyne_mesh_{id(pyne_mesh)}\"\n self.pyne_mesh = pyne_mesh\n Dataset.__init__(\n self,\n str(filename),\n dataset_type,\n units_override=units_override,\n unit_system=unit_system,\n )\n self.storage_filename = storage_filename\n self.filename = filename\n\n def _set_code_unit_attributes(self):\n # Almost everything is regarded as dimensionless in MOAB, so these will\n # not be used very much or at all.\n setdefaultattr(self, \"length_unit\", self.quan(1.0, \"cm\"))\n setdefaultattr(self, \"time_unit\", self.quan(1.0, \"s\"))\n setdefaultattr(self, \"mass_unit\", self.quan(1.0, \"g\"))\n\n def _parse_parameter_file(self):\n ents = list(self.pyne_mesh.structured_iterate_vertex())\n coords = self.pyne_mesh.mesh.get_coords(ents)\n self.domain_left_edge = coords[0:3]\n self.domain_right_edge = coords[-3:]\n self.domain_dimensions = self.domain_right_edge - self.domain_left_edge\n self.refine_by = 2\n self.dimensionality = len(self.domain_dimensions)\n self.current_time = 0.0\n self.unique_identifier = self.parameter_filename\n self.cosmological_simulation = False\n self.num_ghost_zones = 0\n self.current_redshift = 0.0\n self.omega_lambda = 0.0\n self.omega_matter = 0.0\n self.hubble_constant = 0.0\n self.cosmological_simulation = 0\n\n @classmethod\n def _is_valid(cls, filename, *args, **kwargs):\n return False\n\n def __repr__(self):\n return self.basename.rsplit(\".\", 1)[0]\n" ]
[ [ "numpy.asarray" ] ]
aron-kvvon/ai-dataset-python
[ "d10feb0f2e301456995a99227e82a4f294e0ecb7" ]
[ "ai_dataset/utils/convert_type.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nModule Description:\n This module is for converting dataset type.\n\n\"\"\"\nimport numpy as np\nimport torch\nimport tensorflow as tf\n\nfrom ai_dataset.types.keras import KerasData\nfrom ai_dataset.types.torchvision import TorchData\n\n\ndef torch2keras():\n pass\n\n\ndef keras2torch(keras_data: KerasData):\n \"\"\"\n Dataset type conversion to TorchData\n Torchvision dataset's image(X) shape: [filter][width][height]\n label(y) type: integer (e.g. 3)\n Keras dataset's image(X) shape: [width][height][filter]\n label(y) type: a list with the length of classes\n (e.g. [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] )\n :param keras_data: an instance of KerasData\n :return: an instance of TorchData\n \"\"\"\n\n images = []\n labels = []\n ext_labels = []\n for sample in keras_data.get_dataset():\n # np.swapaxes internally convert the type of Tensor to numpy(only for tf.Tensor)\n # swap input parameter 0, 2 means the highest dim. goes to the lowest.\n # swap input parameter 2, 1, means change height and width dimension.\n reshape_image = np.swapaxes(sample[0], 0, 2)\n images.append(np.swapaxes(reshape_image, 2, 1))\n\n int_label = tf.where(sample[1] == 1)[0][0]\n labels.append(int_label.numpy())\n ext_labels.append(sample[2].numpy() if len(sample) > 2 else np.zeros(1))\n\n torch_data = torch.utils.data.TensorDataset(torch.tensor(images),\n torch.tensor(labels),\n torch.tensor(ext_labels))\n return TorchData(type=keras_data.type, is_train=keras_data.is_train, dataset_in=torch_data)\n\n" ]
[ [ "numpy.swapaxes", "torch.tensor", "numpy.zeros", "tensorflow.where" ] ]
Hunteerq/GeneticAlgorithms
[ "267512e21a5c6ca1d0ffcd7162ea4bd6c8ca58ed" ]
[ "libs/generator/population_generator.py" ]
[ "import numpy as np\n\n\nclass PopulationGenerator:\n\n def __init__(self, algorithm_configuration):\n self.__algorithm_configuration = algorithm_configuration\n\n def generate_population(self):\n return np.random.uniform(low=self.__algorithm_configuration.left_range_number,\n high=self.__algorithm_configuration.right_range_number,\n size=(self.__algorithm_configuration.population_number,\n self.__algorithm_configuration.variables_number))\n" ]
[ [ "numpy.random.uniform" ] ]
grayy921013/RecSys
[ "ce0683b86755935c943722cbba5541931978498e" ]
[ "Algorithms/CB/CBAlgorithm.py" ]
[ "\"\"\"\n Content Based Algorithms Base Class\n===================================================\n\nAll CBAlgorithms should inherit from this class and included the methods here defined\n\n\"\"\"\n\n# Author: Caleb De La Cruz P. <delacruzp>\n\n\nimport logging\nfrom time import time\nimport numpy as np\nimport scipy as sp\nfrom abc import ABCMeta, abstractmethod\n\nlogger = logging.getLogger(__name__)\n\n\nclass CBAlgorithm(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self.ids = None\n self.similarity_matrix = None\n\n def __str__(self):\n return self.__name__\n \n @abstractmethod\n def index(self, data):\n '''\n Index the dataset\n :param data: Array of strings\n :return: Sparse matrix NxM where N is the same length of data and M is the number of features\n '''\n if not isinstance(data, list):\n raise AttributeError(\"The parameter data should be an array of strings\")\n\n matrix = np.array(data)\n self.ids = matrix[:, 0].astype(int)\n values = matrix[:, 1]\n\n return values.tolist()\n\n @abstractmethod\n def similarity(self, index):\n '''\n Given a index (Matrix NxM) With N items and M features, calculates the similarity between each pair of items\n :param index: Numpy matrix\n :return: Sparse matrix NxN where every cell is the similarity of its indexes\n '''\n if not isinstance(index, np.ndarray) and not isinstance(index, sp.sparse.spmatrix):\n logger.error(type(index))\n raise AttributeError(\"The parameter index should be an numpy matrix\")\n\n def add_similarity(self, index=None):\n '''\n Given a index (Matrix NxM) With N items and M features, calculates the similarity between each pair of items\n :param index: Numpy matrix\n :return: Sparse matrix NxN where every cell is the similarity of its indexes\n '''\n if index is None:\n index = self.indexed\n if index is None:\n raise AttributeError(\"You should run the index function before calculating the similarity\")\n\n t0 = time()\n\n binary_index = index.copy()\n binary_index[binary_index != 0] = 1\n\n score = index.dot(binary_index.T)\n duration = time() - t0\n # TODO: Figure out how to 0 out main diagonal on sparse matrix\n # np.fill_diagonal(score, 0)\n logger.debug(\"n_samples: %d, n_related_samples: %d\" % score.shape)\n logger.debug(\"duration: %f\\n\" % duration)\n\n self.similarity_matrix = score\n\n return score\n\n def dot_product_similarity(self, index=None):\n '''\n Given a index (Matrix NxM) With N items and M features, calculates the similarity between each pair of items\n :param index: Numpy matrix\n :return: Sparse matrix NxN where every cell is the similarity of its indexes\n '''\n if index is None:\n index = self.indexed\n if index is None:\n raise AttributeError(\"You should run the index function before calculating the similarity\")\n\n t0 = time()\n score = index.dot(index.T)\n duration = time() - t0\n # Zero out redundant scores\n # Ex. 
The movies (2,1) and (1,2) will have the same score\n # Thus without loosing generality \n # we will only save the pairs where m1 < m2\n # lower_triangle_idx = np.tril_indices(score.shape[0])\n # score[lower_triangle_idx] = 0\n # score.eliminate_zeros()\n\n # TODO: Figure out how to 0 out main diagonal on sparse matrix\n # np.fill_diagonal(score, 0)\n logger.debug(\"n_samples: %d, n_related_samples: %d\" % score.shape)\n logger.debug(\"duration: %f\\n\" % duration)\n\n self.similarity_matrix = score\n\n return score\n\n def ranking(self, similarity_matrix=None, rank_length=21, flag=False):\n # TODO: remove ranking itself\n\n # Reference:\n # https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array\n if similarity_matrix is None:\n similarity_matrix = self.similarity_matrix\n if similarity_matrix is None:\n raise AttributeError(\"You should run the similarity function before calculating a ranking\")\n\n n_movies = similarity_matrix.shape[0]\n\n top = []\n j = 0\n for i in xrange(n_movies):\n if i % 10000 == 0:\n logger.debug('ranking at position %d' % i)\n\n # Only get the data\n related_movies_scores_for_i = similarity_matrix[i, :].data\n related_movies_id_for_i = similarity_matrix[i, :].indices\n\n # Look the top N values in that N\n if len(related_movies_id_for_i) < rank_length:\n # If it already only has fewer possible similars, just pick the whole set\n top_n_ids = related_movies_id_for_i\n top_n_scores = related_movies_scores_for_i\n j += 1\n else:\n # Split the whole thing\n top_n = np.argpartition(related_movies_scores_for_i, -rank_length, axis=0)[-rank_length:]\n top_n_ids = related_movies_id_for_i[top_n]\n top_n_scores = related_movies_scores_for_i[top_n]\n\n # Transform Index to DB ids\n r = set()\n for i in top_n_ids:\n r.add(self.ids[i])\n # top.append(r)\n\n # TODO: Check if I really should have r as a set\n r2 = zip(list(r), top_n_scores)\n top.append(r2)\n\n logger.debug('Movies Processed: %d Movies without enough Related Movies: %d' % (len(top), j))\n\n if flag:\n top = zip(list(self.ids), top)\n return top\n\n def score(self, similarity_matrix, test_data):\n\n counter_tp = 0\n counter_fp = 0\n\n top = self.ranking(similarity_matrix)\n\n for record in test_data:\n movie_id1, movie_id2, positive = record\n index1 = np.argmax(self.ids == movie_id1)\n index2 = np.argmax(self.ids == movie_id2)\n\n if positive:\n if movie_id2 in top[index1]:\n counter_tp += 0.5\n\n if movie_id1 in top[index2]:\n counter_tp += 0.5\n else:\n if movie_id2 in top[index1]:\n counter_fp += 0.5\n\n if movie_id1 in top[index2]:\n counter_fp += 0.5\n\n logger.debug('TP %d FP %d Total %d' % (counter_tp, counter_fp, len(test_data)))\n\n return counter_tp, counter_fp\n\n def compare(self, top, baseline, baseline_ids):\n\n size_ids = len(self.ids)\n\n if size_ids != len(top):\n raise AttributeError()\n\n related_movies_set = [None] * size_ids\n idx = 0\n j = 0\n\n for i in self.ids:\n i = int(i)\n baseline_idx = np.argmax(baseline_ids == i)\n\n if baseline_idx:\n related_movies_set[idx] = baseline[baseline_idx]\n else:\n related_movies_set[idx] = set()\n j += 1\n idx += 1\n\n logger.debug('Movies %d Skipped %d' % (size_ids, j))\n\n counter = 0\n total = 0\n for i in xrange(len(related_movies_set)):\n counter += len(related_movies_set[i].intersection(top[i]))\n total += len(related_movies_set[i])\n\n if total == 0:\n return -1\n\n PRECISSION = counter / float(total)\n\n # related_movies_set\n return PRECISSION\n\n def destroy(self):\n self.ids = None\n 
self.vectorizer = None\n self.index = None\n self.similarity_matrix = None\n" ]
[ [ "numpy.array", "numpy.argmax", "numpy.argpartition" ] ]
Lyli724/Book_Introduce_Deep-Learning
[ "b24ecc76548794c7e4afaf01142a7e68764744d1", "b24ecc76548794c7e4afaf01142a7e68764744d1", "b24ecc76548794c7e4afaf01142a7e68764744d1" ]
[ "ch03/softmax_function.py", "ch01/Matplotlib_imshow.py", "ch04/Two_Layer_Net.py" ]
[ "# Activation function of the output neurons; softmax is commonly used for classification problems: softmax(Ak) = exp(Ak) / sum(exp(Ai) for i in range(1, n + 1))\n# The numerator of softmax is the exponential of the input signal Ak; the denominator is the sum of the exponentials of all input signals\nimport numpy as np\n'''\nsoftmax function:\nthe outputs of softmax are real numbers in the range 0.0~1.0\nthe outputs of softmax sum to 1, so they can be interpreted as probabilities\n'''\n# way 1: normalize:\ndef softmax_normal(a):\n    exp_a = np.exp(a)\n    sum_exp_a = np.sum(exp_a)\n    y = exp_a / sum_exp_a\n\n    return y\n\n# way 2: multiply by a constant C\ndef softmax_optimize(a):\n    c = np.max(a)\n    exp_a = np.exp(a - c) # avoids overflow\n    sum_exp_a = np.sum(exp_a)\n    y = exp_a / sum_exp_a\n\n    return y", "# pyplot also provides imshow() to display images; use the imread() method from the matplotlib.image module to read an image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\nimg = imread('lena.png')\nplt.imshow(img)\n\nplt.show()", "import numpy as np\nimport sys, os\nsys.path.append(os.pardir)\nfrom common.functions import *\nfrom common.gradient import numerical_gradient\n\nclass TwoLayerNet:\n    def __init__(self, input_size, hidden_size, output_size, weight_init=0.01):\n        # initialize weights\n        self.params ={}\n        self.params['W1'] = weight_init * np.random.randn(input_size, hidden_size)\n        self.params['b1'] = np.zeros(hidden_size)\n        self.params['W2'] = weight_init * np.random.randn(hidden_size, output_size)\n        self.params['b2'] = np.zeros(output_size)\n\n    def predict(self, x):\n        W1, W2 = self.params['W1'], self.params['W2']\n        b1, b2 = self.params['b1'], self.params['b2']\n\n        a1 = np.dot(x, W1) + b1\n        z1 = sigmoid(a1)\n        a2 = np.dot(z1, W2) + b2\n        y = softmax(a2)\n\n        return y\n\n    def loss(self, x, t):\n        y = self.predict(x)\n\n        return cross_entropy_error(y, t)\n\n    def accuracy(self, x, t):\n        y = self.predict(x)\n        y = np.argmax(y, axis=1)\n        t = np.argmax(t, axis=1)\n\n        accuracy = np.sum(y == t) / float(x.shape[0])\n        return accuracy\n\n    def numerical_gradient(self, x, t):\n        loss_W = lambda W: self.loss(x, t)\n\n        grads = {}\n        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])\n        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])\n        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])\n        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])\n        return grads" ]
[ [ "numpy.max", "numpy.sum", "numpy.exp" ], [ "matplotlib.pyplot.show", "matplotlib.image.imread", "matplotlib.pyplot.imshow" ], [ "numpy.dot", "numpy.zeros", "numpy.sum", "numpy.random.randn", "numpy.argmax" ] ]
velasale/PickApp
[ "2a6b025de217a6f350bd4e83a9bf120d7a2bae56" ]
[ "src/pickapp_data.py" ]
[ "# @Time : 4/7/2022 11:15 AM\n# @Author : Alejandro Velasquez\n\"\"\"\nThis script performs the data post-processing, before feeding it into any machine learning algorithm\n\"\"\"\n\nimport os\nimport numpy as np\nfrom numpy import genfromtxt\nimport math\nimport statistics as st\nimport matplotlib.pyplot as plt\nfrom statistics import mean, stdev\nimport pandas as pd\nfrom tqdm import tqdm\nimport csv\nimport shutil\nimport random\n\n\ndef check_size(source):\n\n lowest = 10000\n highest = 0\n sizes = []\n for filename in tqdm(os.listdir(source)):\n\n data = pd.read_csv(source + filename)\n n_samples = data.shape[0]\n sizes.append(n_samples)\n\n if n_samples < lowest:\n lowest = n_samples\n\n if n_samples > highest:\n highest = n_samples\n\n title = \"Lowest= \" + str(lowest) + \" / Highest= \" + str(highest) + \" / Mean=\" + str(round(mean(sizes),2)) + \" / SD= \" + str(round(stdev(sizes),2))\n plt.title(title)\n plt.boxplot(sizes)\n plt.show()\n\n return lowest, highest\n\n\ndef down_sample(period, source, target):\n \"\"\"\n Downsamples all the csv files located in source folder, and saves the new csv in target folder\n :param period: period [ms] at which you want to sample the time series\n :param source: subfolder with original data\n :param target: subfolder to save the downsampled data\n :return:\n \"\"\"\n\n for filename in os.listdir(source):\n # print(filename)\n\n # --- Step 0: Read csv data into a a Pandas Dataframe ---\n # Do not include the first column that has the time, so we don't overfit the next processes\n # data = genfromtxt((source + filename), delimiter=',', skip_header=True)\n\n data = pd.read_csv(source + filename)\n n_samples = data.shape[0] # rows\n n_channels = data.shape[1] # columns\n\n max_time = data.iloc[-1, 0]\n\n # Create New Dataframe\n downsampled_data = pd.DataFrame()\n headers = pd.read_csv(source + filename, index_col=0, nrows=0).columns.tolist()\n\n # print(headers)\n\n for i in range(n_channels):\n new_value = []\n if i == 0:\n # --- Time Channel\n new_time = []\n\n time = data.iloc[0, 0]\n while time < max_time:\n new_time.append(time)\n time = time + period/1000\n # print(time)\n header = \"Time\"\n downsampled_data[header] = new_time\n\n else:\n # --- The rest of the channels\n new_value = []\n index = 0\n for x in new_time:\n for k in data.iloc[index:, 0]:\n if k > x:\n break\n else:\n index += 1\n\n # Interpolation\n x1 = data.iloc[index-1, 0]\n x2 = data.iloc[index, 0]\n y1 = data.iloc[index-1, i]\n y2 = data.iloc[index, i]\n value = (y1 - y2)*(x2 - x)/(x2 - x1) + y2\n new_value.append(value)\n\n header = headers[i-1]\n\n downsampled_data[header] = new_value\n\n # --- Compare PLots ---\n # plt.plot(data.iloc[:, 0], data.iloc[:, i])\n # plt.plot(new_time, new_value)\n # plt.show()\n\n # print(downsampled_data)\n downsampled_data.to_csv(target + filename, index=False)\n\n\ndef join_csv(name, case, source, target):\n \"\"\"\n Joins csv from different topics but from the same experiment, into a single csv.\n Thus, data is easier to handle, and less prone to make mistakes.\n It does some cropping of the initial or last points, in order to have all the topics have the same size\n :param name: Name of the dataset / experiment\n :param case: Whether Grasp or Pick stage\n :param source:\n :param target:\n :return:\n \"\"\"\n\n if case == 'GRASP/':\n stage = 'grasp'\n elif case == 'PICK/':\n stage = 'pick'\n\n # --- Step 1: Open all the topics from the same experiment that need to be joined ---\n location = source\n topics = ['_wrench', '_f1_imu', 
'_f1_states', '_f2_imu', '_f2_states', '_f3_imu', '_f3_states']\n\n data_0 = pd.read_csv(location + name + stage + topics[0] + '.csv', header=None, index_col=False)\n data_1 = pd.read_csv(location + name + stage + topics[1] + '.csv', header=None, index_col=False)\n data_2 = pd.read_csv(location + name + stage + topics[2] + '.csv', header=None, index_col=False)\n data_3 = pd.read_csv(location + name + stage + topics[3] + '.csv', header=None, index_col=False)\n data_4 = pd.read_csv(location + name + stage + topics[4] + '.csv', header=None, index_col=False)\n data_5 = pd.read_csv(location + name + stage + topics[5] + '.csv', header=None, index_col=False)\n data_6 = pd.read_csv(location + name + stage + topics[6] + '.csv', header=None, index_col=False)\n\n dataframes = [data_0, data_1, data_2, data_3, data_4, data_5, data_6]\n\n # --- Step 2: Crop initial or last points in order to make all topics have the same length\n # Get the channel with the less sampled points\n smallest = 10000\n for channel in dataframes:\n if channel.shape[0] < smallest:\n smallest = channel.shape[0]\n benchmark = channel\n # print(\"\\nSmallest:\", smallest)\n\n benchmark_first = float(benchmark.iloc[1, 0]) # First Reading\n benchmark_last = float(benchmark.iloc[-1, 0]) # Last Reading\n # print(\"First and last\", benchmark_first, benchmark_last)\n\n count = 0\n\n for channel in dataframes:\n if channel.shape[0] > smallest:\n difference = channel.shape[0] - smallest\n # print(\"The difference is\", difference)\n\n if difference > 5:\n pass\n # If difference of sampled points is bigger than a threshold value, then print this warning to manually\n # check it.\n # print(\"//////////////////////////// WARNING ///////////////////////////\")\n\n # Decide which points to crop: the initial or last ones\n initial_time_offset = abs(float(channel.iloc[1, 0]) - benchmark_first)\n last_time_offset = abs(float(channel.iloc[-1, 0]) - benchmark_last)\n\n if initial_time_offset > last_time_offset:\n # print(\"Remove initial\")\n for i in range(difference):\n new_df = channel.drop([1]).reset_index(drop=True)\n channel = new_df\n else:\n # print(\"Remove last\")\n new_df = channel.iloc[:-difference, :]\n\n dataframes[count] = new_df\n\n count = count + 1\n\n # --- Step 3: Join dataframes from each topic into a single dataframe and save\n df = pd.concat([dataframes[0].iloc[:, 1:], dataframes[1].iloc[:, 1:], dataframes[2].iloc[:, 1:],\n dataframes[3].iloc[:, 1:], dataframes[4].iloc[:, 1:], dataframes[5].iloc[:, 1:],\n dataframes[6].iloc[:, 1:]], axis=1)\n new_file_name = target + name + '_' + str(stage) + '.csv'\n df.to_csv(new_file_name, index=False, header=False)\n\n\ndef crop_csv(size, source, target):\n\n for filename in os.listdir(source):\n\n data = pd.read_csv(source + filename)\n n_samples = data.shape[0]\n difference = n_samples - size\n start = int(difference/2)\n end = start + size\n cropped_data = data.iloc[start:end, :]\n cropped_data.to_csv(target + filename, index=False)\n\n\ndef noise_injection(data, percentage):\n \"\"\"\n Data augmentation technique that simply adds noise to the signal as a random Gaussian noise\n :param percentage: Percentage of the range (Max - Min) of the signal that would be considered in the noise function\n :type data: Dataframe\n :return: New datafram with noise\n \"\"\"\n\n channels = data.shape[1]\n\n df = pd.DataFrame()\n\n for i in range(channels):\n channel = data.iloc[:, i]\n\n # Step 1 - Read min max for each column\n channel_range = abs(min(channel) - max(channel))\n\n # Step 2 - Define a 
% of noise according to that range\n noise = np.random.normal(0, channel_range * percentage/100, channel.shape)\n new_signal = channel + noise\n\n df[i] = new_signal\n\n # Copy original dataframe header\n df.columns = data.columns\n\n return df\n\n\ndef data_into_labeled_folder(dataset, metadata_location, data_source_folder, target_folder):\n \"\"\"\n Distribute the csv files in labeled folders\n :param dataset:\n :param metadata_location: Folder with metadata files, which have the labels of the experiments\n :param data_source_folder:\n :param target_folder:\n :return:\n \"\"\"\n\n for metadata in (os.listdir(metadata_location)):\n\n # --- Step 1: Get the basic name\n name = str(metadata)\n\n if dataset == '1_proxy_rob537_x1/':\n start = name.index('app')\n end = name.index('k')\n end_2 = name.index('m')\n name = name[start:end + 1] + '_' + name[end + 1:end_2 - 1] + '_'\n\n elif dataset == '3_proxy_winter22_x1/':\n start = name.index('app')\n end = name.index('m')\n name = name[start:end]\n\n elif dataset == '5_real_fall21_x1/':\n start = name.index('r')\n end = name.index('k')\n end_2 = name.index('m')\n name = name[start:end+1] + '_' + name[end + 1:end_2 - 1] + '_'\n\n # --- Step 2: Read label / result from metadata\n rows = []\n with open(metadata_location + metadata) as csv_file:\n # Create a csv object\n csv_reader = csv.reader(csv_file, delimiter=',')\n # Extract each data row one by one\n for row in csv_reader:\n rows.append(row)\n # Read the label\n if rows[1][10] == 's':\n sub_folder = 'success/'\n else:\n sub_folder = 'failed/'\n\n # --- Step 3: S\n\n for filename in os.listdir(data_source_folder):\n\n data_name = str(filename)\n end = data_name.index(\"__\")\n data_name = data_name[:end+1]\n\n if name == data_name:\n # print(\"Meta and data names:\", name, data_name)\n # print(\"\\n\\n\\n\\n\\n\\n\\nMatch!\")\n source = data_source_folder + filename\n target = target_folder + sub_folder + filename\n shutil.copy(source, target)\n\n\ndef create_sets(main, dataset, training_size):\n \"\"\"\n Distributes the data in a Training and Testing set, by keeping the same label ratios\n :param main:\n :param dataset:\n :param training_size: Size of training set from 0 to 1, the remaining goes to the test set\n :return:\n \"\"\"\n\n # Make sure that the augmented data is not divided into training and testing set, otherwise the testing wouldn't\n # take place with unseen data.\n\n\n # stages = ['GRASP/', 'PICK/']\n labels = ['failed/', 'success/']\n augmented_folders = ['augmented x1/']\n\n for augmented_folder in augmented_folders:\n\n for label in labels:\n\n grasp_source_location = main + dataset + 'GRASP/' + 'new_pp5_labeled/' + augmented_folder + label\n pick_source_location = main + dataset + 'PICK/' + 'new_pp5_labeled/' + augmented_folder + label\n\n previous_name = ''\n for filename in os.listdir(grasp_source_location):\n\n # print(filename)\n name = str(filename)\n end = name.index('grasp')\n name = name[:end]\n # print(name)\n\n if name != previous_name:\n # Check name with previous, if different, flip coin\n coin = random.random()\n print(coin)\n\n if coin < training_size:\n grasp_target_location = main + dataset + 'GRASP/' + 'new_pp6_sets/' + augmented_folder + 'training set/' + label\n pick_target_location = main + dataset + 'PICK/' + 'new_pp6_sets/' + augmented_folder + 'training set/' + label\n else:\n grasp_target_location = main + dataset + 'GRASP/' + 'new_pp6_sets/' + augmented_folder + 'validation set/' + label\n pick_target_location = main + dataset + 'PICK/' + 
'new_pp6_sets/' + augmented_folder + 'validation set/' + label\n\n previous_name = name\n # print(previous_name)\n else:\n pass\n\n # --- Move data from the Grasp ---\n original = grasp_source_location + filename\n target = grasp_target_location + filename\n shutil.copy(original, target)\n\n # And from the Pick\n filename = filename.replace('grasp', 'pick')\n original = pick_source_location + filename\n target = pick_target_location + filename\n shutil.copy(original, target)\n\n\ndef main():\n\n # Step 1 - Read Data saved as csvs from bagfiles\n\n # Step 2 - Split the data into Grasp and Pick\n # (pp) grasp_and_pick_split.py\n\n # Step 3 - Select the columns to pick\n # (pp) real_pick_delCol.py\n\n main = 'C:/Users/15416/Box/Learning to pick fruit/Apple Pick Data/RAL22 Paper/'\n\n # dataset = '1_proxy_rob537_x1/'\n dataset = '3_proxy_winter22_x1/'\n # dataset = '5_real_fall21_x1/'\n\n stages = ['GRASP/', 'PICK/']\n\n print(\"\\nStep 1: Downsampling...\")\n # for stage in tqdm(stages):\n # location = main + dataset + stage\n # location_1 = location + 'pp1_split/'\n # location_2 = location + 'new_pp2_downsampled/'\n #\n # # --- Step 4: Down sample Data ---\n # period = 15 # Sampling period in [ms]\n #\n # down_sample(period, location_1, location_2)\n #\n # # --- Step 5: Check sizes ---\n # # check_size(location_2)\n\n # --- Step 6: Join Data ---\n # Here we want to end up with a list the size of the medatadafiles\n # Thus makes sense to get the names from the metadata folder\n # (pp) csv_joiner.py\n metadata_loc = main + dataset + 'metadata/'\n\n print(\"\\nStep 2: Joining topics into a single csv...\")\n # for filename in tqdm(sorted(os.listdir(metadata_loc))):\n #\n # # Get the basic name\n # name = str(filename)\n #\n # if dataset == '1_proxy_rob537_x1/':\n # start = name.index('app')\n # end = name.index('k')\n # end_2 = name.index('m')\n # name = name[start:end + 1] + '_' + name[end + 1:end_2 - 1] + '_'\n #\n # elif dataset == '3_proxy_winter22_x1/':\n # start = name.index('app')\n # end = name.index('m')\n # name = name[start:end]\n #\n # elif dataset == '5_real_fall21_x1/':\n # start = name.index('r')\n # end = name.index('k')\n # end_2 = name.index('m')\n # name = name[start:end+1] + '_' + name[end + 1:end_2 - 1] + '_'\n #\n # # print(\"\\nFiles being checked:\")\n # # print(filename)\n # # print(name)\n #\n # for stage in stages:\n # # print(stage)\n # location = main + dataset + stage\n # location_2 = location + 'new_pp2_downsampled/'\n # location_3 = location + 'new_pp3_joined/'\n # join_csv(name, stage, location_2, location_3)\n\n # --- Step 7: Augment Data ---\n\n print(\"\\n Step 3: Augmenting data...\")\n # for stage in tqdm(stages):\n # location = main + dataset + stage\n # location_3 = location + 'new_pp3_joined/'\n # location_4 = location + 'new_pp4_augmented/augmented x20/'\n #\n # for filename in os.listdir(location_3):\n # # print(filename)\n #\n # data = pd.read_csv(location_3 + filename)\n # augmentations = 20\n # end = filename.index('.')\n # for i in range(augmentations):\n # augmented_data = noise_injection(data, augmentations)\n # new_name = filename[:end] + \"_aug_\" + str(i) + \".csv\"\n # augmented_data.to_csv(location_4 + new_name, index=False)\n\n # --- Step 8: Save csvs in subfolders labeled ---\n\n print(\"\\nStep 4: Saving data in labeled folders...\")\n # for stage in tqdm(stages):\n # location = main + dataset + stage\n #\n # if dataset in ['1_proxy_rob537_x1/', '3_proxy_winter22_x1/']:\n # location_4 = location + 'new_pp4_augmented/augmented x1/'\n # 
elif dataset == '5_real_fall21_x1/':\n # location_4 = location + 'new_pp3_joined/'\n #\n # location_5 = location + 'new_pp5_labeled/augmented x1/'\n # metadata_loc = main + dataset + 'metadata/'\n #\n # data_into_labeled_folder(dataset, metadata_loc, location_4, location_5)\n\n\n # --- Step 9: Sparse data in the training and testing set\n\n print(\"\\nStep 5: Sparsing data in training and testing sets...\")\n training_size = 0.7\n create_sets(main, dataset, training_size)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.random.normal", "pandas.DataFrame", "matplotlib.pyplot.title", "matplotlib.pyplot.boxplot", "pandas.concat", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
kotoyo/nuFATE
[ "349338fdbb99edd5e0075038e65bb4da870ab8b4" ]
[ "src/python/earth.py" ]
[ "\"\"\" Functions to evaluate the Earth density.\n\"\"\"\nimport numpy as np\nimport scipy.integrate as integrate\n\nREarth = 6371. # Earth radius in km.\n\ndef rho_earth(theta, x, d = 0):\n    \"\"\" Returns the Earth density in gr/cm^3.\n\n    Args:\n        theta: zenith angle in radians.\n        x: position along the trajectory in km.\n        d: depth under the Earth's surface. (default set to 0)\n\n    Returns:\n        rho: density in gr/cm^3\n    \"\"\"\n    #\ttheta = angle from down vector (0 = direction north pole...if you're at IceCube)\n    # piecewise polynomial fit to Reference earth model STW105\n    # you could also load a Ref earth model if you want.\n\n    r = np.sqrt((REarth - d)**2 + x**2 + 2. * (REarth - d) * x * np.cos(theta))\n\n    if r < 1221.:\n        p1 = -0.0002177\n        p2 = -4.265e-06\n        p3 = 1.309e+04\n    elif r < 3480:\n        p1 = -0.0002409\n        p2 = 0.1416\n        p3 = 1.234e+04\n    elif r < 5721:\n        p1 = -3.764e-05\n        p2 = -0.1876\n        p3 = 6664\n    elif r < 5961:\n        p1 = 0.\n        p2 = -1.269\n        p3 = 1.131e+04\n    elif r < 6347:\n        p1 = 0.\n        p2 = -.725\n        p3 = 7887.\n    elif r < 6356:\n        p1 = 0\n        p2 = 0\n        p3 = 2900\n    elif r < 6368:\n        p1 = 0\n        p2 = 0\n        p3 = 2600\n    else:\n        p1 = 0\n        p2 = 0\n        p3 = 1020\n\n    rho = p1 * r**2 + p2 * r + p3\n\n    return rho*1.0e-3 # g/cm^3. 1.0e-3 conversion factor from kg/m^3 to g/cm^3\n\ndef get_t_earth(theta, d = 0):\n    \"\"\" Returns the Earth column density for a given zenith angle.\n\n    Args:\n        theta: zenith angle in radians.\n        d: depth under the Earth's surface. (default set to 0)\n\n    Returns:\n        t: column density in g/cm^2\n    \"\"\"\n    xmax = np.sqrt((REarth - d)**2 * np.cos(theta)**2 + d * (2 * REarth - d)) - (REarth - d) * np.cos(theta)\n    kmTocm = 1.0e5\n    n = lambda x: rho_earth(theta, x, d) #mass density\n    t = integrate.quad(\n        lambda x: n(xmax - x), 0, xmax, epsrel=1.0e-3,\n        epsabs=1.0e-18)[0] * kmTocm #g/cm^2\n    return t\n" ]
[ [ "numpy.cos" ] ]
wahid18benz/selective_search
[ "e928ecbb8e6f64adca3fb00d9b283c4720fb227b" ]
[ "selective_search/structure.py" ]
[ "import numpy as np\nfrom skimage.segmentation import find_boundaries\nfrom scipy.ndimage import find_objects\nfrom . import measure\n\n\nclass HierarchicalGrouping(object):\n def __init__(self, img, img_seg, sim_strategy):\n self.img = img\n self.sim_strategy = sim_strategy\n self.img_seg = img_seg.copy()\n self.labels = np.unique(self.img_seg).tolist()\n\n def build_regions(self):\n self.regions = {}\n lbp_img = measure.generate_lbp_image(self.img)\n for label in self.labels:\n size = (self.img_seg == 1).sum()\n region_slice = find_objects(self.img_seg==label)[0]\n box = tuple([region_slice[i].start for i in (1,0)] +\n [region_slice[i].stop for i in (1,0)])\n\n mask = self.img_seg == label\n color_hist = measure.calculate_color_hist(mask, self.img)\n texture_hist = measure.calculate_texture_hist(mask, lbp_img)\n\n self.regions[label] = {\n 'size': size,\n 'box': box,\n 'color_hist': color_hist,\n 'texture_hist': texture_hist\n }\n\n\n def build_region_pairs(self):\n self.s = {}\n for i in self.labels:\n neighbors = self._find_neighbors(i)\n for j in neighbors:\n if i < j:\n self.s[(i,j)] = measure.calculate_sim(self.regions[i],\n self.regions[j],\n self.img.size,\n self.sim_strategy)\n\n\n def _find_neighbors(self, label):\n \"\"\"\n Parameters\n ----------\n label : int\n label of the region\n Returns\n -------\n neighbors : list\n list of labels of neighbors\n \"\"\"\n\n boundary = find_boundaries(self.img_seg == label,\n mode='outer')\n neighbors = np.unique(self.img_seg[boundary]).tolist()\n\n return neighbors\n\n def get_highest_similarity(self):\n return sorted(self.s.items(), key=lambda i: i[1])[-1][0]\n\n def merge_region(self, i, j):\n\n # generate a unique label and put in the label list\n new_label = max(self.labels) + 1\n self.labels.append(new_label)\n\n # merge blobs and update blob set\n ri, rj = self.regions[i], self.regions[j]\n\n new_size = ri['size'] + rj['size']\n new_box = (min(ri['box'][0], rj['box'][0]),\n min(ri['box'][1], rj['box'][1]),\n max(ri['box'][2], rj['box'][2]),\n max(ri['box'][3], rj['box'][3]))\n value = {\n 'box': new_box,\n 'size': new_size,\n 'color_hist':\n (ri['color_hist'] * ri['size']\n + rj['color_hist'] * rj['size']) / new_size,\n 'texture_hist':\n (ri['texture_hist'] * ri['size']\n + rj['texture_hist'] * rj['size']) / new_size,\n }\n\n self.regions[new_label] = value\n\n # update segmentation mask\n self.img_seg[self.img_seg == i] = new_label\n self.img_seg[self.img_seg == j] = new_label\n\n def remove_similarities(self, i, j):\n\n # mark keys for region pairs to be removed\n key_to_delete = []\n for key in self.s.keys():\n if (i in key) or (j in key):\n key_to_delete.append(key)\n\n for key in key_to_delete:\n del self.s[key]\n\n # remove old labels in label list\n self.labels.remove(i)\n self.labels.remove(j)\n\n def calculate_similarity_for_new_region(self):\n i = max(self.labels)\n neighbors = self._find_neighbors(i)\n\n for j in neighbors:\n # i is larger than j, so use (j,i) instead\n self.s[(j,i)] = measure.calculate_sim(self.regions[i],\n self.regions[j],\n self.img.size,\n self.sim_strategy)\n\n def is_empty(self):\n return True if not self.s.keys() else False\n" ]
[ [ "scipy.ndimage.find_objects", "numpy.unique" ] ]
mikeseven/aimet
[ "63211a4f259b6457c58dfae1097c70acb93319fe" ]
[ "TrainingExtensions/tensorflow/test/python/test_module_identifier.py" ]
[ "# /usr/bin/env python3.5\n# -*- mode: python -*-\n# =============================================================================\n# @@-COPYRIGHT-START-@@\n#\n# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n#\n# @@-COPYRIGHT-END-@@\n# =============================================================================\n\"\"\" This file contains unit tests for testing ModuleIdentifier modules. 
\"\"\"\n\nimport unittest\nimport logging\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.WARN)\n\nfrom aimet_common.utils import AimetLogger\nfrom aimet_tensorflow.common.module_identifier import StructureModuleIdentifier\nfrom aimet_tensorflow.examples.test_models import keras_model, keras_model_functional, tf_slim_basic_model\n\nlogger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)\nAimetLogger.set_area_logger_level(AimetLogger.LogAreas.Test, logging.DEBUG)\nAimetLogger.set_area_logger_level(AimetLogger.LogAreas.ConnectedGraph, logging.DEBUG)\n\n\nclass TestStructureModuleIdentifier(unittest.TestCase):\n \"\"\" Test StructureModuleIdentifier module \"\"\"\n\n def test_get_op_info(self):\n \"\"\" Test get_op_info() in StructureModuleIdentifier \"\"\"\n my_op_type_set = set()\n current_module_set = set()\n\n tf.compat.v1.reset_default_graph()\n _ = keras_model()\n\n module_identifier = StructureModuleIdentifier(tf.compat.v1.get_default_graph(), [\"conv2d_input\"],\n set(tf.compat.v1.get_default_graph().get_operations()))\n for op_info in module_identifier.op_to_module_dict.values():\n my_op_type_set.add(op_info.op_type)\n current_module_set.add(op_info.module_name)\n\n # Only identifies 2 conv2d, 2 fusedbatchnorm, flatten, and dense\n self.assertEqual(6, len(current_module_set))\n self.assertEqual(4, len(my_op_type_set))\n\n def test_fused_batch_norm_matcher_keras(self):\n \"\"\" Test fused batch norm matchers \"\"\"\n\n tf.compat.v1.reset_default_graph()\n _ = keras_model_functional()\n\n module_identifier = StructureModuleIdentifier(tf.compat.v1.get_default_graph(), [\"input_1\"],\n set(tf.compat.v1.get_default_graph().get_operations()))\n bn_op = tf.compat.v1.get_default_graph().get_operation_by_name('batch_normalization/FusedBatchNormV3')\n self.assertTrue(bn_op in module_identifier.op_to_module_dict.keys())\n self.assertEqual(module_identifier.op_to_module_dict[bn_op].module_name, 'batch_normalization')\n switch_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/batch_normalization_1/cond/'\n 'FusedBatchNormV3/Switch')\n self.assertEqual(module_identifier.op_to_module_dict[switch_op].module_name, 'scope_1/batch_normalization_1')\n\n def test_fused_batch_norm_matcher_slim(self):\n \"\"\" Test fused batch norm matchers \"\"\"\n\n tf.compat.v1.reset_default_graph()\n x = tf.compat.v1.placeholder(tf.float32, [1, 32, 32, 3])\n _ = tf_slim_basic_model(x)\n module_identifier = StructureModuleIdentifier(tf.compat.v1.get_default_graph(), [\"Placeholder\"],\n set(tf.compat.v1.get_default_graph().get_operations()))\n mul_op = tf.compat.v1.get_default_graph().get_operation_by_name('BatchNorm/FusedBatchNormV3')\n self.assertEqual(module_identifier.op_to_module_dict[mul_op].module_name, 'BatchNorm')\n bn_1_merge_op = tf.compat.v1.get_default_graph().get_operation_by_name('BatchNorm_1/cond/Merge')\n self.assertEqual(module_identifier.op_to_module_dict[bn_1_merge_op].module_name, 'BatchNorm_1')\n bn_2_op = tf.compat.v1.get_default_graph().get_operation_by_name('BatchNorm_2/FusedBatchNormV3')\n self.assertTrue(bn_2_op in module_identifier.op_to_module_dict.keys())\n self.assertEqual(module_identifier.op_to_module_dict[bn_2_op].module_name, 'BatchNorm_2')\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.get_default_graph", "tensorflow.logging.set_verbosity", "tensorflow.compat.v1.reset_default_graph" ] ]
banksempire/FDM
[ "f443a73056490cff937fd547d776f01117771b41" ]
[ "fdm/datasources/tushare/model.py" ]
[ "from time import sleep\nfrom datetime import timedelta\nfrom datetime import datetime\n\nimport pandas as pd\nfrom pandas import DataFrame\n\n\nfrom fdm.datasources.metaclass import (_CollectionBase,\n _DbBase,\n _DynCollectionBase)\nfrom .feeder import rebuilder, updater, fs_temp\nfrom .fields import *\n\n# -------------------------------\n# Tushare base class\n# -------------------------------\n\n\nclass _TushareCollectionBase(_CollectionBase):\n method_name = 'blank'\n\n def _rebuild(self, download_function):\n import tushare as ts\n # Drop all data in collection\n self.interface.drop()\n print('{0} droped'.format(self.interface.full_name()))\n # Inititalize data source\n pro = ts.pro_api()\n # Get stock list\n stock_list = DataFrame()\n for status in \"LDP\":\n stock_list = stock_list.append(pro.stock_basic(list_status=status))\n # Download data for each stock code\n for _, value in stock_list.iterrows():\n code = value['ts_code']\n record_len = 1\n # minus one day to prevent imcomplete dataset been downloaded\n enddate = datetime.now()-timedelta(1)\n while record_len != 0:\n df = download_function(code, pro, enddate)\n record_len = df.shape[0]\n if record_len != 0:\n enddate = min(df['trade_date']) - timedelta(1)\n self.interface.insert_many(df)\n\n print('Code: {0} downloaded.'.format(code))\n sleep(0.6)\n return 0\n\n def _update(self, download_function):\n import tushare as ts\n # Inititalize data source\n pro = ts.pro_api()\n # Get last date in DB\n lastdate = self.interface.lastdate()\n # Generate date range business day only\n daterange = pd.date_range(start=lastdate+timedelta(1),\n end=datetime.now(), freq=\"B\")\n print('Total {0} data points need to be downloaded.'.format(\n len(daterange)))\n # Download data for each day\n for i in daterange:\n date = i.strftime('%Y%m%d')\n df = download_function(date, pro)\n self.interface.insert_many(df)\n print('Date: {0} downloaded.'.format(date))\n sleep(0.6)\n return 0\n\n def rebuild(self, buildindex=True):\n self._rebuild(rebuilder(self.method_name))\n if buildindex:\n self.interface.create_indexs(\n [self.interface.date_name, self.interface.code_name])\n return 0\n\n def update(self):\n self._update(updater(self.method_name))\n return 0\n\n\nclass Tushare(_DbBase):\n\n def daily_price(self):\n return self._inti_col(DailyPrice)\n\n def daily_basic(self):\n return self._inti_col(DailyBasic)\n\n def daily_adj(self):\n return self._inti_col(DailyAdjFactor)\n\n def income(self):\n return self._inti_col(IncomeStatement)\n\n def balance_sheet(self):\n return self._inti_col(BalanceSheet)\n\n def cash_flow(self):\n return self._inti_col(CFStatement)\n\n# -------------------------------\n# Trading info\n# -------------------------------\n\n\nclass DailyBasic(_TushareCollectionBase):\n method_name = 'daily_basic'\n\n\nclass DailyPrice(_TushareCollectionBase):\n method_name = 'daily'\n\n\nclass DailyAdjFactor(_TushareCollectionBase):\n method_name = 'adj_factor'\n\n# --------------------------------\n# Financial Statement\n# --------------------------------\n\n\nclass _FSTemp(_DynCollectionBase):\n feeder_func = None\n fields = None\n\n def update(self, codes,\n startdate,\n enddate,\n force_update=False\n ):\n super().update(codes=codes,\n fields=self.fields,\n startdate=startdate,\n enddate=enddate,\n force_update=force_update)\n\n def query(self, codes,\n fields,\n startdate,\n enddate,\n ):\n res = super().query(codes=codes,\n fields=fields,\n startdate=startdate,\n enddate=enddate,\n skip_update=True)\n return res\n\n\nclass 
IncomeStatement(_FSTemp):\n feeder_func = fs_temp('income', 2)\n fields = income\n\n\nclass BalanceSheet(_FSTemp):\n feeder_func = fs_temp('balancesheet', 2)\n fields = balance\n\n\nclass CFStatement(_FSTemp):\n feeder_func = fs_temp('cashflow', 2)\n fields = cashflow\n" ]
[ [ "pandas.DataFrame" ] ]
shashankpr/DeepSleep
[ "ea33778f74be1314b1be8f34a101294e8395808d" ]
[ "deepsleep/data_generator.py" ]
[ "import os\nimport logging\nimport numpy as np\n\nfrom keras.preprocessing import sequence\n\nfrom utils import threadsafe_generator\nfrom preprocess import PreProcessData\n\nseed = 42\nnp.random.seed(seed)\n\nclass HeartSequenceGenerator(object):\n\n def __init__(self, seq_len, batch_size, n_classes, is_pretrain=False, is_stateful_train=False):\n self.ROOT = os.getcwd()\n self.DATASET_DIR = os.path.join(self.ROOT, \"dataset\")\n self.PRETRAIN_DIR = os.path.join(self.ROOT, \"pretrain_dataset\")\n self.logger = logging.getLogger(__name__)\n\n self.is_pretrain = is_pretrain\n self.is_stateful_train = is_stateful_train\n self.seq_len = seq_len\n self.batch_size = batch_size\n self.n_classes = n_classes\n\n self.preprocess = PreProcessData(seq_len=self.seq_len, batch_size=self.batch_size, n_classes=self.n_classes)\n\n def total_batch_count(self, dataset_files):\n\n n_batches = 0\n for npz_files in dataset_files:\n if self.is_pretrain:\n npz_files = os.path.join(self.PRETRAIN_DIR, npz_files)\n\n datafile = np.load(npz_files)\n sampling_rate = datafile[\"Sampling_rate\"]\n total_samples = datafile[\"Total_samples\"]\n # max_len = 30 * int(np.mean(sampling_rate))\n # epoch_count = int(total_samples / max_len)\n epoch_count = total_samples\n\n else:\n npz_files = os.path.join(self.DATASET_DIR, npz_files)\n datafile = np.load(npz_files)\n epoch_count = datafile['Total_epochs']\n\n current_batch_count = int(epoch_count / self.batch_size)\n remainder_samples = epoch_count % self.batch_size\n if remainder_samples:\n current_batch_count = current_batch_count + 1\n\n n_batches = n_batches + current_batch_count\n # self.logger.debug(\"Current batch count = {}\".format(current_batch_count))\n\n self.logger.debug(\"Total batches to check = {}\".format(n_batches))\n return n_batches\n\n\n @threadsafe_generator\n def generate_sequences(self, subject_files):\n\n while True:\n for files in subject_files:\n if self.is_pretrain:\n npz_file = os.path.join(self.PRETRAIN_DIR, files)\n else:\n npz_file = os.path.join(self.DATASET_DIR, files)\n\n heart_signal_seq, labels_categorical = self._preprocessor(npz_file)\n\n for heart_signal_batch, labels_batch in self._get_data_in_batches(heart_signal_seq, labels_categorical):\n yield (heart_signal_batch, labels_batch)\n\n def validation_sequence(self, subject_files):\n\n val_heart = np.zeros(shape=(1, self.seq_len, 1))\n val_labels = np.zeros(shape=(1, self.n_classes))\n\n np.random.shuffle(subject_files)\n for files in subject_files:\n # Load .npz dataset\n\n if self.is_pretrain:\n npz_file = os.path.join(self.PRETRAIN_DIR, files)\n self.logger.info(\"Extracting Pretrain Validation {}\".format(npz_file))\n else:\n npz_file = os.path.join(self.DATASET_DIR, files)\n self.logger.info(\"Extracting Original Validation data {}\".format(npz_file))\n\n heart_signal_seq, labels_categorical = self._preprocessor(npz_file)\n\n if self.is_stateful_train:\n # make the batch sizes same across each batch\n self.logger.debug(\"Heart shape = {}\".format(heart_signal_seq.shape))\n remaining_samples = heart_signal_seq.shape[0] % self.batch_size\n if remaining_samples:\n heart_signal_seq, labels_categorical = self.preprocess.pad_validation_batches(heart_signal_seq,\n labels_categorical,\n remaining_samples)\n\n val_heart = np.vstack((val_heart, heart_signal_seq))\n val_labels = np.vstack((val_labels, labels_categorical))\n\n val_heart = val_heart[1:]\n val_labels = val_labels[1:]\n\n return val_heart, val_labels\n\n def _preprocessor(self, npz_file):\n\n heart_signal, labels, sampling_rate 
= self.preprocess.load_data(npz_file)\n # self.logger.debug(\"Heart signal shape = {}\".format(heart_signal.shape))\n\n # Standardize the heart signal for every 30 seconds epoch\n # self.logger.info(\"Standardizing signals...\")\n # heart_signal = self.preprocess.standardize_data(heart_signal)\n # self.logger.debug(\"Heart shape after standardization = {}\".format(heart_signal.shape))\n #\n # # Create sequences\n # self.logger.info(\"Creating and Padding sequences...\")\n # heart_signal_seq, labels_seq = self.preprocess.create_sequences(heart_signal, labels, sampling_rate=sampling_rate)\n # self.logger.debug(\"Heart signal shape = {}\".format(heart_signal_seq.shape))\n #\n #\n # # Pad sequences to get uniform sequence length\n # heart_signal_seq = sequence.pad_sequences(heart_signal_seq, maxlen=self.seq_len, dtype='float32',\n # padding='post',\n # truncating='post')\n # self.logger.debug(\"Heart signal shape = {}\".format(heart_signal_seq.shape))\n\n # Convert labels to categorical format\n self.logger.info(\"Converting labels to categorical...\")\n labels_categorical = self.preprocess.convert_to_categorical(labels)\n\n # # Add extra dimension to suit the requirements for LSTM & CNN\n # if self.is_pretrain:\n # heart_signal_seq = np.expand_dims(heart_signal_seq, 2)\n\n\n self.logger.debug(\"Shape = {}, {}\".format(heart_signal.shape, labels_categorical.shape))\n\n return heart_signal, labels_categorical\n\n def _get_data_in_batches(self, heart_signal_seq, labels_categorical):\n\n if self.is_pretrain:\n indexes = self.preprocess.get_exploration_order(heart_signal=heart_signal_seq, shuffle=True)\n else:\n indexes = self.preprocess.get_exploration_order(heart_signal=heart_signal_seq)\n\n max_batches = self.preprocess.get_current_batch_count(index=indexes)\n for i in range(max_batches):\n if i == max_batches - 1:\n\n # If last batch having lesser samples than batch_size\n batch_indexes = indexes[i * self.batch_size:]\n heart_signal_batch = [heart_signal_seq[k] for k in batch_indexes]\n labels_batch = [labels_categorical[k] for k in batch_indexes]\n\n heart_signal_batch = np.asarray(heart_signal_batch)\n labels_batch = np.asarray(labels_batch)\n\n if self.is_stateful_train:\n # make the batch sizes same across each batch for rendering statefulness\n # Pad last batch\n if heart_signal_batch.shape[0] != self.batch_size:\n heart_signal_batch, labels_batch = self.preprocess.pad_batches(heart_signal=heart_signal_batch, labels=labels_batch)\n else:\n batch_indexes = indexes[i * self.batch_size: (i + 1) * self.batch_size]\n heart_signal_batch = [heart_signal_seq[k] for k in batch_indexes]\n labels_batch = [labels_categorical[k] for k in batch_indexes]\n\n heart_signal_batch = np.asarray(heart_signal_batch)\n labels_batch = np.asarray(labels_batch)\n\n yield heart_signal_batch, labels_batch\n\n\nclass HeartSequenceLoader(object):\n def __init__(self, seq_len, batch_size, n_classes, is_pretrain=False):\n\n self.ROOT = os.getcwd()\n self.DATASET_DIR = os.path.join(self.ROOT, \"dataset\")\n self.PRETRAIN_DIR = os.path.join(self.ROOT, \"pretrain_dataset\")\n self.logger = logging.getLogger(__name__)\n\n self.is_pretrain = is_pretrain\n self.seq_len = seq_len\n self.batch_size = batch_size\n self.n_classes = n_classes\n\n self.preprocess = PreProcessData(seq_len=self.seq_len, batch_size=self.batch_size, n_classes=self.n_classes)\n\n def _preprocessor(self, npz_file):\n\n heart_signal, labels, sampling_rate = self.preprocess.load_data(npz_file)\n\n # Create sequences\n self.logger.info(\"Creating and 
Padding sequences...\")\n heart_signal_seq, labels_seq = self.preprocess.create_sequences(heart_signal, labels,\n sampling_rate=sampling_rate)\n\n # Standardize the heart signal for every 30 seconds epoch\n heart_signal_seq = self.preprocess.standardize_seq_data(heart_signal_seq)\n\n # Pad sequences to get uniform sequence length\n heart_signal_seq = sequence.pad_sequences(heart_signal_seq, maxlen=self.seq_len, dtype='float32',\n padding='post',\n truncating='post')\n\n # Convert labels to categorical format\n labels_categorical = self.preprocess.convert_to_categorical(labels_seq)\n\n # Add extra dimension to suit the requirements for LSTM & CNN\n heart_signal_seq = np.expand_dims(heart_signal_seq, 2)\n self.logger.debug(\"Shape = {}, {}\".format(heart_signal_seq.shape, labels_categorical.shape))\n\n return heart_signal_seq, labels_categorical\n\n\nclass InferenceHeartSequenceLoader(object):\n def __init__(self, seq_len, batch_size, n_classes, is_pretrain=False):\n self.ROOT = os.getcwd()\n self.TEST_DATASET_DIR = os.path.join(self.ROOT, \"deepsleep/test_dataset\")\n self.PRETRAIN_DIR = os.path.join(self.ROOT, \"pretrain_dataset\")\n self.logger = logging.getLogger(__name__)\n\n self.is_pretrain = is_pretrain\n self.seq_len = seq_len\n self.batch_size = batch_size\n self.n_classes = n_classes\n\n self.preprocess = PreProcessData(seq_len=self.seq_len, batch_size=self.batch_size, n_classes=self.n_classes)\n\n def _preprocessor(self, npz_file):\n heart_signal, labels, sampling_rate = self.preprocess.load_data(npz_file)\n\n # Create sequences\n self.logger.info(\"Creating and Padding sequences...\")\n heart_signal_seq, labels_seq = self.preprocess.create_sequences(heart_signal, labels,\n sampling_rate=sampling_rate)\n\n # Standardize the heart signal for every 30 seconds epoch\n heart_signal_seq = self.preprocess.standardize_seq_data(heart_signal_seq)\n\n # Pad sequences to get uniform sequence length\n heart_signal_seq = sequence.pad_sequences(heart_signal_seq, maxlen=self.seq_len, dtype='float32',\n padding='post',\n truncating='post')\n\n # Convert labels to categorical format\n # labels_categorical = self.preprocess.convert_to_categorical(labels_seq)\n\n # Add extra dimension to suit the requirements for LSTM & CNN\n # heart_signal_seq = np.expand_dims(heart_signal_seq, 2)\n self.logger.debug(\"Shape = {}, {}\".format(heart_signal_seq.shape, labels_seq.shape))\n\n return heart_signal_seq, labels_seq\n\n def get_data(self, subject_file):\n \"\"\"\n\n Args:\n subject_file:\n\n Returns:\n\n \"\"\"\n\n npz_file = os.path.join(self.TEST_DATASET_DIR, subject_file)\n heart_signal_seq, labels = self._preprocessor(npz_file)\n\n return heart_signal_seq, labels\n\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.random.seed", "numpy.load", "numpy.random.shuffle", "numpy.vstack", "numpy.expand_dims" ] ]
sukumar1612/GraphTheoryPaper_DiseaseSim
[ "fafea0306a2179f0cc7d5c9b1a9bc6a81262f7f3" ]
[ "GraphTheoryPaper.py" ]
[ "'''\nModelling and analysis of COVID-19 in India using Graph Theory\nProject by : Arvind, Nishanth, Srivatsan, Sukumar, Thyagarajan\n'''\n\nimport random\nimport secrets\n\nimport matplotlib.pyplot as plt\n\n'''\nGlossary:\n\nVertex of graph - \n\n'trunc_gauss' - Normal distribution for a fixed range from bottom to top \n i.e. in our case bottom = 0, top = n-1\n\nline ( - ) - Input values\n effectiveness - actually 1-effectiveness\n\nnormal_community - An adjacency matrix that corresponds to a community\n where there are no preventive measures against COVID\n\n\ncautious_community - An adjacency matrix that corresponds to a community\n where there are preventive measures against COVID\n\npeople_labels - List of people, here each person is identified by an integer \n (0 to population -1)\n\nmask_list - List of people who wear mask (Randomised)\nsanitize_list - List of people who use sanitiser (Randomised)\nsocial_dist_list - List of people who follow social distancing (Randomised)\n\nprioritise and randomise - sort the 'individuals_at_risk' list in non-decresing order to \n ensure that those who don't take a lot of \n preventive measures are sooner to get infected\n\n - randomise to ensure that the event of infection \n is a random event\n\n'''\n\nif __name__ == \"__main__\":\n population = 100\n\n percent_of_ppl_wear_mask = 0.55\n no_of_ppl_wear_mask = int(percent_of_ppl_wear_mask * population)\n effectiveness_of_mask = 0.7\n\n percent_of_ppl_sanitizer = 0.2\n no_of_ppl_sanitizer = int(percent_of_ppl_sanitizer * population)\n effectiveness_of_sanitizer = 0.3\n\n percent_of_ppl_social_dist = 0.80\n no_of_ppl_social_dist = int(percent_of_ppl_social_dist * population)\n effectiveness_of_social_dist = 0.1\n\n\n def trunc_gauss(mu, sigma, bottom, top):\n a = random.gauss(mu, sigma)\n while (bottom <= a <= top) == False:\n a = random.gauss(mu, sigma)\n return a\n\n\n people_labels = [i for i in range(0, population)]\n\n normal_community = [[0 for i in range(0, population)] for j in range(0, population)]\n cautious_community = [[0 for i in range(0, population)] for j in range(0, population)]\n\n for i in range(0, population):\n for j in range(i, population):\n cautious_community[i][j] = cautious_community[j][i] = normal_community[i][j] = normal_community[j][\n i] = secrets.randbelow(2)\n if (i == j):\n cautious_community[i][j] = cautious_community[j][i] = normal_community[i][j] = normal_community[j][\n i] = 0 # since you can't transmit disease to your self so matrix at i==j is zero\n\n mask_list = random.sample(people_labels, no_of_ppl_wear_mask)\n sanitize_list = random.sample(people_labels, no_of_ppl_sanitizer)\n social_dist_list = random.sample(people_labels, no_of_ppl_social_dist)\n\n for i in range(0, len(mask_list)):\n for j in range(0, population):\n cautious_community[mask_list[i]][j] = cautious_community[mask_list[i]][j] * effectiveness_of_mask\n cautious_community[j][mask_list[i]] = cautious_community[j][mask_list[i]] * effectiveness_of_mask\n\n for i in range(0, len(sanitize_list)):\n for j in range(0, population):\n cautious_community[sanitize_list[i]][j] = cautious_community[sanitize_list[i]][\n j] * effectiveness_of_sanitizer\n cautious_community[j][sanitize_list[i]] = cautious_community[j][\n sanitize_list[i]] * effectiveness_of_sanitizer\n\n for i in range(0, len(social_dist_list)):\n for j in range(0, population):\n cautious_community[social_dist_list[i]][j] = cautious_community[social_dist_list[i]][\n j] * effectiveness_of_social_dist\n 
cautious_community[j][social_dist_list[i]] = cautious_community[j][\n social_dist_list[i]] * effectiveness_of_social_dist\n\n\n # print(\"Normal commmunity:\\n\",normal_community,\"\\n\")\n # print(\"Cautious community:\\n\",cautious_community,\"\\n\")\n\n def prob(x):\n l1 = [i for i in range(0, int(10000 * x))]\n y = secrets.randbelow(10000)\n if y in l1:\n return 1\n else:\n return 0\n\n\n # Normal Community Graph\n\n adj_list_normal = []\n\n for i in range(population):\n\n l1 = []\n\n for j in range(population):\n\n if (normal_community[i][j] == 1):\n l1.append(j)\n\n adj_list_normal.append(l1)\n\n rrand = trunc_gauss(2, 0.3, 0, population)\n\n tot_infection = [i for i in range(0, population)]\n\n infected = []\n infected.append(0) # infecting first person\n\n carriers_on_day = []\n carriers_on_day.append(0)\n\n dummy = []\n day_count = 0\n\n ppl_inf = {}\n\n # print(\"adjaceny list:\",adj_list_normal)\n\n for k2 in range(10000):\n\n dummy = []\n\n for i in carriers_on_day:\n rrand = int(trunc_gauss(2.7, 0.3, 0, population))\n count1 = min(rrand, len(adj_list_normal[i]))\n j = 0\n k3 = 0\n\n while j < count1 and k3 < len(adj_list_normal[i]):\n\n if (adj_list_normal[i][k3] not in infected):\n infected.append(adj_list_normal[i][k3])\n dummy.append(adj_list_normal[i][k3])\n j += 1\n\n k3 += 1\n\n carriers_on_day.remove(i)\n\n for k in dummy:\n carriers_on_day.append(k)\n\n day_count += 1\n\n if (sorted(infected) == tot_infection):\n break\n if (len(carriers_on_day) == 0):\n break\n\n # print(\"\\nDay number : \",day_count)\n # print(\"People infected : \",infected)\n ppl_inf[day_count] = len(infected)\n\n # print(\"\\nOverall infected : \",infected)\n print(\"\\n\\n\\n\\n\")\n\n dc = day_count\n\n # Cautious community graph\n\n adj_list_normal = []\n\n for i in range(population):\n\n l1 = []\n\n for j in range(population):\n\n if (cautious_community[i][j] != 0):\n l1.append(j)\n\n adj_list_normal.append(l1)\n\n rrand = trunc_gauss(2, 0.3, 0, population)\n\n tot_infection = [i for i in range(0, population)]\n\n infected = []\n infected.append(0) # infecting first person\n\n carriers_on_day = []\n carriers_on_day.append(0)\n\n dummy = []\n day_count = 0\n\n ppl_inf2 = {}\n\n # print(\"adjaceny list:\",adj_list_normal)\n\n for k2 in range(10000):\n\n dummy = []\n\n for i in carriers_on_day:\n rrand = int(trunc_gauss(2.7, 0.3, 0, population))\n count1 = min(rrand, len(adj_list_normal[i]))\n j = 0\n k3 = 0\n\n while j < count1 and k3 < len(adj_list_normal[i]):\n\n if (adj_list_normal[i][k3] not in infected): ###dkajvbdjbvjhbdz\n p = prob(cautious_community[i][adj_list_normal[i][k3]])\n if (p == 1):\n infected.append(adj_list_normal[i][k3])\n dummy.append(adj_list_normal[i][k3])\n j += 1\n\n k3 += 1\n\n carriers_on_day.remove(i)\n\n for k in infected:\n carriers_on_day.append(k)\n\n day_count += 1\n\n if (sorted(infected) == tot_infection):\n break\n if (len(carriers_on_day) == 0):\n break\n\n # print(\"\\nDay number : \",day_count)\n # print(\"People infected : \",infected)\n ppl_inf2[day_count] = len(infected)\n\n # print(\"\\nOverall infected : \",infected)\n print(\"\\n\\n\\n\\n\")\n\n x = []\n y = []\n\n x1 = []\n y1 = []\n\n for k in ppl_inf.keys():\n y.append(ppl_inf[k])\n x.append(k)\n\n for k in ppl_inf2.keys():\n y1.append(ppl_inf2[k])\n x1.append(k)\n\n plt.plot(x, y)\n\n plt.xlabel('no of days')\n\n plt.ylabel('no of people infected')\n\n plt.title('before precautions')\n\n plt.show()\n\n plt.plot(x1, y1)\n\n plt.xlabel('no of days')\n\n plt.ylabel('no of people infected')\n\n 
plt.title('after precautions')\n\n plt.show()\n\n print(\"longest path is :\", dc)\n print(\"\")\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
mOmUcf/DeepCTR
[ "c8b4630e1278dbcd0876bee2e6519b765a85bc88" ]
[ "deepctr/models/dsin.py" ]
[ "# coding: utf-8\n\"\"\"\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Feng Y, Lv F, Shen W, et al. Deep Session Interest Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1905.06482, 2019.(https://arxiv.org/abs/1905.06482)\n\n\"\"\"\n\nfrom collections import OrderedDict\n\nfrom tensorflow.python.keras.initializers import RandomNormal\nfrom tensorflow.python.keras.layers import (Concatenate, Dense, Embedding,\n Flatten, Input)\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.regularizers import l2\n\nfrom ..inputs import (build_input_features,\n get_embedding_vec_list, get_inputs_list,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,combined_dnn_input)\nfrom ..layers.core import DNN, PredictionLayer\nfrom ..layers.sequence import (AttentionSequencePoolingLayer, BiasEncoding,\n BiLSTM, Transformer)\nfrom ..layers.utils import NoMask, concat_fun\n\n\ndef DSIN(dnn_feature_columns, sess_feature_list, embedding_size=8, sess_max_count=5, bias_encoding=False,\n att_embedding_size=1, att_head_num=8, dnn_hidden_units=(200, 80), dnn_activation='sigmoid', dnn_dropout=0,\n dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, init_std=0.0001, seed=1024, task='binary',\n ):\n \"\"\"Instantiates the Deep Session Interest Network architecture.\n\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param sess_feature_list: list,to indicate session feature sparse field (**now only support sparse feature**),must be a subset of ``feature_dim_dict[\"sparse\"]``\n :param embedding_size: positive integer,sparse feature embedding_size.\n :param sess_max_count: positive int, to indicate the max number of sessions\n :param sess_len_max: positive int, to indicate the max length of each session\n :param bias_encoding: bool. Whether use bias encoding or postional encoding\n :param att_embedding_size: positive int, the embedding size of each attention head\n :param att_head_num: positive int, the number of attention head\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net\n :param dnn_activation: Activation function to use in deep net\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param l2_reg_embedding: float. 
L2 regularizer strength applied to embedding vector\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n\n \"\"\"\n\n if (att_embedding_size * att_head_num != len(sess_feature_list) * embedding_size):\n raise ValueError(\n \"len(session_feature_lsit) * embedding_size must equal to att_embedding_size * att_head_num ,got %d * %d != %d *%d\" % (\n len(sess_feature_list), embedding_size, att_embedding_size, att_head_num))\n\n\n features = build_input_features(dnn_feature_columns)\n\n sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []\n dense_feature_columns = list(\n filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n\n\n\n sparse_varlen_feature_columns = []\n history_fc_names = list(map(lambda x: \"sess\" + x, sess_feature_list))\n for fc in varlen_sparse_feature_columns:\n feature_name = fc.name\n if feature_name in history_fc_names:\n continue\n else:\n sparse_varlen_feature_columns.append(fc)\n\n\n inputs_list = list(features.values())\n\n\n user_behavior_input_dict = {}\n for idx in range(sess_max_count):\n sess_input = OrderedDict()\n for i, feat in enumerate(sess_feature_list):\n sess_input[feat] = features[\"sess_\"+str(idx)+\"_\"+feat]\n\n\n user_behavior_input_dict[\"sess_\" + str(idx)] = sess_input\n\n\n user_sess_length = Input(shape=(1,), name='sess_length')\n\n\n\n embedding_dict = {feat.embedding_name: Embedding(feat.dimension, embedding_size,\n embeddings_initializer=RandomNormal(\n mean=0.0, stddev=init_std, seed=seed),\n embeddings_regularizer=l2(\n l2_reg_embedding),\n name='sparse_emb_' +\n str(i) + '-' + feat.name,\n mask_zero=(feat.name in sess_feature_list)) for i, feat in\n enumerate(sparse_feature_columns)}\n\n\n\n query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,sess_feature_list,sess_feature_list)#queryๆ˜ฏๅ•็‹ฌ็š„\n dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=sess_feature_list)\n dense_value_list = get_dense_input(features, dense_feature_columns)\n\n query_emb = concat_fun(query_emb_list)\n\n\n dnn_input_emb = concat_fun(dnn_input_emb_list)\n dnn_input_emb = Flatten()(NoMask()(dnn_input_emb))\n\n tr_input = sess_interest_division(embedding_dict, user_behavior_input_dict, sparse_feature_columns,\n sess_feature_list, sess_max_count, bias_encoding=bias_encoding)\n\n Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False,\n use_positional_encoding=(not bias_encoding), seed=seed, supports_masking=True,\n blinding=True)\n sess_fea = sess_interest_extractor(\n tr_input, sess_max_count, Self_Attention)\n\n interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True,\n supports_masking=False)(\n [query_emb, sess_fea, user_sess_length])\n\n lstm_outputs = BiLSTM(len(sess_feature_list) * embedding_size,\n layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea)\n lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)(\n [query_emb, lstm_outputs, user_sess_length])\n\n dnn_input_emb = 
Concatenate()(\n [dnn_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)])\n\n dnn_input_emb = combined_dnn_input([dnn_input_emb],dense_value_list)\n output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,\n dnn_dropout, dnn_use_bn, seed)(dnn_input_emb)\n output = Dense(1, use_bias=False, activation=None)(output)\n output = PredictionLayer(task)(output)\n\n sess_input_list = []\n # sess_input_length_list = []\n for i in range(sess_max_count):\n sess_name = \"sess_\" + str(i)\n sess_input_list.extend(get_inputs_list(\n [user_behavior_input_dict[sess_name]]))\n # sess_input_length_list.append(user_behavior_length_dict[sess_name])\n\n\n model = Model(inputs=inputs_list+[user_sess_length], outputs=output)\n\n return model\n\n\ndef sess_interest_division(sparse_embedding_dict, user_behavior_input_dict, sparse_fg_list, sess_feture_list,\n sess_max_count,\n bias_encoding=True):\n tr_input = []\n for i in range(sess_max_count):\n sess_name = \"sess_\" + str(i)\n keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input_dict[sess_name],\n sparse_fg_list, sess_feture_list, sess_feture_list)\n # [sparse_embedding_dict[feat](user_behavior_input_dict[sess_name][feat]) for feat in\n # sess_feture_list]\n keys_emb = concat_fun(keys_emb_list)\n tr_input.append(keys_emb)\n if bias_encoding:\n tr_input = BiasEncoding(sess_max_count)(tr_input)\n return tr_input\n\n\ndef sess_interest_extractor(tr_input, sess_max_count, TR):\n tr_out = []\n for i in range(sess_max_count):\n tr_out.append(TR(\n [tr_input[i], tr_input[i]]))\n sess_fea = concat_fun(tr_out, axis=1)\n return sess_fea\n" ]
[ [ "tensorflow.python.keras.layers.Concatenate", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.initializers.RandomNormal", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.regularizers.l2", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Model" ] ]
chaoxu0512/Pushbroom-satellite-image-SRGAN
[ "519754b20e94dfdf2fbdb075cd930e278a39807c" ]
[ "srgan-g1d1-with-lr-decay/train.py" ]
[ "# import ipdb\nimport argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\nimport sys\nimport time\nimport datetime\nimport glob\nimport random\nimport cv2\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nimport torchvision\nimport torchvision.transforms as transforms\n#import torchvision.utils as utils\n#import torchvision.transforms.functional as F\nfrom torchvision.utils import save_image, make_grid\nfrom torchvision.models import vgg19\n\nfrom math import log10\nfrom tqdm import tqdm\nimport pandas as pd\n\nfrom PIL import Image\nfrom visualize import Visualizer\nfrom torchnet.meter import AverageValueMeter\nfrom models import *\nfrom datasets import *\nimport pytorch_ssim\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--epoch\", type=int, default=0, help=\"epoch to start training from\")\nparser.add_argument(\"--n_epochs\", type=int, default=100, help=\"number of epochs of training\")\nparser.add_argument(\"--train_dataset_name\", type=str, default=\"train\", help=\"name of the train dataset\")\nparser.add_argument(\"--val_dataset_name\", type=str, default=\"val\", help=\"name of the val dataset\")\nparser.add_argument(\"--train_batch_size\", type=int, default=128, help=\"size of the train batches\")\nparser.add_argument(\"--val_batch_size\", type=int, default=1, help=\"size of the val batches\")\nparser.add_argument('--generatorLR', type=float, default=0.0002, help='learning rate for generator')\nparser.add_argument('--discriminatorLR', type=float, default=0.0002, help='learning rate for discriminator')\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of second order momentum of gradient\")\nparser.add_argument(\"--decay_epoch\", type=int, default=50, help=\"start lr decay every decay_epoch epochs\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--hr_height\", type=int, default=128, help=\"high res. image height\")\nparser.add_argument(\"--hr_width\", type=int, default=128, help=\"high res. 
image width\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument('--scale_factor', default=4, type=int, choices=[2, 4, 8], help='super resolution scale factor') \nparser.add_argument(\"--g_every\", type=int, default=1, help=\"train the generator every g_every batches\")\nparser.add_argument(\"--d_every\", type=int, default=1, help=\"train the discriminator every d_every batches\")\nparser.add_argument(\"--plot_every\", type=int, default=100, help=\"plot using visdom every plot_every samples\")\nparser.add_argument(\"--save_every\", type=int, default=1, help=\"save the model every save_every epochs\")\nopt = parser.parse_args()\nprint(opt)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1, 4, 6\"\nos.makedirs(\"saved_models\", exist_ok=True)\nos.makedirs(\"images\", exist_ok=True)\nvis = Visualizer('SRGAN')\n\n# cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nhr_shape = (opt.hr_height, opt.hr_width)\n\n# Initialize generator and discriminator\ngenerator = GeneratorResNet(in_channels=opt.channels, out_channels=opt.channels, n_residual_blocks=16) # change\ndiscriminator = Discriminator(input_shape=(opt.channels, *hr_shape))\nfeature_extractor = FeatureExtractor()\nprint('# generator parameters:', sum(param.numel() for param in generator.parameters())) # change\nprint('# discriminator parameters:', sum(param.numel() for param in discriminator.parameters())) # change\nprint('# feature_extractor parameters:', sum(param.numel() for param in feature_extractor.parameters())) # change\n# print (generator)\n# print (discriminator)\n# print (feature_extractor)\n\n# Set feature extractor to inference mode\nfeature_extractor.eval()\n\n# Losses\ncriterion_GAN = torch.nn.MSELoss(reduction='none')\ncriterion_content = torch.nn.L1Loss(reduction='none')\n\n# Configure model\ngenerator = nn.DataParallel(generator, device_ids=[0, 1, 2])\ngenerator.to(device)\ndiscriminator = nn.DataParallel(discriminator, device_ids=[0, 1, 2])\ndiscriminator.to(device)\nfeature_extractor = nn.DataParallel(feature_extractor, device_ids=[0, 1, 2])\nfeature_extractor.to(device)\n# criterion_GAN = nn.DataParallel(criterion_GAN, device_ids=[0, 1, 2])\n# criterion_GAN = criterion_GAN.to(device)\n# criterion_content = nn.DataParallel(criterion_content, device_ids=[0, 1, 2])\n# criterion_content = criterion_content.to(device)\n\n\nif opt.epoch != 0:\n # Load pretrained models\n generator.load_state_dict(torch.load(\"saved_models/generator_%d_%d.pth\" % (opt.scale_factor,opt.epoch)))\n discriminator.load_state_dict(torch.load(\"saved_models/discriminator_%d_%d.pth\" % (opt.scale_factor,opt.epoch)))\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.generatorLR, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.discriminatorLR, betas=(opt.b1, opt.b2))\n\nscheduler_G = MultiStepLR(optimizer_G, milestones=[opt.decay_epoch], gamma=0.1)\nscheduler_D = MultiStepLR(optimizer_D, milestones=[opt.decay_epoch], gamma=0.1)\n\n\n# Configure data loader\ntrain_dataloader = DataLoader(\n TrainImageDataset(\"../../Datasets/My_dataset/single_channel_100000/%s\" % opt.train_dataset_name, hr_shape=hr_shape, scale_factor = opt.scale_factor), # change\n batch_size=opt.train_batch_size,\n shuffle=True,\n num_workers=opt.n_cpu,\n)\n\nval_dataloader = DataLoader(\n ValImageDataset(\"../../Datasets/My_dataset/single_channel_100000/%s\" % opt.val_dataset_name, 
hr_shape=hr_shape, scale_factor = opt.scale_factor), # change\n batch_size=opt.val_batch_size,\n shuffle=False,\n num_workers=opt.n_cpu,\n)\n\nTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor\n\nloss_GAN_meter = AverageValueMeter()\nloss_content_meter = AverageValueMeter()\nloss_G_meter= AverageValueMeter()\nloss_real_meter = AverageValueMeter()\nloss_fake_meter = AverageValueMeter()\nloss_D_meter = AverageValueMeter()\n\n# ----------\n# Training\n# ----------\n\nresults = {'loss_G': [], 'loss_D': [], 'loss_GAN': [],'loss_content': [], 'loss_real': [], 'loss_fake': [], 'psnr': [], 'ssim': []} \n\nepoch_start = time.time()\nfor epoch in range(opt.epoch, opt.n_epochs):\n\n training_results = {'batch_sizes': 0, 'loss_G': 0, 'loss_D': 0, 'loss_GAN': 0, 'loss_content': 0, 'loss_real': 0, 'loss_fake': 0} \n\n generator.train() \n discriminator.train()\n training_out_path = 'training_results/SR_factor_' + str(opt.scale_factor) + '/' + 'epoch_' + str(epoch) + '/'\n os.makedirs(training_out_path, exist_ok=True)\n\n for i, imgs in enumerate(train_dataloader):\n start = time.time()\n\n training_results['batch_sizes'] += opt.train_batch_size \n\n # Configure model input\n imgs_lr = Variable(imgs[\"lr\"].type(Tensor))\n imgs_hr = Variable(imgs[\"hr\"].type(Tensor))\n\n # Adversarial ground truths\n valid = Variable(Tensor(np.ones((imgs_lr.size(0), *discriminator.module.output_shape))), requires_grad=False)\n fake = Variable(Tensor(np.zeros((imgs_lr.size(0), *discriminator.module.output_shape))), requires_grad=False)\n\n # ------------------\n # Train Generators\n # ------------------\n if i % opt.g_every == 0:\n optimizer_G.zero_grad()\n\n # Generate a high resolution image from low resolution input\n gen_hr = generator(imgs_lr)\n\n # Adversarial loss\n loss_GAN = criterion_GAN(discriminator(gen_hr), valid)\n loss_GAN = loss_GAN.mean()\n \n # Content loss\n gen_features = feature_extractor(gen_hr)\n real_features = feature_extractor(imgs_hr)\n loss_content = criterion_content(gen_features, real_features.detach())\n loss_content = loss_content.mean()\n \n # Total loss\n loss_G = loss_content + 1e-3 * loss_GAN\n loss_G = loss_G.mean()\n\n loss_G.backward(torch.ones_like(loss_G))\n optimizer_G.step()\n scheduler_G.step()\n\n\n loss_GAN_meter.add(loss_GAN.item())\n loss_content_meter.add(loss_content.item())\n loss_G_meter.add(loss_G.item())\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n if i % opt.d_every == 0:\n optimizer_D.zero_grad()\n\n # Loss of real and fake images\n loss_real = criterion_GAN(discriminator(imgs_hr), valid)\n loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)\n loss_real = loss_real.mean()\n loss_fake = loss_fake.mean()\n\n # Total loss\n loss_D = (loss_real + loss_fake) / 2\n loss_D = loss_D.mean()\n\n loss_D.backward(torch.ones_like(loss_D))\n optimizer_D.step()\n scheduler_D.step()\n\n loss_real_meter.add(loss_real.item())\n loss_fake_meter.add(loss_fake.item())\n loss_D_meter.add(loss_D.item())\n\n\n # --------------\n # Log Progress\n # --------------\n\n # loss for current batch before optimization \n training_results['loss_G'] += loss_G.item() * opt.train_batch_size \n training_results['loss_D'] += loss_D.item() * opt.train_batch_size \n training_results['loss_GAN'] += loss_GAN.item() * opt.train_batch_size \n training_results['loss_content'] += loss_content.item() * opt.train_batch_size \n training_results['loss_real'] += loss_real.item() * opt.train_batch_size \n training_results['loss_fake'] += 
loss_fake.item() * opt.train_batch_size\n\n batch_time = time.time() - start \n print('[Epoch %d/%d] [Batch %d/%d] [loss_G: %.4f] [loss_D: %.4f] [loss_GAN: %.4f] [loss_content: %.4f] [loss_real: %.4f] [loss_fake: %.4f] [batch time: %.4fs]' % (\n epoch, opt.n_epochs, i, len(train_dataloader), training_results['loss_G'] / training_results['batch_sizes'],\n training_results['loss_D'] / training_results['batch_sizes'],\n training_results['loss_GAN'] / training_results['batch_sizes'],\n training_results['loss_real'] / training_results['batch_sizes'],\n training_results['loss_content'] / training_results['batch_sizes'],\n training_results['loss_fake'] / training_results['batch_sizes'],\n batch_time))\n\n\n # Save training image and plot loss\n batches_done = epoch * len(train_dataloader) + i\n if batches_done % opt.plot_every == 0: \n# imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=opt.scale_factor)\n# gen_hr = make_grid(gen_hr, nrow=8, normalize=True)\n# imgs_lr = make_grid(imgs_lr, nrow=8, normalize=True)\n# img_grid = torch.cat((imgs_lr, gen_hr), -1)\n# save_image(img_grid, \"images/%d.png\" % batches_done, normalize=True)\n \n imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=opt.scale_factor)\n training_out_imgs_lr_path = training_out_path + \"imgs_lr/\"\n training_out_imgs_hr_path = training_out_path + \"imgs_hr/\"\n training_out_gen_hr_path = training_out_path + \"gen_hr/\"\n os.makedirs(training_out_imgs_lr_path, exist_ok=True)\n os.makedirs(training_out_imgs_hr_path, exist_ok=True)\n os.makedirs(training_out_gen_hr_path, exist_ok=True)\n# save_image(imgs_lr.detach()[:1], training_out_imgs_lr_path + \"imgs_lr_%d.png\" % batches_done, normalize=True)\n# save_image(imgs_hr.data[:1], training_out_imgs_hr_path + \"imgs_hr_%d.png\" % batches_done, normalize=True)\n# save_image(gen_hr.data[:1], training_out_gen_hr_path + \"gen_hr_%d.png\" % batches_done, normalize=True)\n save_image(imgs_lr[:1], training_out_imgs_lr_path + \"imgs_lr_%d.png\" % batches_done, normalize=True)\n save_image(imgs_hr[:1], training_out_imgs_hr_path + \"imgs_hr_%d.png\" % batches_done, normalize=True)\n save_image(gen_hr[:1], training_out_gen_hr_path + \"gen_hr_%d.png\" % batches_done, normalize=True)\n\n gen_hr = make_grid(gen_hr, nrow=8, normalize=True)\n imgs_lr = make_grid(imgs_lr, nrow=8, normalize=True)\n imgs_hr = make_grid(imgs_hr, nrow=8, normalize=True)\n img_grid_gl = torch.cat((gen_hr, imgs_lr), -1)\n img_grid_hg = torch.cat((imgs_hr, gen_hr), -1)\n save_image(img_grid_hg, \"images/%d_hg.png\" % batches_done, normalize=True)\n save_image(img_grid_gl, \"images/%d_gl.png\" % batches_done, normalize=True)\n\n # vis.images(imgs_lr.detach().cpu().numpy()[:1] * 0.5 + 0.5, win='imgs_lr_train')\n # vis.images(gen_hr.data.cpu().numpy()[:1] * 0.5 + 0.5, win='img_gen_train')\n # vis.images(imgs_hr.data.cpu().numpy()[:1] * 0.5 + 0.5, win='img_hr_train')\n vis.plot('loss_G_train', loss_G_meter.value()[0])\n vis.plot('loss_D_train', loss_D_meter.value()[0])\n vis.plot('loss_GAN_train', loss_GAN_meter.value()[0])\n vis.plot('loss_content_train', loss_content_meter.value()[0]) \n vis.plot('loss_real_train', loss_real_meter.value()[0])\n vis.plot('loss_fake_train', loss_fake_meter.value()[0])\n \n loss_GAN_meter.reset()\n loss_content_meter.reset()\n loss_G_meter.reset()\n loss_real_meter.reset()\n loss_fake_meter.reset()\n loss_D_meter.reset()\n\n\n # validate the generator model\n generator.eval()\n valing_out_path = 'valing_results/SR_factor_' + str(opt.scale_factor) + '/' + 'epoch_' + str(epoch) + 
'/'\n os.makedirs(valing_out_path, exist_ok=True)\n \n with torch.no_grad():\n # val_bar = tqdm(val_dataloader)\n valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}\n val_images = []\n for i, imgs in enumerate(val_dataloader):\n start = time.time()\n\n valing_results['batch_sizes'] += opt.val_batch_size \n\n # Configure model input\n img_lr, img_hr, img_hr_restore = imgs\n imgs_lr = Variable(imgs[\"lr\"].type(Tensor))\n imgs_hr = Variable(imgs[\"hr\"].type(Tensor))\n img_hr_restore = Variable(imgs[\"hr_restore\"].type(Tensor))\n gen_hr = generator(imgs_lr)\n\n batch_mse = ((gen_hr - imgs_hr) ** 2).data.mean()\n valing_results['mse'] += batch_mse * opt.val_batch_size\n batch_ssim = pytorch_ssim.ssim(gen_hr, imgs_hr).item()\n valing_results['ssims'] += batch_ssim * opt.val_batch_size\n valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_sizes']))\n valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']\n\n # val_bar.set_description(desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (valing_results['psnr'], valing_results['ssim']), refresh=True) \n print('[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (valing_results['psnr'], valing_results['ssim']))\n val_images.extend(\n [imgs_hr.data.cpu().squeeze(0), gen_hr.data.cpu().squeeze(0),\n img_hr_restore.data.cpu().squeeze(0)])\n \n val_images = torch.stack(val_images) # ๅฐ†list้‡ๆ–ฐๅ †ๆˆ๏ผ”็ปดๅผ ้‡\n # val_images = torch.chunk(val_images, val_images.size(0) // 15) # ่‹ฅ้ชŒ่ฏ้›†ๅคงๅฐไธบ3000๏ผŒๅˆ™3000=15*200,15=3*5,็”Ÿๆˆ็š„ๆฏๅผ ๅ›พ็‰‡ไธญๅ…ฑๆœ‰15ๅผ ๅญๅ›พ\n val_images = torch.chunk(val_images, val_images.size(0) // 3)\n val_save_bar = tqdm(val_images, desc='[saving training results]')\n index = 1\n for image in val_save_bar:\n image = make_grid(image, nrow=3, padding=5, normalize=True)\n save_image(image, valing_out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5, normalize=True)\n index += 1\n \n\n # save loss\\scores\\psnr\\ssim and visualize\n results['loss_G'].append(training_results['loss_G'] / training_results['batch_sizes'])\n results['loss_D'].append(training_results['loss_D'] / training_results['batch_sizes'])\n results['loss_GAN'].append(training_results['loss_GAN'] / training_results['batch_sizes'])\n results['loss_content'].append(training_results['loss_content'] / training_results['batch_sizes'])\n results['loss_real'].append(training_results['loss_real'] / training_results['batch_sizes'])\n results['loss_fake'].append(training_results['loss_fake'] / training_results['batch_sizes'])\n results['psnr'].append(valing_results['psnr'])\n results['ssim'].append(valing_results['ssim']) \n \n vis.plot('loss_G_epoch', results['loss_G'][epoch])\n vis.plot('loss_D_epoch', results['loss_D'][epoch])\n vis.plot('loss_GAN_epoch', results['loss_GAN'][epoch])\n vis.plot('loss_content_epoch', results['loss_content'][epoch])\n vis.plot('loss_real_epoch', results['loss_real'][epoch])\n vis.plot('loss_fake_epoch', results['loss_fake'][epoch])\n vis.plot('psnr_epoch', results['psnr'][epoch])\n vis.plot('ssim_epoch', results['ssim'][epoch])\n\n\n\n # save model parameters\n data_out_path = './statistics/'\n os.makedirs(data_out_path, exist_ok=True)\n if epoch % opt.save_every == 0:\n # save_image(gen_hr.data[:16], 'images/%s.png' % epoch, normalize=True,range=(-1, 1))\n torch.save(generator.state_dict(), \"saved_models/generator_%d_%d.pth\" % (opt.scale_factor,epoch))\n torch.save(discriminator.state_dict(), 
\"saved_models/discriminator_%d_%d.pth\" % (opt.scale_factor,epoch))\n\n data_frame = pd.DataFrame(\n data={'loss_G': results['loss_G'], 'loss_D': results['loss_D'], \n 'loss_GAN': results['loss_GAN'], 'loss_content': results['loss_content'], \n 'loss_real': results['loss_real'], 'loss_fake': results['loss_fake'],\n 'PSNR': results['psnr'], 'SSIM': results['ssim']},\n # index=range(0, epoch)\n index=None\n )\n data_frame.to_csv(data_out_path + 'SR_factor_' + str(opt.scale_factor) + '_train_results.csv', index_label='Epoch')\n\nelapse_time = time.time() - epoch_start\nelapse_time = datetime.timedelta(seconds=elapse_time)\nprint(\"Training and validating time {}\".format(elapse_time))" ]
[ [ "torch.cat", "torch.stack", "torch.nn.MSELoss", "pandas.DataFrame", "torch.no_grad", "torch.nn.functional.interpolate", "torch.optim.lr_scheduler.MultiStepLR", "torch.nn.L1Loss", "torch.cuda.is_available", "torch.load", "torch.ones_like", "torch.nn.DataParallel" ] ]
stubbi/Cirq
[ "680f897345eb1c71c9242515edda8f04b8594319" ]
[ "examples/bell_inequality.py" ]
[ "\"\"\"\nBell's theorem or inequality proves that entanglement based\nobservations can't be reproduced with any local realist theory [1].\n\nThis example shows Bell's inequality in form of CHSH game where two\nplayers Alice and Bob receive an input bit x and y respectively and\nproduce an output a and b based on the input bit.\nThe goal is to maximize the probability to satisfy the condition [2]:\n a XOR b = x AND y\n\nIn the classical deterministic case, the highest probability\nachievable is 75%. While with quantum correlations, it can\nachieve higher success probability. In the quantum case, two players\nAlice and Bob start with a shared Bell-pair entangled state. The\nrandom input x and y is provided by referee for Alice and Bob. The\nsuccess probability of satisfying the above condition will be\ncos(theta/2)^2 if Alice and Bob measure their entangled qubit in\nmeasurement basis V and W where angle between V and W is theta.\nTherefore, maximum success probability is cos(pi/8)^2 ~ 85.3%\nwhen theta = pi/4.\n\nIn the usual implementation [2], Alice and Bob share the Bell state\nwith the same value and opposite phase. If the input x (y) is 0, Alice (Bob)\nrotates in Y-basis by angle -pi/16 and if the input is 1, Alice (Bob)\nrotates by angle 3pi/16. Here, Alice and Bob start with the entangled\nBell state with same value and phase. The same success probability is\nachieved by following procedure: Alice rotate in X-basis by angle\n-pi/4 followed by controlled-rotation by angle pi/2 in X-basis for\nAlice (Bob) based on input x (y).\n\n[1] https://en.wikipedia.org/wiki/Bell%27s_theorem\n[2] R. de Wolf. Quantum Computing: Lecture Notes\n(arXiv:1907.09415, Section 15.2)\n\n=== EXAMPLE OUTPUT ===\nCircuit:\n(0, 0): โ”€โ”€โ”€Hโ”€โ”€โ”€@โ”€โ”€โ”€X^-0.25โ”€โ”€โ”€Xโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€M('a')โ”€โ”€โ”€\n โ”‚ โ”‚\n(0, 1): โ”€โ”€โ”€Hโ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€@^0.5โ”€โ”€โ”€โ”€M('x')โ”€โ”€โ”€\n โ”‚\n(1, 0): โ”€โ”€โ”€โ”€โ”€โ”€โ”€Xโ”€โ”€โ”€Xโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€M('b')โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\n โ”‚\n(1, 1): โ”€โ”€โ”€Hโ”€โ”€โ”€โ”€โ”€โ”€โ”€@^0.5โ”€โ”€โ”€โ”€โ”€M('y')โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\n\nSimulating 75 repetitions...\n\nResults\na: _1___1_1_1111__11__11______11__1______111_______1_______11___11_11__1_1_1_1\nb: _1_______11_1_1__1_1111_11_11_1_1_____11___1__111__1_1_1_1__111_11_11_1_1_1\nx: 1_1____1______11_1_1_1_11111___111_1__1_1__11_111__1_11_11_11______1____1__\ny: ____1__111_______1___11_111__1_111______111___11_11_11__1_1_1111_1111__1_11\n(a XOR b) == (x AND y):\n 11111_11111_11___11111_111_111_11_111111111_1111111_111_1111111111111111111\nWin rate: 84.0%\n\"\"\"\n\nimport numpy as np\n\nimport cirq\n\n\ndef main():\n # Create circuit.\n circuit = make_bell_test_circuit()\n print('Circuit:')\n print(circuit)\n\n # Run simulations.\n print()\n repetitions = 75\n print(f'Simulating {repetitions} repetitions...')\n result = cirq.Simulator().run(program=circuit, repetitions=repetitions)\n\n # Collect results.\n a = np.array(result.measurements['a'][:, 0])\n b = np.array(result.measurements['b'][:, 0])\n x = np.array(result.measurements['x'][:, 0])\n y = np.array(result.measurements['y'][:, 0])\n outcomes = a ^ b == x & y\n win_percent = len([e for e in outcomes if e]) * 100 / repetitions\n\n # Print data.\n print()\n print('Results')\n print('a:', bitstring(a))\n print('b:', bitstring(b))\n print('x:', bitstring(x))\n print('y:', bitstring(y))\n print('(a XOR b) == (x AND y):\\n ', bitstring(outcomes))\n print(f'Win rate: 
{win_percent}%')\n\n\ndef make_bell_test_circuit():\n alice = cirq.GridQubit(0, 0)\n bob = cirq.GridQubit(1, 0)\n alice_referee = cirq.GridQubit(0, 1)\n bob_referee = cirq.GridQubit(1, 1)\n\n circuit = cirq.Circuit()\n\n # Prepare shared entangled state.\n circuit.append(\n [\n cirq.H(alice),\n cirq.CNOT(alice, bob),\n ]\n )\n\n # Referees flip coins.\n circuit.append(\n [\n cirq.H(alice_referee),\n cirq.H(bob_referee),\n ]\n )\n\n # Players do a sqrt(X) based on their referee's coin.\n circuit.append(\n [\n cirq.X(alice) ** -0.25,\n cirq.CNOT(alice_referee, alice) ** 0.5,\n cirq.CNOT(bob_referee, bob) ** 0.5,\n ]\n )\n\n # Then results are recorded.\n circuit.append(\n [\n cirq.measure(alice, key='a'),\n cirq.measure(bob, key='b'),\n cirq.measure(alice_referee, key='x'),\n cirq.measure(bob_referee, key='y'),\n ]\n )\n\n return circuit\n\n\ndef bitstring(bits):\n return ''.join('1' if e else '_' for e in bits)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
yougoforward/PyTorch-Encoding
[ "4059cc4055cb81ad655099514455bc07e7535adb" ]
[ "encoding/functions/dist_syncbn.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Hang Zhang\n## Email: [email protected]\n## Copyright (c) 2020\n##\n## LICENSE file in the root directory of this source tree\n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport torch\nfrom torch.autograd.function import Function\n\nfrom encoding import cpu\nif torch.cuda.device_count() > 0:\n from encoding import gpu\n\n__all__ = ['dist_syncbatchnorm']\n\nclass dist_syncbatchnorm_(Function):\n @staticmethod\n def forward(ctx, x, gamma, beta, running_mean, running_var, eps, momentum, training, process_group):\n x = x.contiguous()\n ctx.training = training\n ctx.momentum = momentum\n ctx.eps = eps\n ctx.process_group = process_group\n\n if not ctx.training:\n _ex, _var = running_mean.contiguous(), running_var.contiguous()\n _exs = _var + _ex ** 2 \n if x.is_cuda:\n y = gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)\n else:\n y = cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)\n ctx.save_for_backward(x, _ex, _exs, gamma, beta)\n return y\n\n size = x.numel() // x.size(1)\n if size == 1:\n raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))\n\n if x.is_cuda:\n _ex, _exs = gpu.expectation_forward(x)\n else:\n raise NotImplemented\n\n count = torch.Tensor([1]).to(x.device)\n count_all_reduce = torch.distributed.all_reduce(count, group=process_group, async_op=True)\n _ex_all_reduce = torch.distributed.all_reduce(_ex, group=process_group, async_op=True)\n _exs_all_reduce = torch.distributed.all_reduce(_exs, group=process_group, async_op=True)\n\n count_all_reduce.wait()\n _ex_all_reduce.wait()\n _exs_all_reduce.wait()\n\n _ex = _ex / count\n _exs = _exs / count\n\n # Update running stats\n _var = _exs - _ex ** 2\n running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex)\n running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var)\n\n # Mark in-place modified tensors\n ctx.mark_dirty(running_mean, running_var)\n\n # BN forward + activation\n if x.is_cuda:\n y = gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)\n else:\n y = cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)\n\n ctx.save_for_backward(x, _ex, _exs, gamma, beta)\n return y\n\n @staticmethod\n def backward(ctx, dz):\n x, _ex, _exs, gamma, beta = ctx.saved_tensors\n dz = dz.contiguous()\n\n # BN backward\n if dz.is_cuda:\n dx, _dex, _dexs, dgamma, dbeta = \\\n gpu.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps)\n else:\n raise NotImplemented\n\n if ctx.training:\n process_group = ctx.process_group\n count = torch.Tensor([1]).to(x.device)\n count_all_reduce = torch.distributed.all_reduce(count, group=process_group, async_op=True)\n _dex_all_reduce = torch.distributed.all_reduce(_dex, group=process_group, async_op=True)\n _dexs_all_reduce = torch.distributed.all_reduce(_dexs, group=process_group, async_op=True)\n\n count_all_reduce.wait()\n _dex_all_reduce.wait()\n _dexs_all_reduce.wait()\n\n _dex = _dex / count\n _dexs = _dexs / count\n\n if x.is_cuda:\n dx_ = gpu.expectation_backward(x, _dex, _dexs)\n else:\n raise NotImplemented\n dx = dx + dx_\n\n return dx, dgamma, dbeta, None, None, None, None, None, None\n\ndist_syncbatchnorm = dist_syncbatchnorm_.apply\n" ]
[ [ "torch.Tensor", "torch.distributed.all_reduce", "torch.cuda.device_count" ] ]
77ph/tgnews
[ "e11ff65f2e8c3fce8978fd38b74bd0e2461583e7" ]
[ "df_news.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 23 12:53:29 2019\n\n@author: innerm\n\n\nsudo apt update && sudo apt -y upgrade\nsudo apt install mariadb-server\nsudo systemctl status mysql\nsudo mysqladmin -u root version\n\nsudo mysql -u root\nMariaDB [(none)]> CREATE USER 'tgnews'@'localhost' IDENTIFIED BY '123456'; \nQuery OK, 0 rows affected (0.761 sec)\n\nMariaDB [(none)]> GRANT ALL PRIVILEGES ON *.* TO 'tgnews'@'localhost' WITH GRANT OPTION; \nQuery OK, 0 rows affected (0.400 sec)\n\nMariaDB [(none)]> exit\n\nmysql -u tgnews -p\"123456\"\nMariaDB [(none)]> create database tgnews;\nQuery OK, 1 row affected (0.463 sec)\n\nMariaDB [(none)]> exit\n\n# https://github.com/PyMySQL/PyMySQL\npip3 install PyMySQL\npip3 install sqlalchemy\n\nhttps://docs.sqlalchemy.org/en/13/core/type_basics.html#module-sqlalchemy.types\nhttps://dev.mysql.com/doc/refman/8.0/en/data-types.html\n\n\"\"\"\n\nimport os\n#import cld2\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n#from shutil import copyfile\n#import mysql.connector as mysql\n\nimport sqlalchemy\nimport numpy as np\nimport time\n\n# path = '/home/innerm/ML/data2'\npath = './20200503'\nstage = 'stage1'\nfileout= stage + '.csv'\nfiles = []\n\n# r=root, d=directories, f = files\nfor r, d, f in os.walk(path):\n for file in f:\n if '.html' in file:\n files.append(os.path.join(r, file))\n\nurl_list=[]\nsite_name_list=[]\ntitle_list=[]\ndesc_list=[]\npubtime_list=[]\ntime_list=[]\ntext_list=[]\n\nfiles1=files\n\nmytime = time.process_time()\n\nfor fname in files1:\n with open(fname, \"r\") as f:\n contents = f.read()\n soup = BeautifulSoup(contents, 'lxml')\n text=soup.getText()\n if text:\n text=text.strip()\n text_list.append(text)\n else:\n text_list.append('')\n url = soup.find(\"meta\", property=\"og:url\")\n if url:\n url_list.append(url['content'])\n else:\n url_list.append('')\n \n site_name = soup.find(\"meta\", property=\"og:site_name\")\n if site_name:\n site_name_list.append(site_name['content'])\n else:\n site_name_list.append('')\n \n title = soup.find(\"meta\", property=\"og:title\")\n if title:\n title_list.append(title['content'])\n else:\n title_list.append('')\n \n desc = soup.find(\"meta\", property=\"og:description\")\n if desc:\n desc_list.append(desc['content'])\n else:\n desc_list.append('')\n \n pubtime = soup.find(\"meta\", property=\"article:published_time\")\n if pubtime:\n pubtime_list.append(pubtime['content'])\n else:\n pubtime_list.append(0)\n \n souptime = soup.find(\"time\")\n if souptime:\n time_list.append(souptime['datetime'])\n else:\n time_list.append(0)\n\n#df=pd.read_csv('files_by_lang-cld2.csv')\n#df_en=df[df['lang']==1]\n\ndf=pd.DataFrame()\n\ndf['files']=pd.Series(files1)\ndf['url']=pd.Series(url_list)\ndf['site_name']=pd.Series(site_name_list) \ndf['title']=pd.Series(title_list)\ndf['desc']=pd.Series(desc_list) \ndf['pubtime']=pd.Series(pubtime_list)\ndf['time']=pd.Series(time_list)\ndf['text']=pd.Series(text_list)\n\n\nprint(\"Total dir read to df seconds \",time.process_time() - mytime)\n\ndataTypeObj = df.dtypes['pubtime']\n \nprint('Data type of pubtime column in the Dataframe :',dataTypeObj)\n\ndataTypeObj = df.dtypes['time']\n \nprint('Data type of pubtime column in the Dataframe :',dataTypeObj) \n\n\nmytime = time.process_time()\n\ndf.to_csv(fileout,mode='w')\n\nprint(\"Total df to .csv seconds \",time.process_time() - mytime)\n\ndatabase_username = 'tgnews'\ndatabase_password = '123456'\ndatabase_ip = '127.0.0.1'\ndatabase_name = 'tgnews'\n\n#engine = 
sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@{2}/{3}'.format(database_username, database_password, database_ip, database_name),pool_recycle=1)\n\nmytime = time.process_time()\n\nengine = sqlalchemy.create_engine('mysql+pymysql://{0}:{1}@{2}/{3}'.format(database_username, database_password, database_ip, database_name),pool_recycle=1)\n\n# [SQL: INSERT INTO table_name_for_df (`index`, files, url, site_name, title, `desc`, pubtime, time, text) VALUES (%(index)s, %(files)s, %(url)s, %(site_name)s, %(title)s, %(desc)s, %(pubtime)s, %(time)s, %(text)s)]\n# 'pubtime': '2020-05-03T05:20:00+00:00', 'time': '2020-05-03T05:20:00+00:00'\ndf.to_sql(con=engine, name=stage, if_exists='replace', chunksize=20000, \ndtype={'files': sqlalchemy.types.NVARCHAR(length=255), 'url': sqlalchemy.types.NVARCHAR(length=4096),'site_name': sqlalchemy.types.NVARCHAR(length=255),'title': sqlalchemy.types.Text,\n'desc': sqlalchemy.types.Text, pubtime: sqlalchemy.types.Text, 'time': sqlalchemy.types.Text, 'text': sqlalchemy.types.Text(length=4294000000)})\n\nprint(\"Total df to mysql seconds \",time.process_time() - mytime)\n\nprint(\"Export: OK\")\n\nmytime = time.process_time()\n\n### options 1 with chunksize\n### Achtung! https://stackoverflow.com/questions/31837979/pandas-sql-chunksize/31839639#31839639 - result is \"iterator of multiple dataframes.\"\nres = pd.read_sql_table(stage,\n con=engine,\n index_col='index',\n coerce_float=True,\n columns=['files',\n 'url',\n 'site_name',\n 'title',\n 'desc',\n 'pubtime',\n 'time',\n 'text'],\n### Need if pubtime or time datetime type, not just text\n# parse_dates=['pubtime',\n# 'time']\n chunksize=20000)\nfor df1 in res:\n print(df1)\n\ndel res\ndel df1\n\nmytime = time.process_time()\n\nprint(\"Total mysql to df seconds [options 1]\",time.process_time() - mytime)\n\n#### options 2 with SQL_Query without \"pandas\" chunksize\nSQL_Query = \"select * from \" + stage\n#SQL_Query = \"select files,url from \" + stage\ndf1 = pd.read_sql_query(SQL_Query, engine)\n\nprint(df1)\n\nprint(\"Total mysql to df seconds [options 2]\",time.process_time() - mytime)\n\nprint(\"Import: OK\")\n" ]
[ [ "pandas.DataFrame", "pandas.read_sql_query", "pandas.read_sql_table", "pandas.Series" ] ]
larc/SPFN
[ "1c8cc1eb6de38adce0251bfbddfa09a77a8cc159" ]
[ "spfn/lib/network.py" ]
[ "import os, sys\nBASE_DIR = os.path.dirname(__file__)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '..'))\n\nimport architecture\nimport evaluation\nimport fitter_factory\nimport prediction_io\n\nimport time\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport re\nimport subprocess\n\nclass Network(object):\n def __init__(self, n_max_instances, config, is_new_training):\n self.n_max_instances = n_max_instances\n self.config = config\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.global_step = tf.Variable(0)\n\n self.is_training = tf.placeholder(dtype=tf.bool, shape=[])\n self.P = tf.placeholder(dtype=tf.float32, shape=[None, None, 3])\n self.batch_size = tf.shape(self.P)[0]\n\n if config.get_bn_decay_step() < 0:\n self.bn_decay = None\n else:\n self.bn_decay = self.get_batch_norm_decay(self.global_step, self.batch_size, config.get_bn_decay_step())\n tf.summary.scalar('bn_decay', self.bn_decay)\n\n self.list_of_primitives = config.get_list_of_primitives()\n self.gt_dict = evaluation.create_gt_dict(n_max_instances)\n\n if config.use_direct_regression():\n self.pred_dict, direct_loss = architecture.get_direct_regression_model(\n scope='DPPN', \n P=self.P, \n n_max_instances=n_max_instances, \n gt_dict=self.gt_dict,\n is_training=self.is_training, \n bn_decay=self.bn_decay\n )\n self.total_loss = tf.reduce_mean(direct_loss)\n self.total_miou_loss = tf.zeros(shape=[], dtype=tf.float32)\n self.total_normal_loss = tf.zeros(shape=[], dtype=tf.float32)\n self.total_type_loss = tf.zeros(shape=[], dtype=tf.float32)\n self.total_residue_loss = tf.zeros(shape=[], dtype=tf.float32)\n self.total_parameter_loss = tf.zeros(shape=[], dtype=tf.float32)\n else:\n self.pred_dict = architecture.get_per_point_model(\n scope='SPFN', \n P=self.P, \n n_max_instances=n_max_instances, \n is_training=self.is_training, \n bn_decay=self.bn_decay,\n )\n\n eval_dict = evaluation.evaluate(\n self.pred_dict, \n self.gt_dict, \n is_eval=False,\n is_nn=True\n )\n self.collect_losses(eval_dict['loss_dict'])\n\n learning_rate = self.get_learning_rate(\n config.get_init_learning_rate(),\n self.global_step,\n self.batch_size,\n config.get_decay_step(),\n config.get_decay_rate())\n tf.summary.scalar('learning_rate', learning_rate)\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_op = self.create_train_op(learning_rate, self.total_loss)\n\n self.summary = tf.summary.merge_all()\n self.saver = tf.train.Saver(max_to_keep=3)\n\n def create_train_op(self, learning_rate, total_loss):\n # Skip gradient update if any gradient is infinite. This should not happen and is for debug only.\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.optimizer = optimizer\n grads_and_vars = optimizer.compute_gradients(total_loss)\n grads = [g for g, v in grads_and_vars]\n varnames = [v for g, v in grads_and_vars]\n is_finite = tf.ones(dtype=tf.bool, shape=[])\n for g, v in grads_and_vars:\n if g is not None:\n g_is_finite = tf.reduce_any(tf.is_finite(g))\n g_is_finite_cond = tf.cond(g_is_finite, tf.no_op, lambda: tf.Print(g_is_finite, [g], '{} is not finite:'.format(str(g))))\n with tf.control_dependencies([g_is_finite_cond]):\n is_finite = tf.logical_and(is_finite, g_is_finite)\n train_op = tf.cond(\n is_finite, \n lambda: optimizer.apply_gradients(zip(grads, varnames), global_step=self.global_step), \n lambda: tf.Print(is_finite, [is_finite], 'Some gradients are not finite! 
Skipping gradient backprop.')\n )\n return train_op\n\n def collect_losses(self, loss_dict):\n self.total_loss = tf.zeros(shape=[], dtype=tf.float32)\n\n self.normal_loss_per_data = loss_dict['normal_loss']\n self.total_normal_loss = tf.reduce_mean(self.normal_loss_per_data)\n normal_loss_multiplier = self.config.get_normal_loss_multiplier()\n if normal_loss_multiplier > 0:\n tf.summary.scalar('total_normal_loss', self.total_normal_loss)\n self.total_loss += normal_loss_multiplier * self.total_normal_loss\n\n self.type_loss_per_data = loss_dict['type_loss']\n self.total_type_loss = tf.reduce_mean(self.type_loss_per_data)\n type_loss_multiplier = self.config.get_type_loss_multiplier()\n if type_loss_multiplier > 0:\n tf.summary.scalar('total_type_loss', self.total_type_loss)\n self.total_loss += type_loss_multiplier * self.total_type_loss\n\n self.miou_loss_per_data = loss_dict['avg_miou_loss']\n self.miou_loss_per_instance = loss_dict['miou_loss']\n self.total_miou_loss = tf.reduce_mean(self.miou_loss_per_data)\n miou_loss_multiplier = self.config.get_miou_loss_multiplier()\n if miou_loss_multiplier > 0:\n tf.summary.scalar('total_miou_loss', self.total_miou_loss)\n self.total_loss += miou_loss_multiplier * self.total_miou_loss\n\n self.residue_loss_per_data = loss_dict['avg_residue_loss']\n self.residue_loss_per_instance = loss_dict['residue_loss']\n self.total_residue_loss = tf.reduce_mean(self.residue_loss_per_data)\n residue_loss_multiplier = self.config.get_residue_loss_multiplier()\n if residue_loss_multiplier > 0:\n tf.summary.scalar('total_residue_loss', self.total_residue_loss)\n self.total_loss += residue_loss_multiplier * self.total_residue_loss\n\n self.parameter_loss_per_data = loss_dict['avg_parameter_loss']\n self.parameter_loss_per_instance = loss_dict['parameter_loss']\n self.total_parameter_loss = tf.reduce_mean(self.parameter_loss_per_data)\n parameter_loss_multiplier = self.config.get_parameter_loss_multiplier()\n if parameter_loss_multiplier > 0:\n tf.summary.scalar('total_parameter_loss', self.total_parameter_loss)\n self.total_loss += parameter_loss_multiplier * self.total_parameter_loss\n\n self.total_loss *= self.config.get_total_loss_multiplier()\n tf.summary.scalar('total_loss', self.total_loss)\n\n def train(self, sess, train_data, val_data, n_epochs, val_interval, snapshot_interval, model_dir, log_dir):\n assert n_epochs > 0\n\n train_writer = tf.summary.FileWriter(os.path.join(log_dir, 'train'), sess.graph)\n val_writer = tf.summary.FileWriter(os.path.join(log_dir, 'val'), sess.graph)\n if not os.path.exists(model_dir): \n os.makedirs(model_dir)\n if not os.path.exists(self.config.get_val_prediction_dir()):\n os.makedirs(self.config.get_val_prediction_dir())\n print('Training started.')\n\n start_time = time.time()\n for epoch in range(1, n_epochs + 1):\n for batch in train_data.create_iterator():\n feed_dict = self.create_feed_dict(batch, is_training=True)\n step, _, summary, loss = sess.run([self.global_step, self.train_op, self.summary, self.total_loss], feed_dict=feed_dict)\n\n elapsed_min = (time.time() - start_time) / 60\n print('Epoch: {:d} | Step: {:d} | Batch Loss: {:6f} | Elapsed: {:.2f}m'.format(epoch, step, loss, elapsed_min))\n\n if step >= self.config.get_writer_start_step():\n train_writer.add_summary(summary, step)\n \n if step % val_interval == 0:\n print('Start validating...')\n msg = 'Epoch: {:d} | Step: {:d}'.format(epoch, step)\n\n remain_min = (n_epochs * train_data.n_data - step) * elapsed_min / step\n\n predict_result = 
self.predict_and_save(sess, val_data, save_dir=os.path.join(self.config.get_val_prediction_dir(), 'step{}'.format(step)))\n msg = predict_result['msg']\n msg = 'Validation: ' + msg + ' | Elapsed: {:.2f}m, Remaining: {:.2f}m'.format(elapsed_min, remain_min)\n print(msg)\n # clean up old predictions\n prediction_n_keep = self.config.get_val_prediction_n_keep()\n if prediction_n_keep != -1:\n self.clean_predictions_earlier_than(step=step, prediction_dir=self.config.get_val_prediction_dir(), n_keep=prediction_n_keep)\n if step >= self.config.get_writer_start_step():\n val_writer.add_summary(predict_result['summary'], step)\n \n if step % snapshot_interval == 0:\n print('Saving snapshot at step {:d}...'.format(step))\n self.saver.save(sess, os.path.join(model_dir, 'tf_model.ckpt'), global_step=step)\n print('Done saving model at step {:d}.'.format(step))\n\n train_writer.close()\n val_writer.close();\n elapsed_min = (time.time() - start_time) / 60\n print('Training finished.')\n print('Elapsed: {:.2f}m.'.format(elapsed_min))\n print('Saved {}.'.format(self.saver.save(sess, os.path.join(model_dir, 'tf_model.ckpt'), global_step=step)))\n\n def format_loss_result(self, losses):\n msg = ''\n msg += 'Total Loss: {:6f}'.format(losses['total_loss'])\n msg += ', MIoU Loss: {:6f}'.format(losses['total_miou_loss'])\n msg += ', Normal Loss: {:6f}'.format(losses['total_normal_loss'])\n msg += ', Type Loss: {:6f}'.format(losses['total_type_loss'])\n msg += ', Parameter Loss: {:6f}'.format(losses['total_parameter_loss'])\n msg += ', Residue Loss: {:6f}'.format(losses['total_residue_loss'])\n return msg\n\n def clean_predictions_earlier_than(self, step, prediction_dir, n_keep):\n prog = re.compile('step([0-9]+)')\n arr = []\n for f in os.listdir(prediction_dir):\n if os.path.isdir(os.path.join(prediction_dir, f)):\n m = prog.match(f)\n if m is not None:\n arr.append((int(m.group(1)), f))\n arr.sort(key=lambda pr: pr[0])\n for pr in arr[:-n_keep]:\n subprocess.run(['rm', '-r', os.path.join(prediction_dir, pr[1])])\n\n def predict_and_save(self, sess, dset, save_dir):\n print('Predicting and saving predictions to {}...'.format(save_dir))\n losses = {\n 'total_loss': 0.0, \n 'total_miou_loss': 0.0, \n 'total_normal_loss': 0.0,\n 'total_type_loss': 0.0,\n 'total_residue_loss': 0.0, \n 'total_parameter_loss': 0.0\n }\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n for batch in dset.create_iterator():\n feed_dict = self.create_feed_dict(batch, is_training=False)\n loss_dict = {\n 'total_loss': self.total_loss,\n 'total_miou_loss': self.total_miou_loss,\n 'total_normal_loss': self.total_normal_loss,\n 'total_type_loss': self.total_type_loss,\n 'total_residue_loss': self.total_residue_loss,\n 'total_parameter_loss': self.total_parameter_loss,\n }\n pred_result, loss_result = sess.run([self.pred_dict, loss_dict], feed_dict=feed_dict)\n\n for key in losses.keys():\n losses[key] += loss_result[key] * dset.last_step_size\n prediction_io.save_batch_nn(\n nn_name=self.config.get_nn_name(),\n pred_result=pred_result, \n basename_list=dset.get_last_batch_basename_list(), \n save_dir=save_dir,\n W_reduced=False,\n )\n print('Finished {}/{}'.format(dset.get_last_batch_range()[1], dset.n_data), end='\\r')\n losses.update((x, y / dset.n_data) for x, y in losses.items())\n msg = self.format_loss_result(losses)\n open(os.path.join(save_dir, 'test_loss.txt'), 'w').write(msg)\n summary = tf.Summary()\n for x, y in losses.items():\n summary.value.add(tag=x, simple_value=y)\n return {\n 'msg': msg,\n 'summary': summary,\n 
}\n\n def simple_predict_and_save(self, sess, pc, pred_h5_file):\n feed_dict = {\n self.P: np.expand_dims(pc, axis=0), # 1xNx3\n self.is_training: False\n }\n pred_result = sess.run(self.pred_dict, feed_dict=feed_dict)\n prediction_io.save_single_nn(\n nn_name=self.config.get_nn_name(),\n pred_result=pred_result, \n pred_h5_file=pred_h5_file,\n W_reduced=False,\n )\n \n def create_feed_dict(self, batch, is_training):\n feed_dict = {\n self.P : batch['P'], \n self.is_training: is_training,\n }\n evaluation.fill_gt_dict_with_batch_data(feed_dict, self.gt_dict, batch)\n return feed_dict\n \n def get_batch_norm_decay(self, global_step, batch_size, bn_decay_step):\n BN_INIT_DECAY = 0.5\n BN_DECAY_RATE = 0.5\n BN_DECAY_CLIP = 0.99\n\n bn_momentum = tf.train.exponential_decay(\n BN_INIT_DECAY,\n global_step*batch_size,\n bn_decay_step,\n BN_DECAY_RATE,\n staircase=True)\n\n bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)\n return bn_decay\n\n def get_learning_rate(self, init_learning_rate, global_step, batch_size, decay_step, decay_rate):\n learning_rate = tf.train.exponential_decay(\n init_learning_rate,\n global_step*batch_size,\n decay_step,\n decay_rate,\n staircase=True)\n return learning_rate\n\n\n" ]
[ [ "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.logical_and", "tensorflow.compat.v1.is_finite", "numpy.expand_dims", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.summary.merge_all", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.Print", "tensorflow.compat.v1.Summary", "tensorflow.compat.v1.train.exponential_decay", "tensorflow.compat.v1.minimum", "tensorflow.compat.v1.Variable" ] ]
imenbenmhd/MiniProject
[ "046f288c0553454a8696128001f4be7ff2c57d39" ]
[ "tgibm/algorithm.py" ]
[ "import numpy as np\nfrom . import database\nfrom . import preprocessor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\n\nnormalization = [\"MinMaxScaler_\", \"Standard_Scaler\", \"PolynomialFeaturesScaler\"]\n\n\ndef normalize(X, norm):\n \"\"\"\n chose a preproccessing method to apply to the data\n\n Parameters:\n\n X : np.ndarray\n The data to normalize\n\n norm : int\n The index of normalization list to know which preprocessing method to use.\n\n Returns:\n numpy.ndarray,\n a 2D array same shape as the input but normalized.\n\n \"\"\"\n\n degree = 2\n if norm == 2:\n normalization_to_call = getattr(preprocessor, normalization[2])\n normalized_set = normalization_to_call(X, scale=\"minmax\", degree=degree)\n elif norm == 3:\n normalization_to_call = getattr(preprocessor, normalization[2])\n normalized_set = normalization_to_call(X, scale=\"z-norm\", degree=degree)\n else:\n normalization_to_call = getattr(preprocessor, normalization[norm])\n normalized_set = normalization_to_call(X)\n return normalized_set\n\n\ndef regression(data, norm, model):\n \"\"\"\n apply the regression model to the data with a specific normalization method as preprocessing\n\n Parameters:\n\n data : int\n The index of data_base list to know which data to load.\n\n norm : int\n The index of normalization list to know which preprocessing method to use.\n\n model : string\n Which regression model to apply.\n\n Returns: \n list of np.array,\n A list of the values of the predicted attribute for every protocol.\n \n list of np.array,\n A list of the true values of the test set to compare with the prediction.\n\n \"\"\"\n\n y_predicted = []\n y_tested = []\n\n for i in range(len(database.seeds)):\n training_set = database.extract(data, i, 0)\n testing_set = database.extract(data, i, 1)\n normalized_train = normalize(training_set, norm)\n normalized_test = normalize(testing_set, norm)\n\n y_train = normalized_train[:, -1]\n y_test = normalized_test[:, -1]\n\n if model == \"LinearRegression\":\n regressor = LinearRegression()\n if model == \"Regressiontree\":\n regressor = DecisionTreeRegressor()\n\n regressor.fit(normalized_train, y_train)\n y_predict = regressor.predict(normalized_test)\n y_tested.append(y_test)\n y_predicted.append(y_predict)\n\n return y_tested, y_predicted\n # return for the 3 seeds" ]
[ [ "sklearn.linear_model.LinearRegression", "sklearn.tree.DecisionTreeRegressor" ] ]
kg16/ODETTE
[ "ee407f766a93b09abea9ab8713a890386496b9c4" ]
[ "event_dataset.py" ]
[ "import os\nimport random\nfrom collections import Counter\nimport numpy as np\nimport torch\nimport copy\nimport corenlp\nimport stanfordnlp\n\nclass EventReader:\n\n def __init__(self):\n pass\n\n def read_events(self, root_dir, files):\n '''\n :param root_dir: Directory containing .tsv files with token-level event annotations\n :param files: List of files to be included in the dataset (used to pass train/dev/test splits)\n '''\n reader = open(os.path.join(root_dir, files), \"r\")\n file_list = []\n for line in reader:\n file_list.append(line.strip()+\".tsv\")\n reader.close()\n\n sentences = []\n events = []\n\n cur_sentence = []\n cur_events = []\n for file in file_list:\n reader = open(os.path.join(root_dir, file), \"r\")\n for line in reader:\n if len(line) > 0:\n if line == '\\n':\n if cur_sentence:\n sentences.append(cur_sentence)\n events.append(cur_events)\n cur_sentence = []\n cur_events = []\n else:\n if len(line.strip().split('\\t')) == 1:\n continue\n word, event = line.strip().split('\\t')\n cur_sentence.append(word.lower())\n cur_events.append(event)\n if cur_sentence:\n sentences.append(cur_sentence)\n events.append(cur_events)\n\n return sentences, events\n\n def create_padded_batches(self, sentences, events, batch_size, use_cuda, shuffle, is_bert=False, inst_weights=None):\n combined = list(zip(sentences, events))\n if inst_weights is not None:\n combined = list(zip(sentences, events, inst_weights))\n if shuffle:\n random.shuffle(combined)\n shuffled_sents, shuffled_events = [], []\n if inst_weights is not None:\n shuffled_sents, shuffled_events, shuffled_weights = zip(*combined)\n shuffled_weights = list(shuffled_weights)\n else:\n shuffled_sents, shuffled_events = zip(*combined)\n shuffled_events = list(shuffled_events)\n shuffled_sents = list(shuffled_sents)\n\n batches = []\n\n for i in range(0, len(shuffled_sents), batch_size):\n start = i\n end = min(len(shuffled_sents), start+batch_size)\n cur_sents = shuffled_sents[start:end]\n cur_events = shuffled_events[start:end]\n cur_weights = None\n if inst_weights is not None:\n cur_weights = shuffled_weights[start:end]\n\n if cur_weights is not None:\n combined = list(zip(cur_sents, cur_events, cur_weights))\n else:\n combined = list(zip(cur_sents, cur_events))\n combined = list(reversed(sorted(combined, key=lambda x: len(x[0]))))\n if cur_weights is not None:\n cur_sents, cur_events, cur_weights = zip(*combined)\n cur_weights = list(cur_weights)\n else:\n cur_sents, cur_events = zip(*combined)\n cur_sents = list(cur_sents)\n cur_events = list(cur_events)\n cur_lengths = [len(x) for x in cur_sents]\n cur_masks = []\n\n max_seq_len = cur_lengths[0]\n for i in range(len(cur_sents)):\n if not is_bert:\n cur_sents[i] = cur_sents[i] + [0] * (max_seq_len - cur_lengths[i])\n else:\n cur_sents[i] = cur_sents[i] + [[0] * len(cur_sents[i][0]) for j in range(max_seq_len - cur_lengths[i])]\n cur_events[i] = cur_events[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_masks.append([1] * cur_lengths[i] + [0] * (max_seq_len - cur_lengths[i]))\n\n if not is_bert:\n if not use_cuda:\n if cur_weights is not None:\n batches.append([torch.LongTensor(cur_sents), torch.FloatTensor(cur_events), torch.LongTensor(cur_lengths), torch.FloatTensor(cur_masks), torch.FloatTensor(cur_weights)])\n else:\n batches.append([torch.LongTensor(cur_sents), torch.FloatTensor(cur_events), torch.LongTensor(cur_lengths), torch.FloatTensor(cur_masks)])\n else:\n if cur_weights is not None:\n batches.append([torch.cuda.LongTensor(cur_sents), 
torch.cuda.FloatTensor(cur_events), torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks), torch.cuda.FloatTensor(cur_weights)])\n else:\n batches.append([torch.cuda.LongTensor(cur_sents), torch.cuda.FloatTensor(cur_events), torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n else:\n if not use_cuda:\n if cur_weights is not None:\n batches.append([torch.FloatTensor(cur_sents), torch.FloatTensor(cur_events), torch.LongTensor(cur_lengths), torch.FloatTensor(cur_masks), torch.FloatTensor(cur_weights)])\n else:\n batches.append([torch.FloatTensor(cur_sents), torch.FloatTensor(cur_events), torch.LongTensor(cur_lengths), torch.FloatTensor(cur_masks)])\n else:\n #for i in range(len(cur_masks)):\n # print('{} {} {}'.format(len(cur_sents[i]), len(cur_events[i]), len(cur_masks[i])))\n if cur_weights is not None:\n batches.append([torch.cuda.FloatTensor(cur_sents), torch.cuda.FloatTensor(cur_events), torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks), torch.cuda.FloatTensor(cur_weights)])\n else:\n batches.append([torch.cuda.FloatTensor(cur_sents), torch.cuda.FloatTensor(cur_events), torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n\n return batches\n\n def create_pos_padded_batches(self, sentences, pos, events, batch_size, use_cuda, shuffle):\n combined = list(zip(sentences, pos, events))\n if shuffle:\n random.shuffle(combined)\n shuffled_sents, shuffled_pos, shuffled_events = zip(*combined)\n shuffled_events = list(shuffled_events)\n shuffled_pos = list(shuffled_pos)\n shuffled_sents = list(shuffled_sents)\n\n batches = []\n for i in range(0, len(shuffled_sents), batch_size):\n start = i\n end = min(len(shuffled_sents), start+batch_size)\n cur_sents = shuffled_sents[start:end]\n cur_pos = shuffled_pos[start:end]\n cur_events = shuffled_events[start:end]\n\n combined = list(zip(cur_sents, cur_pos, cur_events))\n combined = list(reversed(sorted(combined, key=lambda x: len(x[0]))))\n cur_sents, cur_pos, cur_events = zip(*combined)\n cur_sents = list(cur_sents)\n cur_pos = list(cur_pos)\n cur_events = list(cur_events)\n cur_lengths = [len(x) for x in cur_sents]\n cur_masks = []\n\n max_seq_len = cur_lengths[0]\n\n for i in range(len(cur_sents)):\n cur_sents[i] = cur_sents[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_pos[i] = cur_pos[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_events[i] = cur_events[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_masks.append([1] * cur_lengths[i] + [0] * (max_seq_len - cur_lengths[i]))\n\n if not use_cuda:\n batches.append([torch.LongTensor(cur_sents), torch.LongTensor(cur_pos), torch.FloatTensor(cur_events), torch.LongTensor(cur_lengths), torch.FloatTensor(cur_masks)])\n else:\n batches.append([torch.cuda.LongTensor(cur_sents), torch.cuda.LongTensor(cur_pos), torch.cuda.FloatTensor(cur_events), torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n\n return batches\n\n def construct_vocab(self, sequences):\n counter = Counter()\n for sequence in sequences:\n counter.update(sequence)\n vocab = [\"<PAD>\", \"<UNK>\"] + list(counter.keys())\n print(len(vocab))\n vocab = dict(list(zip(vocab, range(len(vocab)))))\n return vocab\n\n def construct_integer_sequences(self, sequences, vocab):\n int_sequences = []\n for sequence in sequences:\n new_sequence = []\n for word in sequence:\n if word in vocab:\n new_sequence.append(vocab[word])\n else:\n if \"<UNK>\" in vocab:\n new_sequence.append(vocab[\"<UNK>\"])\n else:\n new_sequence.append(vocab[\"<unk>\"]) # For 
compatibility of this function with models where unk is lowercase\n int_sequences.append(new_sequence)\n return int_sequences\n\n\n# Write a sentence-reader class to read sentences with their domain\nclass SentenceReader:\n\n def __init__(self):\n pass\n\n def read_unlabeled_sents(self, root_dir):\n sentences = []\n domains = []\n for file in os.listdir(root_dir):\n reader = open(os.path.join(root_dir, file), \"r\")\n for line in reader:\n if line == '\\n':\n continue\n sentences.append([x.lower() for x in line.strip().split()])\n domains.append(0)\n return sentences, domains\n\n def read_unlabeled_sents_as_docs(self, root_dir):\n files = []\n filenames = []\n for file in os.listdir(root_dir):\n filenames.append(file)\n cur_file = []\n reader = open(os.path.join(root_dir, file), \"r\")\n for line in reader:\n if line == \"\\n\":\n continue\n cur_file.append([x.lower() for x in line.strip().split()])\n files.append(cur_file)\n reader.close()\n return filenames, files\n\n def read_labeled_sents(self, sents):\n new_sents = copy.deepcopy(sents)\n domains = [1]*len(new_sents)\n return new_sents, domains\n\n def create_padded_batches(self, sentences, domains, batch_size, use_cuda, shuffle, is_bert=False):\n combined = list(zip(sentences, domains))\n if shuffle:\n random.shuffle(combined)\n shuffled_sents, shuffled_domains = zip(*combined)\n shuffled_domains = list(shuffled_domains)\n shuffled_sents = list(shuffled_sents)\n\n batches = []\n for i in range(0, len(shuffled_sents), batch_size):\n start = i\n end = min(len(shuffled_sents), start + batch_size)\n cur_sents = shuffled_sents[start:end]\n cur_domains = shuffled_domains[start:end]\n\n combined = list(zip(cur_sents, cur_domains))\n combined = list(reversed(sorted(combined, key=lambda x: len(x[0]))))\n cur_sents, cur_domains = zip(*combined)\n cur_sents = list(cur_sents)\n cur_domains = list(cur_domains)\n cur_lengths = [len(x) for x in cur_sents]\n cur_masks = []\n\n max_seq_len = cur_lengths[0]\n\n for i in range(len(cur_sents)):\n if not is_bert:\n cur_sents[i] = cur_sents[i] + [0] * (max_seq_len - cur_lengths[i])\n else:\n cur_sents[i] = cur_sents[i] + [[0] * len(cur_sents[i][0]) for j in range(max_seq_len - cur_lengths[i])]\n cur_masks.append([1] * cur_lengths[i] + [0] * (max_seq_len - cur_lengths[i]))\n\n if not is_bert:\n if not use_cuda:\n batches.append(\n [torch.LongTensor(cur_sents), torch.LongTensor(cur_domains), torch.LongTensor(cur_lengths),\n torch.FloatTensor(cur_masks)])\n else:\n batches.append([torch.cuda.LongTensor(cur_sents), torch.cuda.LongTensor(cur_domains),\n torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n else:\n if not use_cuda:\n batches.append(\n [torch.FloatTensor(cur_sents), torch.LongTensor(cur_domains), torch.LongTensor(cur_lengths),\n torch.FloatTensor(cur_masks)])\n else:\n batches.append([torch.cuda.FloatTensor(cur_sents), torch.cuda.LongTensor(cur_domains),\n torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n\n return batches\n\n def create_pos_padded_batches(self, sentences, pos, domains, batch_size, use_cuda, shuffle):\n combined = list(zip(sentences, pos, domains))\n if shuffle:\n random.shuffle(combined)\n shuffled_sents, shuffled_pos, shuffled_domains = zip(*combined)\n shuffled_domains = list(shuffled_domains)\n shuffled_pos = list(shuffled_pos)\n shuffled_sents = list(shuffled_sents)\n\n batches = []\n for i in range(0, len(shuffled_sents), batch_size):\n start = i\n end = min(len(shuffled_sents), start + batch_size)\n cur_sents = 
shuffled_sents[start:end]\n cur_pos = shuffled_pos[start:end]\n cur_domains = shuffled_domains[start:end]\n\n combined = list(zip(cur_sents, cur_pos, cur_domains))\n combined = list(reversed(sorted(combined, key=lambda x: len(x[0]))))\n cur_sents, cur_pos, cur_domains = zip(*combined)\n cur_sents = list(cur_sents)\n cur_pos = list(cur_pos)\n cur_domains = list(cur_domains)\n cur_lengths = [len(x) for x in cur_sents]\n cur_masks = []\n\n max_seq_len = cur_lengths[0]\n\n for i in range(len(cur_sents)):\n cur_sents[i] = cur_sents[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_pos[i] = cur_pos[i] + [0] * (max_seq_len - cur_lengths[i])\n cur_masks.append([1] * cur_lengths[i] + [0] * (max_seq_len - cur_lengths[i]))\n\n if not use_cuda:\n batches.append(\n [torch.LongTensor(cur_sents), torch.LongTensor(cur_pos), torch.LongTensor(cur_domains), torch.LongTensor(cur_lengths),\n torch.FloatTensor(cur_masks)])\n else:\n batches.append([torch.cuda.LongTensor(cur_sents), torch.cuda.LongTensor(cur_pos), torch.cuda.LongTensor(cur_domains),\n torch.cuda.LongTensor(cur_lengths), torch.cuda.FloatTensor(cur_masks)])\n\n return batches\n\n# TODO: Replace models_dir with local directory containing Stanford CoreNLP English model download\nclass Parser:\n \n def __init__(self):\n self.pipeline = stanfordnlp.Pipeline(processors='tokenize,pos', lang='en', tokenize_pretokenized=True, \nmodels_dir=\"/data/hulab/kgoyal6/odette/stanford-corenlp-4.3.1\", treebank=\"en_ewt\")\n\n def parse_sequences(self, sequences):\n parse_outputs = []\n for sequence in sequences:\n annotations = self.pipeline([sequence])\n pos_tags = []\n for sentence in annotations.sentences:\n for token in sentence.words:\n pos_tags.append(token.pos)\n parse_outputs.append(pos_tags)\n return parse_outputs\n" ]
[ [ "torch.cuda.LongTensor", "torch.FloatTensor", "torch.LongTensor", "torch.cuda.FloatTensor" ] ]
fakhraddinJ/BigData
[ "85e5bfaca9b5062c2a706d734cc9d79e483f707c" ]
[ "2_tweets1_2_3.py" ]
[ "\"\"\"\r\nCreated on Thu Dec 22 19:31:52 2016\r\n\r\n@author: Fakhraddin Jaf\r\n\"\"\"\r\n \r\n#%% \r\nimport collections #High-performance container datatypes\r\nimport pandas as pd\r\n#pandas is an open source, BSD-licensed library providing high-performance,\r\n#easy-to-use data structures and data analysis tools for the Python programming language.\r\n\r\n\r\n#%% \r\n#Reads each column of the Tweet's file into seperate DataFrame.\r\ndf = pd.read_csv('tweets.tsv', \\\r\n names=['UID', 'user', 'tweet', 'date', 'latitude', 'longitude', 'location_name'],\r\n dtype=str, delimiter='\\t')\r\n\r\n#%% \r\nprint(\"\\n------------------ Task 1 ---------------------\\n\"\\\r\n \"Top 10 Trends:\\n\")\r\n#Creating a new string from df.tweet's DataFrame.\r\nall_tweets_str =( \" \".join(df.tweet)).split()\r\n\r\n#List of words need to be filter out:\r\ncommon_words = [\r\n\"rt\",\"RT\",\"in\",\"to\", \"at\",\"a\", \"is\", \"if\",\"of\",\"-\",\\\r\n\"with\",\"on\",\"but\", \"and\", \"the\", \"not\", \"use\",\"for\",\\\r\n\"will\", \"I\", \"be\", \"have\",\"has\",\"this\",\"so\",\"&\",\"The\"\\\r\n]\r\n\r\n#Creating a filtered list of tweets' words:\r\nall_tweets_words = [ \r\nx for x in all_tweets_str if x not in common_words and 'http' not in x ]\r\n\r\n# Making a list of (count, unique) tuples: \r\nuniques = []\r\nfor word in all_tweets_words:\r\n if word not in uniques:\r\n uniques.append(word)\r\n\r\ncounts = []\r\nfor unique in uniques:\r\n count = 0\r\n for word in all_tweets_words: \r\n if word == unique: \r\n count += 1 \r\n counts.append((count, unique))\r\n\r\ncounts.sort() # Sorting the list puts the lowest counts first.\r\ncounts.reverse() # Reverse it, putting the highest counts first.\r\n\r\n# Print the ten words with the highest counts.\r\nfor i in range(min(10, len(counts))):\r\n count, word = counts[i]\r\n print(\"%d) \" %(i+1) +\"%s -----> %d\" % (word, count))\r\n\r\n#%%\r\nprint(\"\\n\\n------------------ Task 2 ---------------------\")\r\n#Counting and printing the most active user:\r\nuser_numbers = collections.Counter(df.user)\r\nmost_active_user = (max(user_numbers, key=user_numbers.get))\r\nprint(\"Most active user: %s\" %most_active_user)\r\nprint(\"Number of his/her tweets: %s\" %user_numbers.get(most_active_user))\r\n\r\n\r\n#%%\r\nprint (\"\\n\\n------------------ Task 3 ---------------------\")\r\n#Counting the length of each tweet and making a list of them:\r\nTweet_Length = []\r\nfor s in df.tweet:\r\n Tweet_Length.append(len(s))\r\n\r\n#Getting the index of shortest tweet in the \"df\" DataFrame, \r\n#and printing the requested values of that line :\r\nind = Tweet_Length.index(min(Tweet_Length))\r\nprint(\"Shortest tweet is: \\\"%s\" %df.tweet[ind] + \"\\\"\")\r\nprint(\"By user: %s\" %df.user[ind])\r\nprint(\"Date: %s\" %df.date[ind])\r\nprint(\"Location: %s\" %df.location_name[ind])\r\nprint(\"Latitude: %s\" %df.latitude[ind])\r\nprint(\"Longitude: %s\\n\\n\" %df.longitude[ind])\r\n" ]
[ [ "pandas.read_csv" ] ]
bobbyscharmann/flypy
[ "39dce7decd9633e7d90bb4c77472c8c40aeda61c" ]
[ "examples/two_layer_nn.py" ]
[ "# Simple two layer neural network for playing with\n# Source: https://iamtrask.github.io/2015/07/12/basic-python-network/\nimport numpy as np\n\n# sigmoid function\ndef nonlin(x,deriv=False):\n if(deriv==True):\n return x*(1-x)\n return 1/(1+np.exp(-x))\n \n# input dataset\nX = np.array([ [0,0,1],\n [0,1,1],\n [1,0,1],\n [1,1,1] ])\n \n# output dataset \ny = np.array([[0,0,1,1]]).T\n\n# seed random numbers to make calculation\n# deterministic (just a good practice)\nnp.random.seed(1)\n\n# initialize weights randomly with mean 0\nsyn0 = 2*np.random.random((3,1)) - 1\n\nfor iter in range(100000):\n\n # forward propagation\n l0 = X\n l1 = nonlin(np.dot(l0,syn0))\n\n # how much did we miss?\n l1_error = y - l1\n\n # multiply how much we missed by the \n # slope of the sigmoid at the values in l1\n l1_delta = l1_error * nonlin(l1, True)\n\n # update weights\n syn0 += np.dot(l0.T,l1_delta)\n\nprint(\"Output After Training:\", l1)\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.random.seed", "numpy.exp", "numpy.random.random" ] ]
mfkiwl/Strain_2D
[ "a204dcafb804efb3050510ae7cb970bf657a22ed" ]
[ "Strain_Tools/strain/utilities.py" ]
[ "# A set of utility functions used throughout the Strain_2D library\nimport numpy as np\n\n\ndef get_float_range(string_range):\n \"\"\"\n :param string_range: format \"-125/-121/32/35\"\n :type string_range: string\n :returns: list of floats\n :rtype: list\n \"\"\"\n number_strings = string_range.split('/')\n float_range = [float(number_strings[0]), float(number_strings[1]),\n float(number_strings[2]), float(number_strings[3])];\n if float_range[1] <= float_range[0]:\n raise ValueError(\"Error! Given range is invalid\", float_range);\n if float_range[3] <= float_range[2]:\n raise ValueError(\"Error! Given range is invalid\", float_range);\n return float_range;\n\n\ndef get_string_range(float_range, x_buffer=0, y_buffer=0):\n \"\"\"\n Buffer is for the possible interface between pixel-node-registered and gridline-node-registered files\n\n :param float_range: list, [w, e, s, n]\n :type float_range: list\n :param x_buffer: possible interface between pixel-node-registered etc.\n :type x_buffer: float\n :param y_buffer: possible interface between pixel-node-registered etc.\n :type y_buffer: float\n :returns: string range\n :rtype: string\n \"\"\"\n string_range = str(float_range[0] - x_buffer) + '/' + str(float_range[1] + x_buffer) + '/' + \\\n str(float_range[2] - y_buffer) + '/' + str(float_range[3] + y_buffer);\n return string_range;\n\n\ndef get_float_inc(string_inc):\n \"\"\"\n :param string_inc: string, e.g., '0.04/0.04'\n :type string_inc: string\n :returns: list of floats\n :rtype: list\n \"\"\"\n number_incs = string_inc.split('/')\n float_inc = [float(number_incs[0]), float(number_incs[1])];\n return float_inc;\n\n\ndef get_string_inc(float_inc):\n \"\"\"\n :type float_inc: list\n :returns: string separated by slash, e.g., '0.04/0.04'\n :rtype: string\n \"\"\"\n string_inc = str(float_inc[0]) + '/' + str(float_inc[1]);\n return string_inc;\n\n\ndef get_gmt_range_inc(lons, lats):\n \"\"\"\n Take lons and lats associated with pixel-node-registered files read into Python using xarray.ds\n\n :param lons: list of pixel centers lons\n :type lons: np.array\n :param lats: list of pixel centers lats\n :type lats: np.array\n :returns: string range, string inc\n :rtype: string, string\n \"\"\"\n lon_inc = np.round(lons[1] - lons[0], 6)\n edge_of_west_pixel = np.round(lons[0] - lon_inc/2, 5);\n edge_of_east_pixel = np.round(lons[-1] + lon_inc/2, 5);\n lat_inc = np.round(lats[1] - lats[0], 6);\n edge_of_south_pixel = np.round(lats[0] - lat_inc/2, 5);\n edge_of_north_pixel = np.round(lats[-1] + lat_inc/2, 5);\n gmt_range_string = str(edge_of_west_pixel) + '/' + str(edge_of_east_pixel) + '/' + str(edge_of_south_pixel) + \\\n '/' + str(edge_of_north_pixel);\n gmt_inc_string = str(lon_inc) + '/' + str(lat_inc);\n return gmt_range_string, gmt_inc_string;\n\n\ndef mask_by_value(grid1, grid_maskingbasis, cutoff_value):\n \"\"\"\n Implement NAN-mask for one grid in all places where values are smaller than cutoff value in corresponding grid.\n\n :param grid1: usually azimuth deviations\n :type grid1: 2D array\n :param grid_maskingbasis: usually I2nd\n :type grid_maskingbasis: 2D array\n :param cutoff_value: cutoff for nans\n :type cutoff_value: float\n :returns: masked grid\n :rtype: 2D array\n \"\"\"\n (y, x) = np.shape(grid1);\n masked_vals = np.zeros(np.shape(grid1));\n for i in range(x):\n for j in range(y):\n if abs(grid_maskingbasis[j][i]) > cutoff_value:\n masked_vals[j][i] = grid1[j][i];\n else:\n masked_vals[j][i] = np.nan;\n return masked_vals;\n\n\n# --------- DEFENSIVE PROGRAMMING FOR 
COMPARING MULTIPLE GRIDS ------------------ #\n\ndef check_coregistered_shapes(strain_values_ds):\n \"\"\"\n Make sure arrays are of the same dimensions before attempting to produce any statistics\n\n :param strain_values_ds: xarray.DataSet of strain values from different calculations\n :returns: None\n \"\"\"\n x = np.array(strain_values_ds['x']);\n y = np.array(strain_values_ds['y']);\n for varname, da in strain_values_ds.data_vars.items():\n nparray = np.array(strain_values_ds[varname]);\n arrayshape = np.shape(nparray);\n # assert the shape directly; asserting a (condition, message) tuple is always truthy and never fires\n assert arrayshape == (len(y), len(x)), \"Error! Not all arrays have the same shape! Cannot compare.\";\n print(\"All methods have the same shape.\");\n return;\n\n\n# --------- GRID AND NAMED TUPLE UTILITIES ------------------ #\n\ndef make_grid(coordbox, inc):\n \"\"\"\n Assumption is a pixel-node-registered grid.\n :param coordbox: [float, float, float, float] corresponding to [W, E, S, N]\n :type coordbox: list\n :param inc: [float, float] corresponding to [xinc, yinc]\n :type inc: list\n :returns: 1d array of lons, 1d array of lats, 2d array of zeros\n \"\"\"\n lonmin, lonmax = coordbox[0], coordbox[1]\n latmin, latmax = coordbox[2], coordbox[3]\n lons = np.arange(lonmin, lonmax+0.00001, inc[0])\n lats = np.arange(latmin, latmax+0.00001, inc[1])\n grid = np.zeros((len(lats), len(lons)));\n return lons, lats, grid\n\n\ndef getVels(velField):\n \"\"\"Read velocities from a NamedTuple\"\"\"\n lon, lat, e, n, se, sn = [], [], [], [], [], [];\n for item in velField:\n lon.append(item.elon)\n lat.append(item.nlat)\n e.append(item.e)\n n.append(item.n)\n se.append(item.se)\n sn.append(item.sn)\n return np.array(lon), np.array(lat), np.array(e), np.array(n), np.array(se), np.array(sn)\n" ]
[ [ "numpy.round", "numpy.array", "numpy.arange", "numpy.shape" ] ]
Jh123x/Orbital
[ "6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9" ]
[ "gym_invaders/trainingDQN.py" ]
[ "#Python Modules\nimport os\n# Neural Network Modules\nimport torch\n#Gym Environment Dependencies\nimport gym\n\n# In house dependencies\nimport gym_game\nfrom ai_invader.agent import DQNAgent\nfrom ai_invader.model import DQNCNN\n\n'''\nExample of training script\n'''\n#Retrieve Training Environment\n# env = gym.make(\"Invader-v0\")\ndef make_env():\n envir = gym.make(\"Classic-v0\")\n return envir\ndef main():\n print(\"The size of frame is: \", make_env().observation_space.shape)\n print(\"No. of Actions: \", make_env().action_space.n)\n\n # Initialise device (Uses cuda if possible to speed up training)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Print cuda\n print('Device:', device)\n\n # Shape of nn\n INPUT_SHAPE = (4, 84, 84)\n\n # Determine rendering of GUI\n\n # AI vars\n ACTION_SIZE = 6\n SEED = 0\n GAMMA = 0.99 # discount factor\n BUFFER_SIZE = 10000 # replay buffer size\n BATCH_SIZE = 64 # Update batch size\n LR = 0.0001 # learning rate\n TAU = 1e-3 # for soft update of target parameters\n UPDATE_EVERY = 100 # how often to update the network\n REPLAY = 6 * BATCH_SIZE # After which thershold replay to be started\n\n agent = DQNAgent(INPUT_SHAPE, ACTION_SIZE, SEED, device, DQNCNN, GAMMA, LR, TAU, BATCH_SIZE,\n UPDATE_EVERY, REPLAY,BUFFER_SIZE, make_env, path = 'model', num_epochs = 0)\n agent.train(3,render = True)\n\n#Initialise the DQNagent\nif __name__ == '__main__':\n main()\n\n\n\n" ]
[ [ "torch.cuda.is_available" ] ]
nizamphoenix/kaggle
[ "a9c993d0441a6d9260d605a630f95d938e6329db" ]
[ "Jigsaw2020/read_data.py" ]
[ "import pandas as pd\n\ndef read_data():\n df_train = pd.read_csv(\"/kaggle/input/jigsaw-public-dataset-/train.csv\", usecols=[\"comment_text\", \"toxic\"])\n df_train = df_train.sample(frac=1).reset_index(drop=True)#shuffling\n df_valid = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv')\n test = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/test.csv')\n sub = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')\n return df_train,df_valid,test\n" ]
[ [ "pandas.read_csv" ] ]
edazizovv/financial_news_re
[ "d7950da28d77ade29628f3b2fa266f5059527f52" ]
[ "just_chillin.py" ]
[ "#\nimport pytz\nimport pandas\nimport datetime\nfrom matplotlib import pyplot\n\n#\nfrom m_utils.transform import lag_it\nfrom mpydge.wrap.new_data import DataHandler, MaskHandler\nfrom mpydge.chaotic.the_new_pipe import SimplePipe\nfrom new_insane import Insane, Neakt\nfrom new_insane import XBR\nfrom new_insane import MAE, R2_adj\nfrom reporter import reported_n_muted\n\nfrom sell_stone import MutualInfoRazor\n\nprint(\"Process Started:\\t {0}\".format(datetime.datetime.utcnow().replace(tzinfo=pytz.utc)))\n#\nd = './result.csv'\n\ndataset = pandas.read_csv(d, sep=';')\n\ndataset = dataset.set_index(['time'])\n# dataset = dataset.sort_index(ascending=True)\n\n# dataset = lag_it(dataset, n_lags=1, exactly=False)\ndataset = dataset.dropna()\n\ndogtag = dataset.copy()\ndrops = ['id', 'title', 'news_time'] + ['ticker'] # ticker should be embedded!\ndogtag = dogtag.drop(columns=drops)\ncols_to_drop = [x for x in dogtag.columns.values if ('LAG0' in x and 'close' not in x)]\ndogtag = dogtag.drop(columns=cols_to_drop)\n\ntarget = 'close_LAG0'\nqualitative = []\nquantitative = [x for x in dogtag.columns.values if 'LAG0' not in x]\n\nX_names = [qualitative + quantitative + [target], qualitative + quantitative]\nY_names = [target, target]\noutputs_0 = qualitative + quantitative + [target]\noutput_spec = [{x: 'float64' for x in outputs_0}, {target: 'float64'}]\n\"\"\"\ng_mask = [x in quantitative for x in X_names[0]] # !\ntarget_mask = [x == target for x in X_names[0]]\n\n# they are similar both for target and factors, remove redundant, pls\nmaskeds = [{Insane(my_name='Nothing'): g_mask, Insane(my_name='Nothing'): target_mask},\n {Insane(my_name='TanhLnPct'): g_mask, Insane(my_name='TanhLnPct'): target_mask},\n {Insane(my_name='LnPct'): g_mask, Insane(my_name='LnPct'): target_mask},\n {Insane(my_name='Whiten'): g_mask, Insane(my_name='Whiten'): target_mask},\n {Insane(my_name='TanhWhiten'): g_mask, Insane(my_name='TanhWhiten'): target_mask}]\n\nmaskeds_coded = ['Nothing', 'TanhLnPct', 'LnPct', 'Whiten', 'TanhWhiten']\n\n\nj = 0\nnk_args = {'masked': maskeds[j], 'coded': maskeds_coded[j]}\nnk = Neakt(**nk_args)\n\nnk.fit(X=dogtag.values, Y=None)\npk = nk.predict(X=dogtag.values)\n\"\"\"\n\nprint('doin the stuff')\n\n\"\"\"\npre_kwargs = {'percentile': 50}\nmir = MutualInfoRazor(**pre_kwargs)\nmir.fit(X=dogtag[qualitative + quantitative].values, Y=dogtag[target].values)\n\"\"\"\n\nfrom sklearn.feature_selection import mutual_info_regression\nmired = mutual_info_regression(X=dogtag[qualitative + quantitative].values,\n y=dogtag[target].values,\n discrete_features='auto',\n n_neighbors=3,\n copy=True)\n" ]
[ [ "pandas.read_csv", "sklearn.feature_selection.mutual_info_regression" ] ]
adrianurdar/100DaysOfCode-Bootcamp
[ "af6340a75979f15cb26687931c64aa8e072de242" ]
[ "Day-025/Squirrel-Census/main.py" ]
[ "import pandas\n\n# Read the file\ndata = pandas.read_csv(\"2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv\")\n\n# Count each fur color\ngray_squirrel_count = len(data[data[\"Primary Fur Color\"] == \"Gray\"])\ncinnamon_squirrel_count = len(data[data[\"Primary Fur Color\"] == \"Cinnamon\"])\nblack_squirrel_count = len(data[data[\"Primary Fur Color\"] == \"Black\"])\n\n# Create data dict\ndata_dict = {\n \"Fur color\": [\"Gray\", \"Cinnamon\", \"Black\"],\n \"Count\": [gray_squirrel_count, cinnamon_squirrel_count, black_squirrel_count]\n}\n\n# Save the new file\ndf = pandas.DataFrame(data_dict)\ndf.to_csv(\"squirrel_count.csv\")\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
xarion/EPM
[ "3d375093f21d482cb504791b783af2a41f044cbb" ]
[ "features.py" ]
[ "import os\nfrom glob import glob\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms as T\nfrom torchvision.transforms import Normalize\n\nfrom config import USE_CUDA, IMAGE_MEAN, IMAGE_STD\nfrom dataset import get_standard_image_transform\nfrom models import create_model, EncodingSavingHook\n\n\ndef save_features(model_name, image_class, dataset, dataset_name):\n encoding_saving_hook = EncodingSavingHook(dataset_name, model_name, image_class)\n model, features = create_model(model_name)\n features.register_forward_hook(encoding_saving_hook.hook)\n\n dataloader = DataLoader(dataset, batch_size=50, pin_memory=True)\n\n for i, (images, labels) in enumerate(iter(dataloader)):\n if USE_CUDA:\n images = images.cuda()\n model(images)\n encoding_saving_hook.save_encodings()\n\n\ndef save_features_from_folder(model_name, image_class, dataset_name, folder=\"./\"):\n dataset = PerturbedImagesDataset(folder)\n save_features(model_name, image_class, dataset, dataset_name)\n\n\nclass PerturbedImagesDataset(Dataset):\n\n def __init__(self, root_dir):\n self.root_dir = root_dir\n self.files_list = np.array([f for f in glob(root_dir + \"/*.jpg\")])\n super().__init__()\n self.transform = get_standard_image_transform()\n\n def __len__(self):\n return len(self.files_list)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = os.path.join(self.root_dir,\n self.files_list[idx])\n image = Image.open(img_name).convert('RGB')\n torch_image_data = self.transform(image)\n return torch_image_data, 0\n" ]
[ [ "torch.is_tensor", "torch.utils.data.DataLoader" ] ]
jmixon11/StockPredictionAlg
[ "30fc2b01a26b3d55265d59d83b546014d9cfdc25" ]
[ "ExampleMPC.py" ]
[ "from Tkinter import *\nfrom sklearn import tree\n\ndef click():\n\tentered_text=textentry.get()\n\toutput.delete(0.0, END)\n\tword1 = \"Close: \"\n\toutput.insert(END, word1)\n\tclose = clf1.predict([[entered_text]])\n\toutput.insert(END, close)\n\tword2 = \" High: \"\n\toutput.insert(END, word2)\n\thigh = clf2.predict([[entered_text]])\n\toutput.insert(END, high)\n\tword3 = \" Low: \"\n\toutput.insert(END, word3)\n\tlow = clf3.predict([[entered_text]])\n\toutput.insert(END, low)\n\nwindow = Tk()\nwindow.title(\"The STAC App\")\nwindow.configure(width=600, height=700, bg=\"blue\")\n\nLabel (window, text=\"Stock: MPC\", bg=\"blue\", fg=\"white\", font=\"plump 44 bold\") .grid(row=1, column=0, sticky=W)\nLabel (window, text=\"Enter Today's Opening Price:\", bg=\"blue\", fg=\"white\", font=\"plump 20 bold\") .grid(row=3, column=0, sticky=W)\n\ntextentry = Entry(window, width=30, bg=\"white\")\ntextentry.grid(row=4, column=0, sticky=W)\n\nButton(window, text=\"Submit\", command=click, font=\"plump 18\") .grid(row=4, column=0, sticky=E)\n\nLabel (window, text=\"Estimates:\", bg=\"blue\", fg=\"white\", font=\"plump 20 bold\") .grid(row=6, column=0, sticky=W)\n\noutput = Text(window, width=60, height=3, bg=\"white\")\noutput.grid (row=7, column=0, columnspan=2, sticky=W)\n\nclf1 = tree.DecisionTreeClassifier()\n\nO = [[73.86],\t[73.46], [74.50],\t[74.01],\t[73.64],\n\t [74.71],\t[75.61],\t[75.61],\t[77.89],\t[78.94],\n\t [78.84],\t[79.52],\t[80.00],\t[81.76],\t[82.27],\n\t [79.78],\t[79.40],\t[78.77],\t[77.01],\t[77.90],\n\t [78.17],\t[77.77],\t[79.16],\t[79.98],\t[80.58],\n\t [77.60],\t[77.10],\t[77.85],\t[77.84],\t[77.72],\n\t [77.94],\t[76.17],\t[76.43],\t[77.23],\t[76.90],\n\t [76.28],\t[73.28],\t[74.28],\t[75.47],\t[82.56],\n\t [80.70],\t[79.19],\t[80.32],\t[79.98],\t[79.40],\n\t [78.80],\t[79.07],\t[77.79],\t[74.98],\t[74.46],\n\t [74.30],\t[73.71],\t[72.94],\t[72.47],\t[72.71],\n\t [73.24],\t[71.59],\t[72.70],\t[72.95],\t[72.23],\n\t [73.13],\t[73.88],\t[72.10],\t[73.40],\t[72.37],\n\t [71.81],\t[69.70],\t[69.40],\t[68.51],\t[69.56],\n\t [69.65],\t[69.38],\t[70.24],\t[69.73],\t[68.12],\n\t [66.96],\t[66.62],\t[64.88],\t[64.38],\t[64.27],\n\t [65.96],\t[66.16],\t[66.92],\t[66.88],\t[66.53],\n\t [66.29],\t[67.45],\t[67.60],\t[68.16],\t[65.46],\n\t [64.67],\t[64.48],\t[63.45],\t[64.91],\t[66.96],\n\t [62.93],\t[66.08],\t[68.33],\t[69.77],\t[69.24],\n\t [69.05],\t[71.51],\t[71.29],\t[72.08],\t[73.37],\n\t [73.31],\t[72.04],\t[71.66],\t[71.78],\t[71.11],\n\t [71.60],\t[70.47],\t[69.72],\t[68.79],\t[69.39],\n\t [69.07],\t[68.87],\t[68.66],\t[67.46],\t[66.11],\n\t [66.47],\t[66.43],\t[66.72],\t[66.55],\t[66.70],\n\t [65.70],\t[65.06],\t[66.03],\t[65.87],\t[64.95],\n\t [64.16],\t[64.30],\t[64.82],\t[64.65],\t[64.19],\n\t [63.10],\t[64.32],\t[63.42],\t[62.92],\t[62.82],\n\t [61.69],\t[62.01],\t[62.29],\t[61.95],\t[62.16],\n\t [62.35],\t[62.19],\t[62.11],\t[61.85],\t[61.51],\n\t [60.45],\t[62.32],\t[61.79],\t[61.98],\t[62.62],\n\t [63.29],\t[62.58],\t[62.34],\t[62.26],\t[61.06],\n\t [60.24],\t[59.78],\t[58.50],\t[57.68],\t[57.52],\n\t [56.96],\t[56.54],\t[57.48],\t[57.30],\t[56.42],\n\t [56.52],\t[55.74],\t[56.42],\t[56.89],\t[56.00],\n\t [56.31],\t[56.72],\t[56.13],\t[55.80],\t[56.48],\n\t [56.16],\t[55.53],\t[55.94],\t[55.46],\t[55.26],\n\t [54.61],\t[55.36],\t[55.00],\t[54.24],\t[54.43],\n\t [54.38],\t[53.75],\t[53.06],\t[52.36],\t[53.27],\n\t [54.00],\t[53.95],\t[53.48],\t[52.66],\t[52.63],\n\t [51.64],\t[53.58],\t[52.55],\t[52.55],\t[51.53],\n\t 
[52.17],\t[52.33],\t[51.47],\t[50.26],\t[49.43],\n\t [49.71],\t[50.07],\t[49.62],\t[50.72],\t[52.00],\n\t [52.25],\t[52.08],\t[52.03],\t[53.04],\t[54.43],\n\t [54.90],\t[55.59],\t[55.98],\t[56.05],\t[56.15],\n\t [56.11],\t[55.16],\t[55.92],\t[54.27],\t[56.39],\n\t [56.14],\t[55.57],\t[55.34],\t[55.76],\t[54.90],\n\t [54.62],\t[53.86],\t[53.81],\t[53.68],\t[54.74],\n\t [53.92],\t[53.95],\t[52.87],\t[53.14],\t[52.83],\n\t [52.63],\t[52.14],\t[52.89],\t[52.12],\t[51.40],\n\t [51.64],\t[51.19],\t[52.12],\t[52.93],\t[53.66],\n\t [53.81],\t[53.31],\t[53.10],\t[54.87],\t[54.48],\n\t [54.90],\t[53.55],\t[52.89],\t[52.84],\t[52.58],\n\t [52.57],\t[52.86],\t[52.43]]\n\nC = ['72.62',\t'72.48', '73.82',\t'74.52',\t'74.79',\n\t '73.56',\t'75.19',\t'74.88',\t'75.46',\t'77.77',\n\t '78.99',\t'79.15',\t'78.76',\t'80.33',\t'81.93',\n\t '81.92',\t'79.03',\t'79.55',\t'77.61',\t'77.00',\n\t '79.11',\t'78.87',\t'78.49',\t'78.85',\t'79.53',\n\t '80.71',\t'76.73',\t'77.29',\t'78.34',\t'77.33',\n\t '77.54',\t'77.11',\t'75.31',\t'76.50',\t'76.94',\n\t '77.46',\t'75.84',\t'72.90',\t'74.91',\t'81.43',\n\t '82.93',\t'80.23',\t'79.78',\t'79.85',\t'79.76',\n\t '79.40',\t'78.78',\t'78.55',\t'77.52',\t'74.59',\n\t '74.25',\t'74.02',\t'73.70',\t'71.67',\t'72.45',\n\t '73.41',\t'72.57',\t'72.60',\t'71.80',\t'73.11',\n\t '71.82',\t'72.88',\t'73.68',\t'71.28',\t'72.97',\n\t '73.21',\t'71.46',\t'69.21',\t'69.68',\t'68.63',\n\t '69.36',\t'69.24',\t'69.27',\t'70.22',\t'69.03',\n\t '67.68',\t'67.39',\t'66.16',\t'65.19',\t'64.79',\n\t '64.06',\t'65.60',\t'66.29',\t'66.81',\t'66.46',\n\t '66.27',\t'66.25',\t'67.93',\t'67.80',\t'68.16',\n\t '66.08',\t'64.77',\t'63.84',\t'62.79',\t'65.09',\n\t '65.69',\t'64.58',\t'67.41',\t'68.69',\t'69.27',\n\t '69.13',\t'70.15',\t'71.74',\t'71.08',\t'71.59',\n\t '73.12',\t'73.16',\t'71.89',\t'71.49',\t'71.98',\n\t '70.78',\t'71.42',\t'70.25',\t'69.51',\t'69.00',\n\t '69.39',\t'69.34',\t'68.68',\t'68.61',\t'67.18',\n\t '65.98',\t'66.40',\t'66.42',\t'66.84',\t'66.36',\n\t '66.33',\t'65.66',\t'64.75',\t'66.01',\t'65.75',\n\t '64.66',\t'64.15',\t'64.49',\t'64.46',\t'64.75',\n\t '64.00',\t'63.46',\t'64.54',\t'63.30',\t'62.84',\n\t '62.63',\t'61.40',\t'62.22',\t'61.88',\t'62.03',\n\t '62.01',\t'62.29',\t'61.94',\t'62.27',\t'62.01',\n\t '61.50',\t'62.87',\t'62.20',\t'61.72',\t'61.66',\n\t '63.22',\t'63.11',\t'62.35',\t'62.20',\t'62.17',\n\t '60.89',\t'59.74',\t'59.06',\t'58.47',\t'57.27',\n \t '56.44',\t'56.99',\t'56.34',\t'57.49',\t'57.10',\n\t '56.75',\t'56.51',\t'55.72',\t'56.37',\t'56.55',\n\t '56.22',\t'56.25',\t'56.14',\t'56.02',\t'56.25',\n\t '56.31',\t'56.14',\t'55.89',\t'56.08',\t'55.47',\n\t '55.24',\t'55.38',\t'55.53',\t'54.73',\t'54.37',\n\t '54.43',\t'54.20',\t'53.58',\t'52.96',\t'52.34',\n\t '53.21',\t'53.77',\t'53.75',\t'53.42',\t'52.63',\n\t '52.37',\t'51.79',\t'54.28',\t'52.45',\t'52.33',\n\t '51.43',\t'52.52',\t'51.72',\t'51.13',\t'50.17',\n\t '49.67',\t'49.45',\t'50.11',\t'49.70',\t'51.06',\n\t '51.89',\t'52.31',\t'52.16',\t'52.21',\t'52.99',\n\t '54.44',\t'54.90',\t'55.79',\t'55.89',\t'56.20',\n\t '56.53',\t'55.99',\t'54.98',\t'56.15',\t'55.66',\n\t '56.02',\t'55.56',\t'55.34',\t'55.40',\t'55.55',\n\t '54.83',\t'54.31',\t'53.81',\t'53.84',\t'53.76',\n \t '54.20',\t'54.08',\t'54.07',\t'52.85',\t'52.90',\n\t '53.10',\t'52.33',\t'51.81',\t'52.62',\t'51.85',\n\t '51.33',\t'51.50',\t'51.17',\t'51.73',\t'52.85',\n\t '54.29',\t'53.77',\t'52.99',\t'53.35',\t'55.03',\n\t '54.47',\t'54.71',\t'53.43',\t'52.97',\t'53.09',\n\t '52.80',\t'52.70',\t'53.13']\n\nclf1 = clf1.fit(O, C)\n\nclf2 = 
tree.DecisionTreeClassifier()\n\nH = ['74.18',\t'73.80', '74.91',\t'75.15',\t'75.23',\n\t '75.46',\t'76.19',\t'75.79',\t'78.31',\t'78.94',\n\t '79.34',\t'79.61',\t'80.15',\t'82.20',\t'83.33',\n\t '82.03',\t'82.69',\t'80.47',\t'78.84',\t'77.97',\n\t '79.26',\t'78.93',\t'80.36',\t'80.11',\t'80.72',\n\t '80.92',\t'77.27',\t'78.64',\t'78.60',\t'77.97',\n\t '78.25',\t'78.11',\t'76.92',\t'78.97',\t'77.51',\n\t '77.72',\t'77.63',\t'74.43',\t'79.01',\t'82.63',\n\t '83.27',\t'80.28',\t'81.63',\t'80.22',\t'80.04',\n\t '79.81',\t'79.98',\t'78.82',\t'77.92',\t'75.00',\n\t '74.62',\t'74.47',\t'74.13',\t'72.99',\t'73.46',\n\t '73.95',\t'72.82',\t'72.78',\t'73.38',\t'73.64',\n\t '73.43',\t'74.92',\t'74.06',\t'74.48',\t'73.89',\n\t '73.88',\t'71.82',\t'69.57',\t'69.98',\t'69.77',\n\t '70.02',\t'70.29',\t'70.48',\t'70.68',\t'69.07',\n\t '68.11',\t'67.87',\t'66.49',\t'65.42',\t'65.58',\n\t '65.96',\t'67.08',\t'67.05',\t'67.17',\t'67.65',\n\t '67.48',\t'67.88',\t'68.43',\t'68.82',\t'68.42',\n\t '66.40',\t'65.70',\t'64.58',\t'65.34',\t'67.64',\n\t '65.94',\t'67.43',\t'69.45',\t'70.49',\t'69.64',\n\t '69.58',\t'71.74',\t'72.02',\t'72.23',\t'73.53',\n\t '73.50',\t'73.20',\t'72.03',\t'72.52',\t'72.48',\n\t '71.83',\t'71.50',\t'70.43',\t'69.63',\t'69.55',\n\t '69.68',\t'69.45',\t'69.05',\t'68.96',\t'67.25',\n\t '66.74',\t'66.56',\t'66.88',\t'67.07',\t'66.85',\n\t '66.79',\t'65.86',\t'66.12',\t'66.38',\t'66.19',\n \t '65.04',\t'64.41',\t'65.29',\t'65.08',\t'64.94',\n\t '64.07',\t'64.46',\t'65.41',\t'64.32',\t'63.06',\n\t '62.78',\t'62.24',\t'62.38',\t'62.45',\t'62.25',\n\t '62.40',\t'62.36',\t'62.53',\t'62.45',\t'62.57',\n\t '61.81',\t'63.08',\t'62.55',\t'62.28',\t'62.73',\n\t '63.41',\t'63.12',\t'62.53',\t'62.78',\t'62.34',\n\t '61.90',\t'60.16',\t'59.57',\t'58.93',\t'57.92',\n\t '57.14',\t'57.27',\t'57.49',\t'57.74',\t'57.14',\n\t '56.86',\t'56.52',\t'56.57',\t'56.92',\t'56.90',\n\t '56.54',\t'57.02',\t'56.49',\t'56.40',\t'56.90',\n\t '56.69',\t'56.31',\t'56.27',\t'56.54',\t'55.77',\n\t '55.78',\t'55.65',\t'55.81',\t'55.01',\t'54.89',\n\t '54.70',\t'54.33',\t'53.73',\t'53.12',\t'53.33',\n\t '54.08',\t'54.48',\t'53.84',\t'53.70',\t'53.10',\n\t '52.56',\t'53.65',\t'54.32',\t'52.86',\t'52.54',\n\t '52.52',\t'52.97',\t'52.20',\t'51.54',\t'50.38',\n\t '49.96',\t'50.10',\t'50.54',\t'50.89',\t'52.06',\n\t '52.36',\t'52.69',\t'52.34',\t'53.04',\t'54.69',\n\t '55.06',\t'55.70',\t'56.45',\t'56.26',\t'56.69',\n\t '56.81',\t'56.20',\t'56.20',\t'56.46',\t'56.61',\n\t '56.64',\t'55.87',\t'55.57',\t'55.83',\t'55.71',\n\t '54.84',\t'54.49',\t'54.05',\t'53.88',\t'54.80',\n\t '54.57',\t'54.51',\t'54.15',\t'53.52',\t'53.07',\n\t '53.36',\t'52.61',\t'53.02',\t'52.86',\t'52.35',\n\t '52.06',\t'51.65',\t'52.12',\t'52.995',\t'53.81',\n\t '54.37',\t'53.77',\t'53.33',\t'54.90',\t'55.05',\n\t '55.20',\t'54.97',\t'53.96',\t'53.17',\t'53.16',\n\t '53.16',\t'53.13',\t'53.57']\n\nclf2 = clf2.fit(O, H)\n\nclf3 = tree.DecisionTreeClassifier()\n\nL = ['72.56',\t'72.23', '73.30',\t'73.71',\t'73.25',\n\t '73.27',\t'75.08',\t'74.44',\t'75.22',\t'76.84',\n\t '78.03',\t'78.35',\t'78.76',\t'79.73',\t'81.55',\n\t '79.00',\t'79.03',\t'77.96',\t'76.53',\t'75.93',\n\t '77.55',\t'77.21',\t'78.25',\t'78.59',\t'79.10',\n\t '77.24',\t'75.93',\t'76.72',\t'77.52',\t'76.99',\n\t '76.62',\t'75.84',\t'74.28',\t'76.46',\t'76.09',\n\t '74.52',\t'72.94',\t'70.46',\t'74.10',\t'81.31',\n\t '79.96',\t'78.18',\t'79.12',\t'78.88',\t'79.14',\n\t '78.21',\t'78.51',\t'76.60',\t'74.78',\t'73.97',\n\t '73.42',\t'73.41',\t'72.86',\t'71.65',\t'71.53',\n\t 
'72.73',\t'70.31',\t'71.38',\t'70.84',\t'72.13',\n\t '71.59',\t'72.40',\t'72.08',\t'71.17',\t'72.11',\n\t '71.71',\t'69.44',\t'68.72',\t'68.50',\t'67.45',\n\t '69.15',\t'68.76',\t'69.17',\t'69.43',\t'67.39',\n\t '66.68',\t'66.23',\t'64.60',\t'64.10',\t'64.14',\n\t '64.05',\t'65.59',\t'66.06',\t'65.76',\t'66.41',\n\t '66.13',\t'65.83',\t'66.98',\t'66.88',\t'65.18',\n\t '64.29',\t'64.02',\t'61.46',\t'62.77',\t'64.94',\n\t '62.26',\t'63.23',\t'66.81',\t'67.12',\t'68.21',\n\t '67.81',\t'69.97',\t'71.19',\t'71.01',\t'71.29',\n\t '72.64',\t'71.72',\t'71.09',\t'71.16',\t'70.92',\n\t '70.61',\t'70.47',\t'69.35',\t'68.45',\t'68.91',\n\t '69.01',\t'68.51',\t'68.40',\t'67.41',\t'65.98',\n\t '65.96',\t'66.16',\t'66.36',\t'66.41',\t'66.22',\n\t '65.42',\t'64.76',\t'64.68',\t'65.73',\t'64.70',\n\t '64.06',\t'63.80',\t'64.27',\t'64.33',\t'63.95',\n\t '63.00',\t'63.27',\t'63.15',\t'62.61',\t'61.59',\n\t '61.44',\t'60.95',\t'61.65',\t'61.60',\t'61.58',\n\t '61.26',\t'61.80',\t'61.75',\t'61.64',\t'61.15',\n\t '60.10',\t'61.95',\t'61.71',\t'61.61',\t'60.90',\n\t '62.56',\t'62.14',\t'62.10',\t'61.90',\t'60.87',\n\t '60.07',\t'58.83',\t'58.25',\t'57.40',\t'55.87',\n\t '55.89',\t'56.54',\t'56.26',\t'57.15',\t'56.17',\n\t '56.40',\t'55.73',\t'55.68',\t'56.29',\t'55.80',\n\t '56.14',\t'56.18',\t'56.04',\t'55.71',\t'56.14',\n\t '55.90',\t'55.25',\t'55.53',\t'55.28',\t'55.15',\n\t '54.61',\t'55.07',\t'54.98',\t'54.13',\t'54.26',\n\t '54.03',\t'53.38',\t'52.93',\t'52.26',\t'52.31',\n\t '52.70',\t'53.58',\t'52.59',\t'52.52',\t'52.34',\n\t '51.49',\t'50.52',\t'52.50',\t'52.00',\t'51.37',\n\t '51.12',\t'52.03',\t'51.36',\t'50.19',\t'49.33',\n\t '49.30',\t'49.45',\t'49.43',\t'49.68',\t'50.91',\n\t '51.76',\t'52.06',\t'51.69',\t'52.13',\t'52.83',\n\t '54.31',\t'54.85',\t'55.66',\t'55.75',\t'56.06',\n\t '55.90',\t'55.02',\t'54.93',\t'53.51',\t'55.64',\n\t '55.95',\t'55.33',\t'54.84',\t'54.81',\t'54.74',\n\t '54.22',\t'53.61',\t'53.60',\t'53.22',\t'53.72',\n\t '53.83',\t'53.91',\t'52.68',\t'52.62',\t'52.23',\n\t '52.62',\t'51.70',\t'51.72',\t'51.85',\t'51.08',\n\t '51.25',\t'50.74',\t'51.09',\t'51.18',\t'52.47',\n\t '53.81',\t'53.01',\t'52.75',\t'53.29',\t'54.34',\n\t '53.85',\t'53.46',\t'52.86',\t'52.47',\t'52.42',\n\t '52.44',\t'52.44',\t'52.38']\n\nclf3 = clf3.fit(O, L)\n\nwindow.mainloop()\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier" ] ]
EmilioCC/gti770-student-framework
[ "3cd72da8fe78c7ecfc26c9e688cbe1b7deee353a" ]
[ "tests/commons/helpers/dataset/strategies/galaxy_dataset/test_galaxyDataSetLabelStrategy.py" ]
[ "from unittest import TestCase\n\nimport os\nimport numpy as np\n\nfrom commons.helpers.dataset.context import Context\nfrom commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy\n\n\nclass TestGalaxyDataSetLabelStrategy(TestCase):\n\n def setUp(self):\n self.path = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\"\n\n def test_load_dataset_no_oneHot(self):\n galaxy_data_set_strategy = GalaxyDataSetLabelStrategy()\n context = Context(galaxy_data_set_strategy)\n dataset = context.load_dataset(csv_file=self.path, one_hot=False, validation_size=np.float32(0.2))\n self.assertTrue(dataset.train._num_examples == 25091)\n\n def test_load_dataset_with_oneHot(self):\n galaxy_data_set_strategy = GalaxyDataSetLabelStrategy()\n context = Context(galaxy_data_set_strategy)\n dataset = context.load_dataset(csv_file=self.path, one_hot=True, validation_size=np.float32(0.2))\n self.assertTrue(dataset.train._num_examples == 25091)\n" ]
[ [ "numpy.float32" ] ]
Shubhranshu-Shekhar/pysindy
[ "e8a428da3dd13956a86b278094bdf2eb92eeeb7e" ]
[ "pysindy/optimizers/stlsq.py" ]
[ "import warnings\n\nimport numpy as np\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import ridge_regression\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom pysindy.optimizers import BaseOptimizer\n\n\nclass STLSQ(BaseOptimizer):\n \"\"\"Sequentially thresholded least squares algorithm.\n\n Attempts to minimize the objective function\n :math:`\\\\|y - Xw\\\\|^2_2 + \\\\alpha \\\\|w\\\\|^2_2`\n by iteratively performing least squares and masking out\n elements of the weight that are below a given threshold.\n\n Parameters\n ----------\n threshold : float, optional (default 0.1)\n Minimum magnitude for a coefficient in the weight vector.\n Coefficients with magnitude below the threshold are set\n to zero.\n\n alpha : float, optional (default 0.05)\n Optional L2 (ridge) regularization on the weight vector.\n\n max_iter : int, optional (default 20)\n Maximum iterations of the optimization algorithm.\n\n ridge_kw : dict, optional\n Optional keyword arguments to pass to the ridge regression.\n\n fit_intercept : boolean, optional (default False)\n Whether to calculate the intercept for this model. If set to false, no\n intercept will be used in calculations.\n\n normalize : boolean, optional (default False)\n This parameter is ignored when fit_intercept is set to False. If True,\n the regressors X will be normalized before regression by subtracting\n the mean and dividing by the l2-norm.\n\n copy_X : boolean, optional (default True)\n If True, X will be copied; else, it may be overwritten.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n ind_ : array, shape (n_features,) or (n_targets, n_features)\n Array of 0s and 1s indicating which coefficients of the\n weight vector have not been masked out.\n\n history_ : list\n History of ``coef_``. 
``history_[k]`` contains the values of\n ``coef_`` at iteration k of sequentially thresholded least-squares.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.integrate import odeint\n >>> from pysindy import SINDy\n >>> from pysindy.optimizers import STLSQ\n >>> lorenz = lambda z,t : [10*(z[1] - z[0]),\n >>> z[0]*(28 - z[2]) - z[1],\n >>> z[0]*z[1] - 8/3*z[2]]\n >>> t = np.arange(0,2,.002)\n >>> x = odeint(lorenz, [-8,8,27], t)\n >>> opt = STLSQ(threshold=.1, alpha=.5)\n >>> model = SINDy(optimizer=opt)\n >>> model.fit(x, t=t[1]-t[0])\n >>> model.print()\n x0' = -9.999 1 + 9.999 x0\n x1' = 27.984 1 + -0.996 x0 + -1.000 1 x1\n x2' = -2.666 x1 + 1.000 1 x0\n \"\"\"\n\n def __init__(\n self,\n threshold=0.1,\n alpha=0.05,\n max_iter=20,\n ridge_kw=None,\n normalize=False,\n fit_intercept=False,\n copy_X=True,\n ):\n super(STLSQ, self).__init__(\n max_iter=max_iter,\n normalize=normalize,\n fit_intercept=fit_intercept,\n copy_X=copy_X,\n )\n\n if threshold < 0:\n raise ValueError(\"threshold cannot be negative\")\n if alpha < 0:\n raise ValueError(\"alpha cannot be negative\")\n\n self.threshold = threshold\n self.alpha = alpha\n self.ridge_kw = ridge_kw\n\n def _sparse_coefficients(self, dim, ind, coef, threshold):\n \"\"\"Perform thresholding of the weight vector(s)\"\"\"\n c = np.zeros(dim)\n c[ind] = coef\n big_ind = np.abs(c) >= threshold\n c[~big_ind] = 0\n return c, big_ind\n\n def _regress(self, x, y):\n \"\"\"Perform the ridge regression\"\"\"\n kw = self.ridge_kw or {}\n coef = ridge_regression(x, y, self.alpha, **kw)\n self.iters += 1\n return coef\n\n def _no_change(self):\n \"\"\"Check if the coefficient mask has changed after thresholding\"\"\"\n this_coef = self.history_[-1].flatten()\n if len(self.history_) > 1:\n last_coef = self.history_[-2].flatten()\n else:\n last_coef = np.zeros_like(this_coef)\n return all(bool(i) == bool(j) for i, j in zip(this_coef, last_coef))\n\n def _reduce(self, x, y):\n \"\"\"Iterates the thresholding. Assumes an initial guess is saved in\n self.coef_ and self.ind_\n \"\"\"\n ind = self.ind_\n n_samples, n_features = x.shape\n n_targets = y.shape[1]\n n_features_selected = np.sum(ind)\n\n for _ in range(self.max_iter):\n if np.count_nonzero(ind) == 0:\n warnings.warn(\n \"Sparsity parameter is too big ({}) and eliminated all \"\n \"coefficients\".format(self.threshold)\n )\n coef = np.zeros((n_targets, n_features))\n break\n\n coef = np.zeros((n_targets, n_features))\n for i in range(n_targets):\n if np.count_nonzero(ind[i]) == 0:\n warnings.warn(\n \"Sparsity parameter is too big ({}) and eliminated all \"\n \"coefficients\".format(self.threshold)\n )\n continue\n coef_i = self._regress(x[:, ind[i]], y[:, i])\n coef_i, ind_i = self._sparse_coefficients(\n n_features, ind[i], coef_i, self.threshold\n )\n coef[i] = coef_i\n ind[i] = ind_i\n\n self.history_.append(coef)\n if np.sum(ind) == n_features_selected or self._no_change():\n # could not (further) select important features\n break\n else:\n warnings.warn(\n \"STLSQ._reduce did not converge after {} iterations.\".format(\n self.max_iter\n ),\n ConvergenceWarning,\n )\n try:\n coef\n except NameError:\n coef = self.coef_\n warnings.warn(\n \"STLSQ._reduce has no iterations left to determine coef\",\n ConvergenceWarning,\n )\n self.coef_ = coef\n self.ind_ = ind\n\n @property\n def complexity(self):\n check_is_fitted(self)\n\n return np.count_nonzero(self.coef_) + np.count_nonzero(\n [abs(self.intercept_) >= self.threshold]\n )\n" ]
[ [ "numpy.zeros_like", "sklearn.utils.validation.check_is_fitted", "numpy.count_nonzero", "numpy.zeros", "sklearn.linear_model.ridge_regression", "numpy.sum", "numpy.abs" ] ]
Pseudomanifold/Skeleton_Persistence
[ "e93cc079548a4e7e95af7652157e29276cb280fe" ]
[ "analyse_matches.py" ]
[ "#!/usr/bin/env python3\n\nfrom collections import defaultdict\n\nimport math\nimport numpy\nimport os\nimport sys\n\nthreshold = 2.0\n\ndef distance( a,b,c,d ):\n return math.sqrt( (a-c)**2 + (b-d)**2 )\n\nfor filename in sys.argv[1:]:\n name = os.path.splitext( os.path.basename(filename) )[0]\n distances = list()\n matches = defaultdict(list)\n\n with open(filename) as f:\n for line in f:\n (a,b,c,d) = [ int(x) for x in line.split() ]\n matches[ (c,d) ].append( (a,b) )\n distances.append( distance(a,b,c,d) )\n\n print(\"Processed %s:\" % filename )\n print(\" Minimum distance: %f\" % min( distances ) )\n print(\" Mean distance : %f\" % numpy.mean( distances ) )\n print(\" Median distance : %f\" % numpy.median( distances ) )\n print(\" Maximum distance: %f\" % max( distances ) )\n print(\" Quantile <= 2 : %f\" % ( len( [ x for x in distances if x <= 2 ] ) / len(distances) ) )\n\n # Sort potential assignments based on their distance to the source\n # point. Afterwards, only the first entry is used to denote all of\n # the greedy matches.\n for c,d in sorted( matches.keys() ):\n matches[(c,d)] = sorted( matches[(c,d)], key = lambda ab : distance(c,d,ab[0], ab[1] ) )\n\n #\n # Greedy matches\n #\n with open(\"/tmp/\" + name + \"_matched.txt\", \"w\") as f:\n for c,d in sorted( matches.keys() ):\n a,b = matches[(c,d)][0]\n print(\"%d\\t%d\\t%d\\t%d\" % (a,b,c,d ), file=f)\n\n #\n # Unmatched\n #\n with open(\"/tmp/\" + name + \"_unmatched.txt\", \"w\") as f:\n for c,d in sorted( matches.keys() ):\n nonMatches = matches[(c,d)][1:]\n for a,b in nonMatches:\n print(\"%d\\t%d\" % (a,b), file=f)\n" ]
[ [ "numpy.median", "numpy.mean" ] ]
quangtm199/DeepFace
[ "aae47ca199ef1b33824d4c26ace3047ed02b35ae" ]
[ "deepface/commons/functions.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport base64\nfrom pathlib import Path\nfrom PIL import Image\nimport requests\n\nfrom deepface.detectors import FaceDetector\n\nimport tensorflow as tf\ntf_version = tf.__version__\ntf_major_version = int(tf_version.split(\".\")[0])\ntf_minor_version = int(tf_version.split(\".\")[1])\n\nif tf_major_version == 1:\n\timport keras\n\tfrom keras.preprocessing.image import load_img, save_img, img_to_array\n\tfrom keras.applications.imagenet_utils import preprocess_input\n\tfrom keras.preprocessing import image\nelif tf_major_version == 2:\n\tfrom tensorflow import keras\n\tfrom tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array\n\tfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\n\tfrom tensorflow.keras.preprocessing import image\n\n#--------------------------------------------------\n\ndef initialize_input(img1_path, img2_path = None):\n\n\tif type(img1_path) == list:\n\t\tbulkProcess = True\n\t\timg_list = img1_path.copy()\n\telse:\n\t\tbulkProcess = False\n\n\t\tif (\n\t\t\t(type(img2_path) == str and img2_path != None) #exact image path, base64 image\n\t\t\tor (isinstance(img2_path, np.ndarray) and img2_path.any()) #numpy array\n\t\t):\n\t\t\timg_list = [[img1_path, img2_path]]\n\t\telse: #analyze function passes just img1_path\n\t\t\timg_list = [img1_path]\n\n\treturn img_list, bulkProcess\n\ndef initialize_folder():\n\thome = get_deepface_home()\n\n\tif not os.path.exists(home+\"/.deepface\"):\n\t\tos.makedirs(home+\"/.deepface\")\n\t\tprint(\"Directory \", home, \"/.deepface created\")\n\n\tif not os.path.exists(home+\"/.deepface/weights\"):\n\t\tos.makedirs(home+\"/.deepface/weights\")\n\t\tprint(\"Directory \", home, \"/.deepface/weights created\")\n\ndef get_deepface_home():\n\treturn str(os.getenv('DEEPFACE_HOME', default=Path.home()))\n\ndef loadBase64Img(uri):\n encoded_data = uri.split(',')[1]\n nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return img\n\ndef load_image(img):\n\texact_image = False; base64_img = False; url_img = False\n\n\tif type(img).__module__ == np.__name__:\n\t\texact_image = True\n\n\telif len(img) > 11 and img[0:11] == \"data:image/\":\n\t\tbase64_img = True\n\n\telif len(img) > 11 and img.startswith(\"http\"):\n\t\turl_img = True\n\n\t#---------------------------\n\n\tif base64_img == True:\n\t\timg = loadBase64Img(img)\n\n\telif url_img:\n\t\timg = np.array(Image.open(requests.get(img, stream=True).raw))\n\n\telif exact_image != True: #image path passed as input\n\t\tif os.path.isfile(img) != True:\n\t\t\traise ValueError(\"Confirm that \",img,\" exists\")\n\n\t\timg = cv2.imread(img)\n\n\treturn img\n\ndef detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True, align = True):\n\n\timg_region = [0, 0, img.shape[0], img.shape[1]]\n\n\t#----------------------------------------------\n\t#people would like to skip detection and alignment if they already have pre-processed images\n\tif detector_backend == 'skip':\n\t\treturn img, img_region\n\n\t#----------------------------------------------\n\n\t#detector stored in a global variable in FaceDetector object.\n\t#this call should be completed very fast because it will return found in memory\n\t#it will not build face detector model in each call (consider for loops)\n\tface_detector = FaceDetector.build_model(detector_backend)\n\n\ttry:\n\t\tdetected_face, img_region = 
FaceDetector.detect_face(face_detector, detector_backend, img, align)\n\texcept: #if detected face shape is (0, 0) and alignment cannot be performed, this block will be run\n\t\tdetected_face = None\n\n\tif (isinstance(detected_face, np.ndarray)):\n\t\treturn detected_face, img_region\n\telse:\n\t\tif detected_face == None:\n\t\t\tif enforce_detection != True:\n\t\t\t\treturn img, img_region\n\t\t\telse:\n\t\t\t\traise ValueError(\"Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.\")\n\ndef normalize_input(img, normalization = 'base'):\n\n\t#issue 131 declares that some normalization techniques improves the accuracy\n\n\tif normalization == 'base':\n\t\treturn img\n\telse:\n\t\t#@trevorgribble and @davedgd contributed this feature\n\n\t\timg *= 255 #restore input in scale of [0, 255] because it was normalized in scale of [0, 1] in preprocess_face\n\n\t\tif normalization == 'raw':\n\t\t\tpass #return just restored pixels\n\n\t\telif normalization == 'Facenet':\n\t\t\tmean, std = img.mean(), img.std()\n\t\t\timg = (img - mean) / std\n\n\t\telif(normalization==\"Facenet2018\"):\n\t\t\t# simply / 127.5 - 1 (similar to facenet 2018 model preprocessing step as @iamrishab posted)\n\t\t\timg /= 127.5\n\t\t\timg -= 1\n\n\t\telif normalization == 'VGGFace':\n\t\t\t# mean subtraction based on VGGFace1 training data\n\t\t\timg[..., 0] -= 93.5940\n\t\t\timg[..., 1] -= 104.7624\n\t\t\timg[..., 2] -= 129.1863\n\n\t\telif(normalization == 'VGGFace2'):\n\t\t\t# mean subtraction based on VGGFace2 training data\n\t\t\timg[..., 0] -= 91.4953\n\t\t\timg[..., 1] -= 103.8827\n\t\t\timg[..., 2] -= 131.0912\n\n\t\telif(normalization == 'ArcFace'):\n\t\t\t#Reference study: The faces are cropped and resized to 112×112,\n\t\t\t#and each pixel (ranged between [0, 255]) in RGB images is normalised\n\t\t\t#by subtracting 127.5 then divided by 128.\n\t\t\timg -= 127.5\n\t\t\timg /= 128\n\n\t#-----------------------------\n\n\treturn img\n\ndef preprocess_face(img, target_size, grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False, align = True):\n\n\t#img might be path, base64 or numpy array. Convert it to numpy whatever it is.\n\timg = load_image(img)\n\t# cv2.imshow(img)\n\t# cv2.waitKey(0)\n\tbase_img = img.copy()\n\t\n\timg, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection, align = align)\n\t# cv2.imshow(\"img\",img)\n\t# cv2.waitKey(0)\n\t#--------------------------\n\n\tif img.shape[0] == 0 or img.shape[1] == 0:\n\t\tif enforce_detection == True:\n\t\t\traise ValueError(\"Detected face shape is \", img.shape,\". 
Consider to set enforce_detection argument to False.\")\n\t\telse: #restore base image\n\t\t\timg = base_img.copy()\n\t#--------------------------\n\t#post-processing\n\tif grayscale == True:\n\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\t#---------------------------------------------------\n\t#resize image to expected shape\n\n\t# img = cv2.resize(img, target_size) #resize causes transformation on base image, adding black pixels to resize will not deform the base image\n\n\tprint(img.shape)\n\tif img.shape[0] > 0 and img.shape[1] > 0:\n\t\tfactor_0 = target_size[0] / img.shape[0]\n\t\tfactor_1 = target_size[1] / img.shape[1]\n\t\tfactor = min(factor_0, factor_1)\n\n\t\tdsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))\n\t\timg = cv2.resize(img, dsize)\n\n\t\t# Then pad the other side to the target size by adding black pixels\n\t\tdiff_0 = target_size[0] - img.shape[0]\n\t\tdiff_1 = target_size[1] - img.shape[1]\n\t\tif grayscale == False:\n\t\t\t# Put the base image in the middle of the padded image\n\t\t\timg = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')\n\t\telse:\n\t\t\timg = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')\n\n\t#------------------------------------------\n\n\t#double check: if target image is not still the same size with target.\n\tif img.shape[0:2] != target_size:\n\t\timg = cv2.resize(img, target_size)\n\n\t#---------------------------------------------------\n\n\t#normalizing the image pixels\n\n\timg_pixels = image.img_to_array(img) #what this line doing? must?\n\timg_pixels = np.expand_dims(img_pixels, axis = 0)\n\timg_pixels /= 255 #normalize input in [0, 1]\n\n\t#---------------------------------------------------\n\n\tif return_region == True:\n\t\treturn img_pixels, region\n\telse:\n\t\treturn img_pixels\nimport torch\n\n# def preprocess_face_In(img, target_size, grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False, align = True):\n\n# \t#img might be path, base64 or numpy array. Convert it to numpy whatever it is.\n# \timg = load_image(img)\n\n# \tbase_img = img.copy()\n\t\n# \timg, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection, align = align)\n\n# \t#--------------------------\n# \tprint(\"img.shape[0]\",img.shape[0])\n# \tif img.shape[0] == 0 or img.shape[1] == 0:\n# \t\tif enforce_detection == True:\n# \t\t\traise ValueErtarget_sizeror(\"Detected face shape is \", img.shape,\". 
Consider to set enforce_detection argument to False.\")\n# \t\telse: #restore base image\n# \t\t\timg = base_img.copy()\n# \t#--------------------------\n# \t#post-processing\n# \tif grayscale == True:\n# \t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# \t#---------------------------------------------------\n# \t#resize image to expected shape\n\n# \t# img = cv2.resize(img, target_size) #resize causes transformation on base image, adding black pixels to resize will not deform the base image\n# \t# cv2.imwrite(\"2.jpg\",img)\n# \t# cv2.imshow(\"img\",img)\n# \t# cv2.waitKey(0)\n# \t# print(img.shape)\n# \tif img.shape[0] > 0 and img.shape[1] > 0:\n# \t\tfactor_0 = target_size[0] / img.shape[0]\n# \t\tfactor_1 = target_size[1] / img.shape[1]\n# \t\tfactor = min(factor_0, factor_1)\n\n# \t\tdsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))\n# \t\timg = cv2.resize(img, dsize)\n\n# \t\t# Then pad the other side to the target size by adding black pixels\n# \t\tdiff_0 = target_size[0] - img.shape[0]\n# \t\tdiff_1 = target_size[1] - img.shape[1]\n# \t\tif grayscale == False:\n# \t\t\t# Put the base image in the middle of the padded image\n# \t\t\timg = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')\n# \t\telse:\n# \t\t\timg = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')\n\n# \t#------------------------------------------\n\n# \t#double check: if target image is not still the same size with target.\n# \tif img.shape[0:2] != target_size:\n# \t\timg = cv2.resize(img, target_size)\n\n# \t#---------------------------------------------------\n\n# \t# img_pixels = image.img_to_array(img) #what this line doing? must?\n# \t# img_pixels = np.expand_dims(img_pixels, axis = 0)\n# \t# img_pixels /= 255 #normalize input in [0, 1]\n\n\n# \t#normalizing the image pixels\n\n# \timg_pixels = image.img_to_array(img) #what this line doing? must?\n\n\t\n\t\n# \timg_pixels = np.expand_dims(img_pixels, axis = 0)\n\t\n# \timg_pixels /= 255\n# \t# img_pixels = np.transpose(img_pixels, (0,3,1,2))\n# \t# img_pixels = torch.from_numpy(img_pixels).float()\n\n# \t# print(\"img_pixels\",img_pixels.shape)\n# \t# img_pixels = image.img_to_array(img) #what this line doing? must?\n\t\n# \t# img_pixels /= 255 #normalize input in [0, 1]\n\n# \t# img_pixels = image.img_to_array(img) #what this line doing? must?\n# \t# img_pixels = np.expand_dims(img_pixels, axis = 0)\n# \t# img_pixels /= 255 #normalize input in [0, 1]\n\n\n# \t#---------------------------------------------------\n# \t# cv2.imshow(\"img_pixels\",img_pixels)\n# \t# cv2.waitKey(0)\n# \tif return_region == True:\n# \t\treturn img_pixels, region\n# \telse:\n# \t\treturn img_pixels\n\n\ndef find_input_shape(model):\n\n\t#face recognition models have different size of inputs\n\t#my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. 
I think this is because of version issue.\n\n\tinput_shape = model.layers[0].input_shape\n\n\tif type(input_shape) == list:\n\t\tinput_shape = input_shape[0][1:3]\n\telse:\n\t\tinput_shape = input_shape[1:3]\n\n\t#----------------------(img)\n\t# cv2.waitKey(0)\n\t#issue 289: it seems that tf 2.5 expects you to resize images with (x, y)\n\t#whereas its older versions expect (y, x)\n\n\tif tf_major_version == 2 and tf_minor_version >= 5:\n\t\tx = input_shape[0]; y = input_shape[1]\n\t\tinput_shape = (y, x)\n\n\t#----------------------\n\n\tif type(input_shape) == list: #issue 197: some people got array here instead of tuple\n\t\tinput_shape = tuple(input_shape)\n\n\treturn input_shape\nif __name__==\"__main__\":\n\timg1 = preprocess_face(\"/home/quang/Documents/FACE/deepface/tests/dataset/img1.jpg\", 240)" ]
[ [ "numpy.pad", "tensorflow.keras.preprocessing.image.img_to_array", "numpy.expand_dims" ] ]
matescharnitzky/deep-learning-from-scratch
[ "2b0bf568550bfb7ebca7fa7b434da19b6114c5cc" ]
[ "mate_scratch/joelnet/loss.py" ]
[ "\nfrom numpy import ndarray\nimport numpy as np\n\n\nclass Loss:\n def loss(self, prediction: ndarray, actual: ndarray) -> float:\n raise NotImplementedError\n\n def grad(self, prediction: ndarray, actual: ndarray) -> ndarray:\n raise NotImplementedError\n\n\nclass MSE(Loss):\n \"\"\"\n Cost function for regression problems.\n \"\"\"\n \n def loss(self, prediction: ndarray, actual: ndarray) -> float:\n return np.sum((prediction - actual) ** 2) / prediction.shape[0]\n\n def grad(self, prediction: ndarray, actual: ndarray) -> ndarray:\n return 2.0 * (prediction - actual) / prediction.shape[0]" ]
[ [ "numpy.sum" ] ]
DavidykZhao/mlflow
[ "c71bae2365177e57d7b50004c5b9964bd455bacc" ]
[ "mlflow/pyfunc/__init__.py" ]
[ "\"\"\"\nThe ``python_function`` model flavor serves as a default model interface for MLflow Python models.\nAny MLflow Python model is expected to be loadable as a ``python_function`` model.\n\nIn addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format\n<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from\nthis format. The format is self contained in the sense that it includes all necessary information\nfor anyone to load it and use it. Dependencies are either stored directly with the model or\nreferenced via a Conda environment.\n\nThe ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models\nusing frameworks and inference logic that may not be natively included in MLflow. See\n:ref:`pyfunc-create-custom`.\n\n.. _pyfunc-inference-api:\n\n*************\nInference API\n*************\n\nPython function models are loaded as an instance of :py:class:`PyFuncModel\n<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model\nmetadata (MLmodel file). You can score the model by calling the :py:func:`predict()\n<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::\n\n predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.(Series | DataFrame)]\n\n\n.. _pyfunc-filesystem-format:\n\n*****************\nFilesystem format\n*****************\n\nThe Pyfunc format is defined as a directory structure containing all required data, code, and\nconfiguration::\n\n ./dst-path/\n ./MLmodel: configuration\n <code>: code packaged with the model (specified in the MLmodel file)\n <data>: data packaged with the model (specified in the MLmodel file)\n <env>: Conda environment definition (specified in the MLmodel file)\n\nThe directory structure may contain additional contents that can be referenced by the ``MLmodel``\nconfiguration.\n\n.. _pyfunc-model-config:\n\nMLModel configuration\n#####################\n\nA Python model contains an ``MLmodel`` file in **python_function** format in its root with the\nfollowing parameters:\n\n- loader_module [required]:\n Python module that can load the model. Expected as module identifier\n e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.\n The imported module must contain a function with the following signature::\n\n _load_pyfunc(path: string) -> <pyfunc model implementation>\n\n The path argument is specified by the ``data`` parameter and may refer to a file or\n directory. The model implementation is expected to be an object with a\n ``predict`` method with the following signature::\n\n predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.(Series | DataFrame)]\n\n- code [optional]:\n Relative path to a directory containing the code packaged with this model.\n All files and directories inside this directory are added to the Python path\n prior to importing the model loader.\n\n- data [optional]:\n Relative path to a file or directory containing model data.\n The path is passed to the model loader.\n\n- env [optional]:\n Relative path to an exported Conda environment. If present this environment\n should be activated prior to running the model.\n\n- Optionally, any additional parameters necessary for interpreting the serialized model in\n ``pyfunc`` format.\n\n.. 
rubric:: Example\n\n::\n\n tree example/sklearn_iris/mlruns/run1/outputs/linear-lr\n\n::\n\n โ”œโ”€โ”€ MLmodel\n โ”œโ”€โ”€ code\n โ”‚ย ย  โ”œโ”€โ”€ sklearn_iris.py\n โ”‚\n โ”œโ”€โ”€ data\n โ”‚ย ย  โ””โ”€โ”€ model.pkl\n โ””โ”€โ”€ mlflow_env.yml\n\n::\n\n cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel\n\n::\n\n python_function:\n code: code\n data: data/model.pkl\n loader_module: mlflow.sklearn\n env: mlflow_env.yml\n main: sklearn_iris\n\n.. _pyfunc-create-custom:\n\n******************************\nCreating custom Pyfunc models\n******************************\n\nMLflow's persistence modules provide convenience functions for creating models with the\n``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and\nmore); however, they do not cover every use case. For example, you may want to create an MLflow\nmodel with the ``pyfunc`` flavor using a framework that MLflow does not natively support.\nAlternatively, you may want to build an MLflow model that executes custom logic when evaluating\nqueries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``\nprovides utilities for creating ``pyfunc`` models from arbitrary code and model data.\n\nThe :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows\nfor creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts\nthat the logic may require.\n\nAn `artifact` is a file or directory, such as a serialized model or a CSV. For example, a\nserialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.\n\n.. _pyfunc-create-custom-workflows:\n\nWorkflows\n#########\n\n:meth:`save_model()` and :meth:`log_model()` support the following workflows:\n\n1. Programmatically defining a new MLflow model, including its attributes and artifacts.\n\n Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can\n automatically download artifacts from their URIs and create an MLflow model directory.\n\n In this case, you must define a Python class which inherits from :class:`~PythonModel`,\n defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is\n specified via the ``python_model`` parameter; it is automatically serialized and deserialized\n as a Python class, including all of its attributes.\n\n2. Interpreting pre-existing data as an MLflow model.\n\n If you already have a directory containing model data, :meth:`save_model()` and\n :meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter\n specifies the local filesystem path to the directory containing model data.\n\n In this case, you must provide a Python module, called a `loader module`. The\n loader module defines a ``_load_pyfunc()`` method that performs the following tasks:\n\n - Load data from the specified ``data_path``. For example, this process may include\n deserializing pickled Python objects or models or parsing CSV files.\n\n - Construct and return a pyfunc-compatible model wrapper. As in the first\n use case, this wrapper must define a ``predict()`` method that is used to evaluate\n queries. 
``predict()`` must adhere to the :ref:`pyfunc-inference-api`.\n\n The ``loader_module`` parameter specifies the name of your loader module.\n\n For an example loader module implementation, refer to the `loader module\n implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/\n 74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.\n\n.. _pyfunc-create-custom-selecting-workflow:\n\nWhich workflow is right for my use case?\n########################################\n\nWe consider the first workflow to be more user-friendly and generally recommend it for the\nfollowing reasons:\n\n- It automatically resolves and collects specified model artifacts.\n\n- It automatically serializes and deserializes the ``python_model`` instance and all of\n its attributes, reducing the amount of user logic that is required to load the model\n\n- You can create Models using logic that is defined in the ``__main__`` scope. This allows\n custom models to be constructed in interactive environments, such as notebooks and the Python\n REPL.\n\nYou may prefer the second, lower-level workflow for the following reasons:\n\n- Inference logic is always persisted as code, rather than a Python object. This makes logic\n easier to inspect and modify later.\n\n- If you have already collected all of your model data in a single location, the second\n workflow allows it to be saved in MLflow format directly, without enumerating constituent\n artifacts.\n\"\"\"\n\nimport importlib\n\nimport numpy as np\nimport os\nimport pandas\nimport yaml\nfrom copy import deepcopy\nimport logging\n\nfrom typing import Any, Union\nimport mlflow\nimport mlflow.pyfunc.model\nimport mlflow.pyfunc.utils\nfrom mlflow.models import Model, ModelSignature, ModelInputExample\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.models.utils import _save_example\nfrom mlflow.pyfunc.model import PythonModel, PythonModelContext # pylint: disable=unused-import\nfrom mlflow.pyfunc.model import get_default_conda_env\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.types import DataType, Schema\nfrom mlflow.utils import PYTHON_VERSION, get_major_minor_py_version\nfrom mlflow.utils.annotations import deprecated\nfrom mlflow.utils.file_utils import TempDir, _copy_file_or_tree\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.protos.databricks_pb2 import (\n INVALID_PARAMETER_VALUE,\n RESOURCE_ALREADY_EXISTS,\n RESOURCE_DOES_NOT_EXIST,\n)\n\nFLAVOR_NAME = \"python_function\"\nMAIN = \"loader_module\"\nCODE = \"code\"\nDATA = \"data\"\nENV = \"env\"\nPY_VERSION = \"python_version\"\n\n_logger = logging.getLogger(__name__)\n\n\ndef add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):\n \"\"\"\n Add a ``pyfunc`` spec to the model configuration.\n\n Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model\n flavor out of an existing directory structure. 
For example, other model flavors can use this to\n specify how to use their output as a ``pyfunc``.\n\n NOTE:\n\n All paths are relative to the exported model root directory.\n\n :param model: Existing model.\n :param loader_module: The module to be used to load the model.\n :param data: Path to the model data.\n :param code: Path to the code dependencies.\n :param env: Conda environment.\n :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.\n Values must be YAML-serializable.\n :return: Updated model configuration.\n \"\"\"\n parms = deepcopy(kwargs)\n parms[MAIN] = loader_module\n parms[PY_VERSION] = PYTHON_VERSION\n if code:\n parms[CODE] = code\n if data:\n parms[DATA] = data\n if env:\n parms[ENV] = env\n return model.add_flavor(FLAVOR_NAME, **parms)\n\n\ndef _load_model_env(path):\n \"\"\"\n Get ENV file string from a model configuration stored in Python Function format.\n Returned value is a model-relative path to a Conda Environment file,\n or None if none was specified at model save time\n \"\"\"\n return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)\n\n\ndef _enforce_type(name, values: pandas.Series, t: DataType):\n \"\"\"\n Enforce the input column type matches the declared in model input schema.\n\n The following type conversions are allowed:\n\n 1. np.object -> string\n 2. int -> long (upcast)\n 3. float -> double (upcast)\n 4. int -> double (safe conversion)\n\n Any other type mismatch will raise error.\n \"\"\"\n if values.dtype == np.object and t not in (DataType.binary, DataType.string):\n values = values.infer_objects()\n\n if t == DataType.string and values.dtype == np.object:\n # NB: strings are by default parsed and inferred as objects, but it is\n # recommended to use StringDtype extension type if available. See\n #\n # `https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html`\n #\n # for more detail.\n try:\n return values.astype(t.to_pandas(), errors=\"raise\")\n except ValueError:\n raise MlflowException(\n \"Failed to convert column {0} from type {1} to {2}.\".format(name, values.dtype, t)\n )\n\n # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand\n # side of the comparison operator. It works, however, if pandas type is on the left hand side.\n # That is because pandas is aware of numpy.\n if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:\n # The types are already compatible => conversion is not necessary.\n return values\n\n if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:\n # NB: bytes in numpy have variable itemsize depending on the length of the longest\n # element in the array (column). Since MLflow binary type is length agnostic, we ignore\n # itemsize when matching binary columns.\n return values\n\n numpy_type = t.to_numpy()\n if values.dtype.kind == numpy_type.kind:\n is_upcast = values.dtype.itemsize <= numpy_type.itemsize\n elif values.dtype.kind == \"u\" and numpy_type.kind == \"i\":\n is_upcast = values.dtype.itemsize < numpy_type.itemsize\n elif values.dtype.kind in (\"i\", \"u\") and numpy_type == np.float64:\n # allow (u)int => double conversion\n is_upcast = values.dtype.itemsize <= 6\n else:\n is_upcast = False\n\n if is_upcast:\n return values.astype(numpy_type, errors=\"raise\")\n else:\n # NB: conversion between incompatible types (e.g. floats -> ints or\n # double -> float) are not allowed. 
While supported by pandas and numpy,\n # these conversions alter the values significantly.\n def all_ints(xs):\n return all([pandas.isnull(x) or int(x) == x for x in xs])\n\n hint = \"\"\n if (\n values.dtype == np.float64\n and numpy_type.kind in (\"i\", \"u\")\n and values.hasnans\n and all_ints(values)\n ):\n hint = (\n \" Hint: the type mismatch is likely caused by missing values. \"\n \"Integer columns in python can not represent missing values and are therefore \"\n \"encoded as floats. The best way to avoid this problem is to infer the model \"\n \"schema based on a realistic data sample (training dataset) that includes missing \"\n \"values. Alternatively, you can declare integer columns as doubles (float64) \"\n \"whenever these columns may have missing values. See `Handling Integers With \"\n \"Missing Values <https://www.mlflow.org/docs/latest/models.html#\"\n \"handling-integers-with-missing-values>`_ for more details.\"\n )\n\n raise MlflowException(\n \"Incompatible input types for column {0}. \"\n \"Can not safely convert {1} to {2}.{3}\".format(name, values.dtype, numpy_type, hint)\n )\n\n\ndef _enforce_schema(pdf: pandas.DataFrame, input_schema: Schema):\n \"\"\"\n Enforce column names and types match the input schema.\n\n For column names, we check there are no missing columns and reorder the columns to match the\n ordering declared in schema if necessary. Any extra columns are ignored.\n\n For column types, we make sure the types match schema or can be safely converted to match the\n input schema.\n \"\"\"\n if isinstance(pdf, list):\n pdf = pandas.DataFrame(pdf)\n if not isinstance(pdf, pandas.DataFrame):\n message = \"Expected input to be DataFrame or list. Found: %s\" % type(pdf).__name__\n raise MlflowException(message)\n\n if input_schema.has_column_names():\n # make sure there are no missing columns\n col_names = input_schema.column_names()\n expected_names = set(col_names)\n actual_names = set(pdf.columns)\n missing_cols = expected_names - actual_names\n extra_cols = actual_names - expected_names\n # Preserve order from the original columns, since missing/extra columns are likely to\n # be in same order.\n missing_cols = [c for c in col_names if c in missing_cols]\n extra_cols = [c for c in pdf.columns if c in extra_cols]\n if missing_cols:\n message = (\n \"Model input is missing columns {0}.\"\n \" Note that there were extra columns: {1}\".format(missing_cols, extra_cols)\n )\n raise MlflowException(message)\n else:\n # The model signature does not specify column names => we can only verify column count.\n if len(pdf.columns) < len(input_schema.columns):\n message = (\n \"Model input is missing input columns. The model signature declares \"\n \"{0} input columns but the provided input only has \"\n \"{1} columns. Note: the columns were not named in the signature so we can \"\n \"only verify their count.\"\n ).format(len(input_schema.columns), len(pdf.columns))\n raise MlflowException(message)\n col_names = pdf.columns[: len(input_schema.columns)]\n col_types = input_schema.column_types()\n new_pdf = pandas.DataFrame()\n for i, x in enumerate(col_names):\n new_pdf[x] = _enforce_type(x, pdf[x], col_types[i])\n return new_pdf\n\n\nPyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]\n\n\nclass PyFuncModel(object):\n \"\"\"\n MLflow 'python function' model.\n\n Wrapper around model implementation and metadata. This class is not meant to be constructed\n directly. 
Instead, instances of this class are constructed and returned from\n py:func:`mlflow.pyfunc.load_model`.\n\n ``model_impl`` can be any Python object that implements the `Pyfunc interface\n <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is\n returned by invoking the model's ``loader_module``.\n\n ``model_meta`` contains model metadata loaded from the MLmodel file.\n \"\"\"\n\n def __init__(self, model_meta: Model, model_impl: Any):\n if not hasattr(model_impl, \"predict\"):\n raise MlflowException(\"Model implementation is missing required predict method.\")\n if not model_meta:\n raise MlflowException(\"Model is missing metadata.\")\n self._model_meta = model_meta\n self._model_impl = model_impl\n\n def predict(self, data: pandas.DataFrame) -> PyFuncOutput:\n \"\"\"\n Generate model predictions.\n\n If the model contains signature, enforce the input schema first before calling the model\n implementation with the sanitized input. If the pyfunc model does not include model schema,\n the input is passed to the model implementation as is. See `Model Signature Enforcement\n <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details.\"\n\n :param data: Model input as pandas.DataFrame.\n :return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.\n \"\"\"\n input_schema = self.metadata.get_input_schema()\n if input_schema is not None:\n data = _enforce_schema(data, input_schema)\n return self._model_impl.predict(data)\n\n @property\n def metadata(self):\n \"\"\"Model metadata.\"\"\"\n if self._model_meta is None:\n raise MlflowException(\"Model is missing metadata.\")\n return self._model_meta\n\n def __repr__(self):\n info = {}\n if self._model_meta is not None:\n if hasattr(self._model_meta, \"run_id\") and self._model_meta.run_id is not None:\n info[\"run_id\"] = self._model_meta.run_id\n if (\n hasattr(self._model_meta, \"artifact_path\")\n and self._model_meta.artifact_path is not None\n ):\n info[\"artifact_path\"] = self._model_meta.artifact_path\n info[\"flavor\"] = self._model_meta.flavors[FLAVOR_NAME][\"loader_module\"]\n return yaml.safe_dump({\"mlflow.pyfunc.loaded_model\": info}, default_flow_style=False)\n\n\ndef load_model(model_uri: str, suppress_warnings: bool = True) -> PyFuncModel:\n \"\"\"\n Load a model stored in Python function format.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model\n loading process will be suppressed. 
If ``False``, these warning\n messages will be emitted.\n \"\"\"\n local_path = _download_artifact_from_uri(artifact_uri=model_uri)\n model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))\n\n conf = model_meta.flavors.get(FLAVOR_NAME)\n if conf is None:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=FLAVOR_NAME),\n RESOURCE_DOES_NOT_EXIST,\n )\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n if CODE in conf and conf[CODE]:\n code_path = os.path.join(local_path, conf[CODE])\n mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path)\n data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path\n model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)\n return PyFuncModel(model_meta=model_meta, model_impl=model_impl)\n\n\n@deprecated(\"mlflow.pyfunc.load_model\", 1.0)\ndef load_pyfunc(model_uri, suppress_warnings=False):\n \"\"\"\n Load a model stored in Python function format.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model\n loading process will be suppressed. If ``False``, these warning\n messages will be emitted.\n \"\"\"\n return load_model(model_uri, suppress_warnings)\n\n\ndef _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):\n \"\"\"\n Compares the version of Python that was used to save a given model with the version\n of Python that is currently running. If a major or minor version difference is detected,\n logs an appropriate warning.\n \"\"\"\n if model_py_version is None:\n _logger.warning(\n \"The specified model does not have a specified Python version. It may be\"\n \" incompatible with the version of Python that is currently running: Python %s\",\n PYTHON_VERSION,\n )\n elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):\n _logger.warning(\n \"The version of Python that the model was saved in, `Python %s`, differs\"\n \" from the version of Python that is currently running, `Python %s`,\"\n \" and may be incompatible\",\n model_py_version,\n PYTHON_VERSION,\n )\n\n\ndef spark_udf(spark, model_uri, result_type=\"double\"):\n \"\"\"\n A Spark UDF that can be used to invoke the Python function formatted model.\n\n Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names\n are ordinals (0, 1, ...). On some versions of Spark, it is also possible to wrap the input in a\n struct. In that case, the data will be passed as a DataFrame with column names given by the\n struct definition (e.g. when invoked as my_udf(struct('x', 'y'), the model will ge the data as a\n pandas DataFrame with 2 columns 'x' and 'y').\n\n The predictions are filtered to contain only the columns that can be represented as the\n ``result_type``. If the ``result_type`` is string or array of strings, all predictions are\n converted to string. 
If the result type is not an array type, the left most column with\n matching type is returned.\n\n .. code-block:: python\n :caption: Example\n\n predict = mlflow.pyfunc.spark_udf(spark, \"/my/local/model\")\n df.withColumn(\"prediction\", predict(\"name\", \"age\")).show()\n\n :param spark: A SparkSession object.\n :param model_uri: The location, in URI format, of the MLflow model with the\n :py:mod:`mlflow.pyfunc` flavor. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param result_type: the return type of the user-defined function. The value can be either a\n ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive\n type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.\n The following classes of result type are supported:\n\n - \"int\" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an\n ``int32`` or an exception if there is none.\n\n - \"long\" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an\n ``int64`` or an exception if there is none.\n\n - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested\n size.\n\n - \"float\" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to\n ``float32`` or an exception if there is none.\n\n - \"double\" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to\n ``double`` or an exception if there is none.\n\n - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or\n an exception if there are no numeric columns.\n\n - \"string\" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.\n\n - ``ArrayType(StringType)``: All columns converted to ``string``.\n\n :return: Spark UDF that applies the model's ``predict`` method to the data and returns a\n type specified by ``result_type``, which by default is a double.\n \"\"\"\n\n # Scope Spark import to this method so users don't need pyspark to use non-Spark-related\n # functionality.\n from mlflow.pyfunc.spark_model_cache import SparkModelCache\n from pyspark.sql.functions import pandas_udf\n from pyspark.sql.types import _parse_datatype_string\n from pyspark.sql.types import ArrayType, DataType as SparkDataType\n from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType\n\n if not isinstance(result_type, SparkDataType):\n result_type = _parse_datatype_string(result_type)\n\n elem_type = result_type\n if isinstance(elem_type, ArrayType):\n elem_type = elem_type.elementType\n\n supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]\n\n if not any([isinstance(elem_type, x) for x in supported_types]):\n raise MlflowException(\n message=\"Invalid result_type '{}'. 
Result type can only be one of or an array of one \"\n \"of the following types types: {}\".format(str(elem_type), str(supported_types)),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n with TempDir() as local_tmpdir:\n local_model_path = _download_artifact_from_uri(\n artifact_uri=model_uri, output_path=local_tmpdir.path()\n )\n archive_path = SparkModelCache.add_local_model(spark, local_model_path)\n\n def predict(*args):\n model = SparkModelCache.get_or_load(archive_path)\n input_schema = model.metadata.get_input_schema()\n pdf = None\n\n for x in args:\n if type(x) == pandas.DataFrame:\n if len(args) != 1:\n raise Exception(\n \"If passing a StructType column, there should be only one \"\n \"input column, but got %d\" % len(args)\n )\n pdf = x\n if pdf is None:\n args = list(args)\n if input_schema is None:\n names = [str(i) for i in range(len(args))]\n else:\n names = input_schema.column_names()\n if len(args) > len(names):\n args = args[: len(names)]\n if len(args) < len(names):\n message = (\n \"Model input is missing columns. Expected {0} input columns {1},\"\n \" but the model received only {2} unnamed input columns\"\n \" (Since the columns were passed unnamed they are expected to be in\"\n \" the order specified by the schema).\".format(len(names), names, len(args))\n )\n raise MlflowException(message)\n pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)\n\n result = model.predict(pdf)\n\n if not isinstance(result, pandas.DataFrame):\n result = pandas.DataFrame(data=result)\n\n elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type\n\n if type(elem_type) == IntegerType:\n result = result.select_dtypes(\n [np.byte, np.ubyte, np.short, np.ushort, np.int32]\n ).astype(np.int32)\n elif type(elem_type) == LongType:\n result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int, np.long])\n\n elif type(elem_type) == FloatType:\n result = result.select_dtypes(include=(np.number,)).astype(np.float32)\n\n elif type(elem_type) == DoubleType:\n result = result.select_dtypes(include=(np.number,)).astype(np.float64)\n\n if len(result.columns) == 0:\n raise MlflowException(\n message=\"The the model did not produce any values compatible with the requested \"\n \"type '{}'. 
Consider requesting udf with StringType or \"\n \"Arraytype(StringType).\".format(str(elem_type)),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n if type(elem_type) == StringType:\n result = result.applymap(str)\n\n if type(result_type) == ArrayType:\n return pandas.Series(result.to_numpy().tolist())\n else:\n return result[result.columns[0]]\n\n return pandas_udf(predict, result_type)\n\n\ndef save_model(\n path,\n loader_module=None,\n data_path=None,\n code_path=None,\n conda_env=None,\n mlflow_model=None,\n python_model=None,\n artifacts=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n **kwargs\n):\n \"\"\"\n save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\\\n mlflow_model=Model(), python_model=None, artifacts=None)\n\n Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the\n local filesystem.\n\n For information about the workflows that this method supports, please see :ref:`\"workflows for\n creating custom pyfunc models\" <pyfunc-create-custom-workflows>` and\n :ref:`\"which workflow is right for my use case?\" <pyfunc-create-custom-selecting-workflow>`.\n Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the\n parameters for the first workflow: ``python_model``, ``artifacts``, cannot be\n specified together.\n\n :param path: The path to which to save the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``. If not ``None``, this module and its\n dependencies must be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n :param data_path: Path to a file or directory containing model data.\n :param code_path: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. This decsribes the environment this model should\n be run in. If ``python_model`` is not ``None``, the Conda environment must\n at least specify the dependencies contained in\n :func:`get_default_conda_env()`. If ``None``, the default\n :func:`get_default_conda_env()` environment is added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'cloudpickle==0.5.8'\n ]\n }\n :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the\n **python_function** flavor.\n :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is\n serialized using the CloudPickle library. 
Any dependencies of the class\n should be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n Note: If the class is imported from another module, as opposed to being\n defined in the ``__main__`` scope, the defining module should also be\n included in one of the listed locations.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. ``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` parameter\n in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`\n and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.\n For example, consider the following ``artifacts`` dictionary::\n\n {\n \"my_file\": \"s3://my-bucket/path/to/my/file\"\n }\n\n In this case, the ``\"my_file\"`` artifact is downloaded from S3. The\n ``python_model`` can then refer to ``\"my_file\"`` as an absolute filesystem\n path via ``context.artifacts[\"my_file\"]``.\n\n If ``None``, no artifacts are added to the model.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n \"\"\"\n mlflow_model = kwargs.pop(\"model\", mlflow_model)\n if len(kwargs) > 0:\n raise TypeError(\"save_model() got unexpected keyword arguments: {}\".format(kwargs))\n if code_path is not None:\n if not isinstance(code_path, list):\n raise TypeError(\"Argument code_path should be a list, not {}\".format(type(code_path)))\n\n first_argument_set = {\n \"loader_module\": loader_module,\n \"data_path\": data_path,\n }\n second_argument_set = {\n \"artifacts\": artifacts,\n \"python_model\": python_model,\n }\n first_argument_set_specified = any([item is not None for item in first_argument_set.values()])\n second_argument_set_specified = any([item is not None for item in second_argument_set.values()])\n if first_argument_set_specified and second_argument_set_specified:\n raise MlflowException(\n message=(\n \"The following sets of parameters cannot be specified together: {first_set_keys}\"\n \" and {second_set_keys}. All parameters in one set must be `None`. 
Instead, found\"\n \" the following values: {first_set_entries} and {second_set_entries}\".format(\n first_set_keys=first_argument_set.keys(),\n second_set_keys=second_argument_set.keys(),\n first_set_entries=first_argument_set,\n second_set_entries=second_argument_set,\n )\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n elif (loader_module is None) and (python_model is None):\n msg = (\n \"Either `loader_module` or `python_model` must be specified. A `loader_module` \"\n \"should be a python module. A `python_model` should be a subclass of PythonModel\"\n )\n raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)\n\n if os.path.exists(path):\n raise MlflowException(\n message=\"Path '{}' already exists\".format(path), error_code=RESOURCE_ALREADY_EXISTS\n )\n os.makedirs(path)\n if mlflow_model is None:\n mlflow_model = Model()\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n if first_argument_set_specified:\n return _save_model_with_loader_module_and_data_path(\n path=path,\n loader_module=loader_module,\n data_path=data_path,\n code_paths=code_path,\n conda_env=conda_env,\n mlflow_model=mlflow_model,\n )\n elif second_argument_set_specified:\n return mlflow.pyfunc.model._save_model_with_class_artifacts_params(\n path=path,\n python_model=python_model,\n artifacts=artifacts,\n conda_env=conda_env,\n code_paths=code_path,\n mlflow_model=mlflow_model,\n )\n\n\ndef log_model(\n artifact_path,\n loader_module=None,\n data_path=None,\n code_path=None,\n conda_env=None,\n python_model=None,\n artifacts=None,\n registered_model_name=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n):\n \"\"\"\n Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow\n artifact for the current run.\n\n For information about the workflows that this method supports, see :ref:`Workflows for\n creating custom pyfunc models <pyfunc-create-custom-workflows>` and\n :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.\n You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``\n and the parameters for the first workflow: ``python_model``, ``artifacts`` together.\n\n :param artifact_path: The run-relative artifact path to which to log the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``. If not ``None``, this module and its\n dependencies must be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n :param data_path: Path to a file or directory containing model data.\n :param code_path: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. This decsribes the environment this model should\n be run in. 
If ``python_model`` is not ``None``, the Conda environment must\n at least specify the dependencies contained in\n :func:`get_default_conda_env()`. If `None`, the default\n :func:`get_default_conda_env()` environment is added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'cloudpickle==0.5.8'\n ]\n }\n\n :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is\n serialized using the CloudPickle library. Any dependencies of the class\n should be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n Note: If the class is imported from another module, as opposed to being\n defined in the ``__main__`` scope, the defining module should also be\n included in one of the listed locations.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. ``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` parameter\n in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`\n and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.\n For example, consider the following ``artifacts`` dictionary::\n\n {\n \"my_file\": \"s3://my-bucket/path/to/my/file\"\n }\n\n In this case, the ``\"my_file\"`` artifact is downloaded from S3. The\n ``python_model`` can then refer to ``\"my_file\"`` as an absolute filesystem\n path via ``context.artifacts[\"my_file\"]``.\n\n If ``None``, no artifacts are added to the model.\n :param registered_model_name: Note:: Experimental: This argument may change or be removed in a\n future release without warning. If given, create a model\n version under ``registered_model_name``, also creating a\n registered model if one with the given name does not exist.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. 
Specify 0 or None to skip waiting.\n \"\"\"\n return Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.pyfunc,\n loader_module=loader_module,\n data_path=data_path,\n code_path=code_path,\n python_model=python_model,\n artifacts=artifacts,\n conda_env=conda_env,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n )\n\n\ndef _save_model_with_loader_module_and_data_path(\n path, loader_module, data_path=None, code_paths=None, conda_env=None, mlflow_model=Model()\n):\n \"\"\"\n Export model as a generic Python function model.\n :param path: The path to which to save the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``.\n :param data_path: Path to a file or directory containing model data.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in.\n :return: Model configuration containing model info.\n \"\"\"\n\n code = None\n data = None\n\n if data_path is not None:\n model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir=\"data\")\n data = model_file\n\n if code_paths is not None:\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=\"code\")\n code = \"code\"\n\n conda_env_subpath = \"mlflow_env.yml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n mlflow.pyfunc.add_to_model(\n mlflow_model, loader_module=loader_module, code=code, data=data, env=conda_env_subpath\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n return mlflow_model\n\n\nloader_template = \"\"\"\n\nimport importlib\nimport os\nimport sys\n\ndef load_pyfunc():\n {update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')\n\n\"\"\"\n" ]
[ [ "pandas.isnull", "pandas.DataFrame" ] ]
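A minimal usage sketch of the `python_model` workflow that the `save_model`/`log_model` docstrings above describe, adapted from MLflow's standard AddN example. The model class, the `add_n_model` path, and the sample input are illustrative; the target path must not already exist, since `save_model` raises RESOURCE_ALREADY_EXISTS for an existing path, and the first-workflow parameters (`loader_module`/`data_path`) are left as None because they cannot be combined with `python_model`.

import pandas as pd
import mlflow.pyfunc

class AddN(mlflow.pyfunc.PythonModel):
    """Toy custom model: adds a fixed constant to every input value."""
    def __init__(self, n):
        self.n = n

    def predict(self, context, model_input):
        # pyfunc passes inputs as a pandas DataFrame
        return model_input.apply(lambda col: col + self.n)

# Serialize the PythonModel instance (CloudPickle) plus its environment to a local path.
mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))

# Reload through the generic pyfunc loader and run inference.
loaded = mlflow.pyfunc.load_model("add_n_model")
print(loaded.predict(pd.DataFrame({"x": range(10)})))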
martingu11/endas
[ "e58be74e844efa14cbd86aba5e76dbc44fe690de" ]
[ "examples/lorenz95.py" ]
[ "\"\"\"\nData assimilation example on the Lorenz 95 dynamical system.\n\nTo run from console, use:\n\n cd examples\n python3 -i lorenz95.py\n\nThe ``-i`` switch is important if you are using regular Python- it enables the interactive mode on the interpreter and\nallows the figures that are generated at the end to stay visible after the script ends. Anaconda apparently does not\nhave this issue and the -i switch isn't needed.\n\"\"\"\n\n\n# Make sure the endas package can be found when running from the examples folder without having to install it first\nimport sys, os.path\n\nimport math\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nfrom endas import cov, obs\nfrom endas import ensemble\nfrom endas import algorithms as alg\nfrom endas.cov import DiagonalCovariance\nfrom endas.localization import DomainLocalization, GenericStateSpace1d, taper\n\nfrom scipy import linalg\n\nfrom models import lorenz\nfrom utils import make_data\n\n\n# Avoid non-deterministic output for testing\nnp.random.seed(1234)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Problem setup\n# ----------------------------------------------------------------------------------------------------------------------\n\n# State space dimension\nn = 40\n\n# Ensemble size\nN = 20\n\n# Number of observations we will skip before we assimilate one. 1 step = 30min, 12 = 6h\nobs_skip = 6\n\n# Smoother lag as the number of time steps. Lag 0 disables smoothing (i.e. only the filtering solution is obtained)\nlag = 50\n\n# Model is the Lorenz-95 dynamic system\nmodel = lorenz.Lorenz95(n)\n\n# Set up localization implementation. For this synthetic case we will use the generic 1d partitioning of the state\n# space and the Gaspari-Cohn function with correlation length 2 for distance-based observation covariance tapering.\nls = DomainLocalization(ssp=GenericStateSpace1d(n), taper_fn=taper.GaspariCohn(2))\n\n# Filters/smoothers we are going to run\n\nkf = alg.KalmanFilter(model=model.__call__, model_tl=model.dot, model_adj=model.adjdot, lag=lag, forgetting_factor=1.0)\nenkfs = [\n #('EnKF-noloc', alg.EnsembleKalmanFilter(variant=alg.EnKF(), ensemble_size=N, lag=lag, forgetting_factor=1.0)),\n ('EnKF-loc', alg.EnsembleKalmanFilter(variant=alg.EnKF(), ensemble_size=N, lag=lag, forgetting_factor=1.0, loc_strategy=ls)),\n #('ETKF-noloc', alg.EnsembleKalmanFilter(variant=alg.ESTKF(), ensemble_size=N, lag=lag, forgetting_factor=1.0)),\n ('ETKF-loc', alg.EnsembleKalmanFilter(variant=alg.ESTKF(), ensemble_size=N, lag=lag, loc_strategy=ls))\n]\n\n# Observation operator: We will observe the last 3 variables in every 5 state vector variables, i.e. 
24 out of 40\n# in this case\nk = 24\nHmat = np.zeros((k, n))\nhi = np.arange(k, dtype=np.int32)\nHmat[hi, 5*(hi // 3) + (hi % 3) + 2] = 1\nH = obs.MatrixObservationOp(Hmat)\nobservedStates = 5*(hi // 3) + (hi % 3) + 2\n\nk = 40\nH = obs.MatrixObservationOp(np.eye(n))\nobservedStates = np.arange(0, k)\n\n\n\n# Initial error covariance\nsig_clim = 3.6414723\nP0 = cov.DiagonalCovariance(np.ones(n) * (0.5*sig_clim)**2)\n\n# Model error covariance\nQ = cov.DiagonalCovariance(np.ones(n) * (0.05*sig_clim)**2)\n\n# Observation error covariance\nR = cov.DiagonalCovariance(np.ones(k) * (0.15*sig_clim)**2)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Generate synthetic data for the twin experiment\n# ----------------------------------------------------------------------------------------------------------------------\n\nprint(\"Generating true state and observations...\")\n\ndt = 0.025 / 6.0 # Model integration time step (equivalent to 30min)\nn_steps_in_day = 48\nn_steps = 1000\n\nx0 = np.ones(n) * 8.0 # Initial state\nx0[20] = 8.004 # Perturb 20-th coordinate\n\nxt, yobs = make_data(model_fn=model.__call__, x0=x0, dt=dt, H=H, Q=Q, R=R, nsteps=n_steps, nspin=0)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Ready to run...\n# ----------------------------------------------------------------------------------------------------------------------\n\nxall = np.zeros((len(enkfs)+1, n_steps, n))\nrmse = np.zeros((len(enkfs)+1, n_steps))\n\n# Called when a smoother solution is ready. Here we will just store the result and RMSE to be plotted later\ndef on_result(x, A, t, args):\n kfi, is_ensemble = args\n xall[kfi, t, :] = x\n rmse[kfi, t] = math.sqrt(np.mean((x - xt[:, t])**2))\n\n\n# Generate the initial system state and ensemble that all algorithms will start from. The initial guess is\n# purposefully not very good\nx0 = np.ones(n)\nA0 = ensemble.generate(N, x0, P0)\n\n# Run all Ensemble Kalman Filters/Smoothers\n\nfor kfi, (name, enkf) in enumerate(enkfs):\n print(\"Running {}...\".format(name))\n\n # Avoid non-deterministic output for testing\n np.random.seed(1234)\n\n A = np.copy(A0)\n # Before the time stepping loop starts, let the smother know the initial system state. This way we will get\n # a smoother solution for it as well.\n enkf.smoother_begin(A, 0)\n\n # THIS IS THE MAIN \"SIMULATION\" TIME-STEPPING LOOP\n for t in range(1, n_steps):\n\n # Integrate the system state forward using the model. In this synthetic example we will use the forecast()\n # method to simplify the code\n A = enkf.forecast(model.__call__, A, Q, dt)\n\n # Assimilate observations. Please note that we need to call begin_analysis() and end_analysis() even if\n # we do not have any observations. If we didn't, we would not get the smoother solution for these time steps.\n enkf.begin_analysis(A, t)\n\n if t % obs_skip == 0: enkf.assimilate(z=yobs[:, t], H=H, R=R, z_coords=observedStates)\n\n A = enkf.end_analysis(on_smoother_result=on_result, result_args=(kfi, True))\n\n # Time-stepping loop completed. Call finish() to get any pending smoother solutions. Please note that this is\n # not needed if only the filtering solution is of interest\n enkf.smoother_finish(on_smoother_result=on_result, result_args=(kfi, True))\n\n\n# Do an exact Kalman Filter/Smoother run for reference. 
The API is similar to the ensemble smoothing API, see the\n# comments above for explanation\n\nprint(\"Running KF...\")\nnp.random.seed(1234)\n\nx = np.copy(x0)\nP = P0.to_matrix(force_dense=True)\n\nkf.smoother_begin(x, P, 0)\n\n# THIS IS THE MAIN \"SIMULATION\" TIME-STEPPING LOOP\nfor t in range(1, n_steps):\n x, P = kf.forecast(x, P, Q, dt)\n\n kf.begin_analysis(x, P, t)\n if t % obs_skip == 0: kf.assimilate(z=yobs[:, t], H=H, R=R)\n x, P = kf.end_analysis(on_smoother_result=on_result, result_args=(-1, False))\n\nkf.smoother_finish(on_smoother_result=on_result, result_args=(-1, False))\n\n\n\n\nprint(\"Running REFERENCE...\")\n\nnp.random.seed(1234)\n\nxall2 = np.zeros((n_steps, n))\nrmse2 = np.zeros((n_steps, n))\n\nx = np.copy(x0)\nP = P0.to_matrix(force_dense=True)\nQQ = Q.to_matrix(force_dense=True)\n\nHH = H.to_matrix(force_dense=True)\n\nfor t in range(1, n_steps):\n\n # Forecast\n trj = model(x, dt)\n P = model.dot(trj, P)\n P = model.adjdot(trj, P)\n if Q is not None: P += QQ\n\n\n # Update\n if t % obs_skip == 0:\n F = HH.dot(P).dot(HH.T)\n\n if isinstance(R, np.ndarray):\n np.add(F, R, out=F)\n elif isinstance(R, cov.CovarianceOperator):\n R.add_to(F)\n\n z = yobs[:, t]\n\n # State update as xk + Cp*H'*F^-1*dz\n dz = z - HH.dot(x)\n x += P.dot(HH.T).dot(linalg.solve(F, dz, sym_pos=True, overwrite_b=True))\n #self._xa = self._xa.ravel()\n\n # Covariance estimate update as Cp - Cp*H'*F^-1*H*Cp\n P -= P.dot(HH.T).dot(linalg.solve(F, HH.dot(P), sym_pos=True, overwrite_a=True, overwrite_b=True))\n\n xall2[t, :] = x\n rmse2[t, :] = np.diagonal(P).ravel()\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Data assimilation completed, print RMSEs and plots\n# ----------------------------------------------------------------------------------------------------------------------\n\nprint(\"Done\")\n\n#RMSEskipStart = 1000\n#for i in range(len(filtersToRun)):\n# logging.info(\"RMSE %s : %.4f\", filtersToRun[i].name, np.mean(rmse[RMSEskipStart:,i]))\n\nprint(\"Plotting...\")\n\nline_styles = ['b-', 'g-', 'm-', 'c-', 'y-']\n\nOBS2PLOT = 17\nX2PLOT = observedStates[OBS2PLOT]\n\n\n# Plot the trajectory of the 29-th state variable for the entire DA run\nfig = plt.figure()\nplt.plot(range(n_steps), xt[X2PLOT, 0:n_steps], 'k--', linewidth=1.3, alpha=0.5, label=\"truth\")\n\nfor kfi, (name, enkf) in enumerate(enkfs):\n plt.plot(range(n_steps), xall[kfi, 0:n_steps, X2PLOT], line_styles[kfi], linewidth=1.0, label=name)\n #plt.plot(range(n_steps), rmse[kfi, 0:n_steps, X2PLOT], 'g-', linewidth=1.0, label=name + ' rmse')\n\nplt.plot(range(n_steps), xall[-1, 0:n_steps, X2PLOT], 'k-', linewidth=1.0, label='KF')\n\n\nplt.plot(range(n_steps), xall2[0:n_steps, X2PLOT], 'r--', linewidth=1.0, label='REFERENCE')\n#plt.plot(range(n_steps), rmse2[0:n_steps, X2PLOT], 'r:', linewidth=1.0, label='REFERENCE rmse')\n\nobs_t = np.arange(1, n_steps, obs_skip)\nplt.plot(obs_t, yobs[OBS2PLOT, obs_t], 'kx', markersize=3)\n\nplt.legend()\nplt.grid(True)\nplt.ylabel(\"x29\")\nplt.xlabel(\"t\")\nplt.show()\n\n# RMSE plot\n# fig = plt.figure()\n#\n# for kfi, (name, kf) in enumerate(kf_to_run):\n# plt.plot(range(n_steps), rmse[kfi, 0:n_steps], line_styles[kfi], linewidth=0.8, label=name)\n#\n# plt.legend()\n# plt.grid(True)\n# plt.ylabel(\"RMSE\")\n# plt.xlabel(\"t\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.use", "numpy.add", "numpy.zeros", "numpy.random.seed", "matplotlib.pyplot.grid", "numpy.copy", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "numpy.eye", "numpy.mean", "numpy.diagonal", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "scipy.linalg.solve" ] ]
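The Lorenz-95 example above first builds a partial observation operator `Hmat` that observes the last three of every five state variables (24 of 40) before overriding it with a full identity operator. The following standalone NumPy sketch reconstructs just that indexing logic, outside of endas, to make explicit which state components the operator selects.

import numpy as np

n, k = 40, 24                              # state dimension; 3 observed out of every 5 variables
hi = np.arange(k, dtype=np.int32)
observed = 5 * (hi // 3) + (hi % 3) + 2    # indices 2,3,4, 7,8,9, ..., 37,38,39

H = np.zeros((k, n))
H[hi, observed] = 1.0                      # one unit entry per row: row i observes state variable observed[i]

x = np.arange(n, dtype=float)              # any state vector
assert np.allclose(H @ x, x[observed])     # applying H extracts exactly the observed components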
cagdemir/portfolio-performance-evaluation-module
[ "8a04363c699b05365c403293e80005bb18f50a34" ]
[ "perf_metrics_calculator.py" ]
[ "\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\n#import matplotlib.patheffects as path_effects\nimport seaborn as sb\nimport numpy as np\nimport pandas as pd\nfrom tabulate import tabulate\n\n\n#measuring performance metrics quarterly, yearly, and overall\n\n#metrics 1. return - OK\n# 1.0. total return - OK\n# 1.1. cagr - OK\n# 2. volatility - OK \n# 3. sharpe - OK\n# 4. sortino -OK\n# 5. risk return ratio - OK\n# 6. treynor - OK\n# 6. max dd - OK\n# 7. max dd length - OK\n# 8. market beta - OK\n# 9. alpha - OK\n# 9.0 alpha raw - OK\n# 9.1 alpha CAPM - OK\n# 10. omega - OK\n# 12. VaR - OK\n# 13. max single period return - OK\n# 14. min single period ret - OK\n# 15. skewness - OK\n# 16. kurtosis - OK\n# 17. CDD (Conditional Draw Down): average of max 20% drawdowns - OK\n# 18. CDD Duration - OK\n#table - OK\n#plots 1. compounded return\n# 2. returns\n# 3. underwater\n# 4. heatmap\n# 5. annual return\n#\n\n\ndef performance_metrics(series_in, market=None, rf=None, target=None, freq='M', table=False, plots=False):\n \n # series_in is the pnl series of the asset or the strategy \n \n periods = series_in.shape[0]# total length of the return series that feed in\n if freq=='Y':\n unit_period = 1\n elif freq=='M':\n unit_period = 12\n elif freq=='W':\n unit_period = 52\n elif freq=='D':\n unit_period = 252\n else:\n print('Please check freq argument!')\n return np.nan\n \n series = series_in.copy()\n idx = series_in.index\n \n if rf is None:\n print('rf is assumed as 0!') \n series_excess = series.copy()\n elif type(rf)==int or type(rf)==float:\n print('rf converted to unit period in a non-compounding way')\n series_excess = series - rf/unit_period\n else:\n series_excess = series - rf\n \n series_compounded = (series+1).cumprod()\n series_excess_compounded = (series_excess+1).cumprod()\n \n ret_compounded = series_compounded.iloc[-1] - 1\n ret_excess_compounded = series_excess_compounded.iloc[-1] - 1\n cagr = (ret_compounded+1) ** (unit_period/periods) - 1\n \n volatility = series.std() * unit_period**.5\n series_negative = series.copy()\n series_negative[series_negative>0] = 0\n volatility_negative = series_negative.std() * unit_period**.5\n \n sharpe = cagr / volatility\n \n # sortinoe, ref: http://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_Mag_Sortino_0213.pdf\n sortino = cagr / volatility_negative\n \n # max dd\n \n max_dd_all = (series_compounded / series_compounded.cummax() )\n max_dd = max_dd_all.min()-1\n \n # max_dd duration\n \n max_dddur_all = max_dd_all.copy()\n max_dddur_all[max_dddur_all<1] = 0\n max_dddur_all_cumsum = max_dddur_all.cumsum()\n max_dddur_all = max_dddur_all_cumsum.value_counts()\n max_dddur = max_dddur_all.max() # this is in terms of unit period\n\n # risk return ratio [similar ratios; calmar, mar, sterling, burke... 
etc.]\n \n risk_return = cagr / (-max_dd)\n \n # Conditional drawdown \n condition = .2\n n = int(np.round((max_dddur_all[max_dddur_all>1].shape[0]*condition)))\n conditional_dd = max_dddur_all_cumsum.groupby(max_dddur_all_cumsum).apply(lambda x: max_dd_all.loc[x.index].min()).sort_values().iloc[:n].mean() - 1\n #conditional_dd = 5\n # CDD duration\n \n conditional_dd_dur = max_dddur_all.iloc[:n].mean()\n \n # alpha and beta\n \n def alpha_beta(return_series, market):\n\n X = market.values.reshape(-1, 1)\n X = np.concatenate([np.ones_like(X), X], axis=1)\n b = np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(return_series.values)\n return b[0], b[1]\n \n if market is None:\n alpha_raw = ret_compounded\n alpha = np.nan\n beta = np.nan\n else:\n alpha,beta = alpha_beta(series_excess, market)\n alpha_raw = ret_compounded -((market +1).cumprod().iloc[-1]-1)\n \n # treynor ratio\n \n if market is None:\n treynor = np.nan\n else:\n treynor = cagr / beta\n \n # max-min single\n \n max_single = series_in.max()\n min_single = series_in.min()\n \n # skewness -kurt\n \n skewness = series_in.skew()\n kurt = series_in.kurt()\n \n # Var\n \n VaR = series_in.quantile(.05)\n \n #omega ratio\n \n omega = cagr / (-series_negative.mean()) # need to be checked\n \n \n metrics_names = ['Compounded Total Return', 'Compounded Excess Return', 'CAGR',\n 'Annualized Volatility', 'Annualized Negative Volatility', 'Sharpe', 'Sortino',\n 'Treynor', 'Omega', 'Risk-Return', 'alpha Raw', 'alpha',\n 'beta', 'Max Drawdown', 'Conditional Drawdown (Highest 20%)',\n 'Max Drawdown Duration', 'Conditional Drawdown Duration (Longest 20%)',\n 'Maximum Single Period Return', 'Minimum Single Period Return', 'VaR (5%)', \n 'Skewness', 'Kurtosis']\n \n metrics_values = [ret_compounded, ret_excess_compounded, cagr, volatility,\n volatility_negative, sharpe, sortino, treynor, omega, \n risk_return, alpha_raw, alpha, beta, max_dd, conditional_dd,\n max_dddur, conditional_dd_dur, max_single, min_single, VaR,\n skewness, kurt]\n \n dict_table = dict(zip(metrics_names, metrics_values))\n \n \n#----------------------------------------------------------------------------------------------------- \n \n if table:\n print(tabulate(zip(metrics_names, metrics_values), headers=['Metrics', 'Value'], tablefmt=\"fancy_grid\", floatfmt=\".4f\"))\n\n#-----------------------------------------------------------------------------------------------------\n \n if plots:\n \n #-----------------------------------------------------------------------------------------------------\n\n# # plotting compounded returns\n# plt.figure()\n# series_compounded.plot(color='red', linewidth=1)\n# #plt.plot(series_compounded)\n# plt.fill_between(series_compounded.index,series_compounded, 1)\n# plt.ylabel(\"Compounded Returns\")\n# plt.xlabel(\"Date\")\n# plt.title(\"Portfolio in Time\");\n# plt.grid(color='black', linestyle='--', linewidth=0.5)\n \n #-----------------------------------------------------------------------------------------------------\n \n # plotting raw returns\n plt.figure()\n plt.plot(series_in.index,series_in,color='blue',linewidth=0.5)\n plt.axhline(y=series_in.mean(), color='red', linewidth=1,linestyle='--')\n plt.ylabel(\"Return\")\n plt.xlabel(\"Date\")\n plt.title('Raw Return')\n plt.grid(color='black', linestyle='--', linewidth=0.5)\n \n #-----------------------------------------------------------------------------------------------------\n \n # plotting underwater figure\n \n plt.figure()\n plt.plot(max_dd_all.index,max_dd_all,color='red',linewidth=0.2)\n 
plt.fill_between(max_dd_all.index, max_dd_all,1)\n plt.ylabel(\"Return\")\n plt.xlabel(\"Date\")\n plt.title(\"Underwater graph of highest 5 drawdown\");\n plt.grid(color='black', linestyle='--', linewidth=0.5)\n plt.show()\n \n #-----------------------------------------------------------------------------------------------------\n \n # plotting conditional max dd areas\n \n plt.figure()\n list_color=['red','blue','black','green','orange']\n cap_dd_toPlot = 5\n n_dd_toPlot = min(len(max_dddur_all),cap_dd_toPlot)\n \n for i in range(n_dd_toPlot):\n \n start = max_dddur_all_cumsum[(max_dddur_all_cumsum==max_dddur_all.index[i])].index[0]\n stop = max_dddur_all_cumsum[(max_dddur_all_cumsum==max_dddur_all.index[i])].index[-1]\n \n #plt.plot(series_compounded)\n plt.axvspan(start,stop, alpha=0.3, color=list_color[i])\n \n plt.plot(series_compounded)\n plt.show()\n \n \n #-----------------------------------------------------------------------------------------------------\n \n # plotting returns\n fig, ax = plt.subplots()\n ax= sb.boxplot(saturation=5, fliersize=5,width=0.75,data=series,whis=1)\n ax = sb.swarmplot(data=series, color=\".25\")\n ax.set(xlabel='Date', ylabel='Return')\n plt.show()\n \n #-----------------------------------------------------------------------------------------------------\n \n # plotting heat map and annual returns\n \n if not freq=='Y':\n \n plt.figure()\n \n years = idx.year.unique()\n \n if freq=='M':\n secondary_period = idx.month.unique().sort_values()\n \n elif freq=='W':\n \n secondary_period_end = series_in.groupby(pd.Grouper(freq='A')).apply(lambda x: x.index.week.unique().shape[0]).max()#range(53)\n secondary_period = range(0,secondary_period_end)\n \n elif freq=='D':\n\n secondary_period_end = max(series_in.groupby(pd.Grouper(freq='A')).apply(lambda x: x.shape[0]).max(),252)#idx.day.unique().sort_values()\n secondary_period = range(0,secondary_period_end)\n \n \n series_grouped = series_in.groupby(series_in.index.year)\n \n ret_perPeriod = pd.concat([series_grouped.get_group(i).reset_index(drop=True) for i in years], axis=1).T\n ret_perPeriod.iloc[0]=ret_perPeriod.iloc[0].shift(ret_perPeriod.iloc[0].isna().sum()) #aligning the nan's as for the first year\n ret_perPeriod.index = years\n ret_perPeriod.columns = secondary_period\n \n plt.ylabel('Date')\n plt.xlabel('Month')\n plt.title('Return')\n #heat_map = \n sb.heatmap(ret_perPeriod,cbar_kws={'label': 'Colorbar', 'orientation': 'horizontal'}) # ,annot=True,) \n \n plt.show()\n \n # plot annualized\n \n annualized_perPeriod=(ret_perPeriod.T.replace(np.nan,0)+1).cumprod().iloc[-1,:]-1\n \n fig, ax = plt.subplots()\n y_pos = np.arange(len(annualized_perPeriod))\n \n ax.barh(y_pos,annualized_perPeriod*100, align='center',alpha=0.6)\n ax.set_yticks(y_pos)\n ax.set_yticklabels(years)\n ax.invert_yaxis() # labels read top-to-bottom\n ax.set_xlabel('Return % ')\n ax.set_title('Annual Return')\n \n plt.show() \n \n elif freq == 'Y':\n \n years = idx.year\n \n fig, ax = plt.subplots()\n y_pos = np.arange(len(series))\n \n ax.barh(y_pos,series*100, align='center',alpha=0.6)\n ax.set_yticks(y_pos)\n ax.set_yticklabels(years)\n ax.invert_yaxis() # labels read top-to-bottom\n ax.set_xlabel('Return % ')\n ax.set_title('Annual Return')\n \n plt.show()\n \n\n return dict_table\n" ]
[ [ "matplotlib.pyplot.axvspan", "numpy.ones_like", "numpy.round", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "pandas.Grouper", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show" ] ]
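A hedged usage sketch for the `performance_metrics` function above, run on synthetic monthly returns. It assumes the function is importable from perf_metrics_calculator.py as laid out in this record and that the module's own dependencies (matplotlib, seaborn, tabulate) are installed; the return distributions, market proxy, risk-free rate, and random seed are arbitrary illustration values.

import numpy as np
import pandas as pd
from perf_metrics_calculator import performance_metrics   # module path as in the repo above

np.random.seed(0)
idx = pd.date_range("2015-01-31", periods=60, freq="M")             # 5 years of month-end dates
returns = pd.Series(np.random.normal(0.005, 0.03, 60), index=idx)   # strategy P&L per month
market = pd.Series(np.random.normal(0.004, 0.04, 60), index=idx)    # market proxy for alpha/beta

# rf passed as an annual float is converted to the unit period inside the function (non-compounding).
metrics = performance_metrics(returns, market=market, rf=0.02, freq="M",
                              table=True, plots=False)
print(metrics["CAGR"], metrics["Sharpe"], metrics["Max Drawdown"])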
FDR0903/mimicLOB
[ "0ef79a9a7129fa63a25423e29aba249594ec23b5" ]
[ "mimicLOB/orderbook/orderbook.py" ]
[ "import sys\nimport math\nfrom collections import deque # a faster insert/pop queue\nfrom six.moves import cStringIO as StringIO\nfrom decimal import Decimal\nimport numpy as np\nfrom .ordertree import OrderTree\nimport pandas as pd\nimport time\nfrom .orderlist import OrderList\nimport requests\n\n\nclass OrderBook(object):\n def __init__(self, **kwargs):\n \n # Defaulkt values if none given\n tick_size = kwargs['tick_size'] if 'tick_size' in kwargs else Decimal(1)\n b_tape = kwargs['b_tape'] if 'b_tape' in kwargs else True\n b_tape_LOB = kwargs['b_tape_LOB'] if 'b_tape_LOB' in kwargs else True\n verbose = kwargs['verbose'] if 'verbose' in kwargs else True\n \n # LOB running on a server ?\n self._distant = kwargs['distant'] if 'distant' in kwargs else False\n\n # Basic properties of the LOB\n self._name = 'LOB'\n self._tick_size = Decimal(tick_size)\n\n # Tapes\n self.tape = deque(maxlen=None) # Transaction tape, Index[0] is most recent trade\n self._LOBtape = deque(maxlen=None) # lob tape, is most recent trade\n\n self._pricetape = []\n self._qttytape = []\n self._tstape = []\n\n # Trees of asks & bids\n self._bids = OrderTree()\n self._asks = OrderTree()\n\n # Current values\n self._lastTradePrice = 0\n self._lastTradeSign = 'bid'\n self._last_tick = None\n self._last_timestamp = 0\n self._time = 0\n self._last_order_timestamp = 0\n \n\n # Agent list : agents in this list are notified when trades are executed\n # if the lob is running on a server, this will contain agents' server\n # fix servers addresses\n self._agentList = {}\n\n # Countings\n self._next_order_id = 0\n self._maxEntries = 15 #number of ticks stored in the LOB tape\n \n # Verbose\n self._verbose = verbose\n\n # booleans\n self._b_tape = b_tape # tape transactions\n self._b_tape_LOB = b_tape_LOB # requires lot of memory and time consumption\n self._b_auction = kwargs['b_auction'] if 'b_auction' in kwargs else False # Specifies if the LOB i s in auction mode\n\n # method of order processing\n if self.b_auction:\n self.process_order = self.process_order_during_auction\n else:\n self.process_order = self.process_order_during_continuous_trading\n\n\n#########################################\n# Accessors, Setter, and Destructors\n#########################################\n @property\n def b_auction(self):\n return self._b_auction\n @property\n def maxEntries(self):\n return self._maxEntries\n @property\n def LOBtape(self):\n return self._LOBtape\n @property\n def b_tape_LOB(self):\n return self._b_tape_LOB\n @property\n def verbose(self):\n return self._verbose\n @property\n def b_tape(self):\n return self._b_tape\n @property\n def agentList(self):\n return self._agentList\n @property\n def pricetape(self):\n return self._pricetape\n @property\n def qttytape(self):\n return self._qttytape\n @property\n def tstape(self):\n return self._tstape\n @property\n def lastTradePrice(self):\n return self._lastTradePrice\n @property\n def lastTradeSign(self):\n return self._lastTradeSign\n @property\n def name(self):\n return self._name\n @property\n def bids(self):\n return self._bids\n @property\n def asks(self):\n return self._asks\n @property\n def last_tick(self):\n return self._last_tick\n @property\n def last_timestamp(self):\n return self._last_timestamp\n @property\n def tick_size(self):\n return self._tick_size\n @property\n def next_order_id(self):\n self._next_order_id += 1\n return self._next_order_id\n @property\n def time(self):\n # self._time = time.time_ns()\n self._time = self._last_order_timestamp + 1\n # print('j 
ai fait appel ร  time')\n return self._time\n @property\n def last_order_timestamp(self):\n return self._last_order_timestamp\n\n @last_order_timestamp.setter\n def last_order_timestamp(self, last_order_timestamp):\n self._last_order_timestamp = last_order_timestamp\n @maxEntries.setter\n def maxEntries(self, maxEntries):\n self._maxEntries = maxEntries\n @LOBtape.setter\n def LOBtape(self, LOBtape):\n self._LOBtape = LOBtape\n @verbose.setter\n def verbose(self, verbose):\n self._verbose = verbose\n @pricetape.setter\n def pricetape(self, pricetape):\n self._pricetape = pricetape\n @qttytape.setter\n def qttytape(self, qttytape):\n self._qttytape = qttytape\n @tstape.setter\n def tstape(self, tstape):\n self._tstape = tstape\n @tick_size.setter\n def tick_size(self, tick_size):\n self._tick_size = Decimal(tick_size)\n @lastTradePrice.setter\n def lastTradePrice(self, lastTradePrice):\n self._lastTradePrice = lastTradePrice\n @lastTradeSign.setter\n def lastTradeSign(self, lastTradeSign):\n self._lastTradeSign = lastTradeSign\n @last_tick.setter\n def last_tick(self, last_tick):\n self._last_tick = last_tick\n @last_timestamp.setter\n def last_timestamp(self, last_timestamp):\n self._last_timestamp = last_timestamp\n @bids.setter\n def bids(self, bids):\n self._bids = bids\n @asks.setter\n def asks(self, asks):\n self._asks = asks\n @b_auction.setter\n def b_auction(self, b_auction):\n ######################################################\n # END OF AUCTION ALGORITHM\n # it means we went from auction to no auction. \n # A price should be decided according to walras equilibrium : \n # the price that maximizes the offer and demand\n ###############################################\n if self.b_auction and not b_auction:\n # find the best price and execute all orders\n bidPrices = pd.DataFrame([[price, self.bids.price_map[price].volume] for price in list(self.bids.prices)]).set_index(0)\n askPrices = pd.DataFrame([[price, self.asks.price_map[price].volume] for price in list(self.asks.prices)]).set_index(0)\n bidPrices.columns = ['bids']\n askPrices.columns = ['asks']\n\n # sort prices\n askPrices.sort_index(inplace=True)\n bidPrices.sort_index(ascending=False, inplace=False)\n\n # get cumulative volumes (cours C.A Lehalle)\n bidPrices['cum bids'] = bidPrices.loc[::-1, 'bids'].cumsum()[::-1]\n askPrices['cum asks'] = askPrices.cumsum()\n\n # Get the price that maximizes the exchanged volume.\n walras_df = pd.concat([bidPrices, askPrices], axis=1, sort=True).fillna(method='ffill').fillna(method='bfill')\n walras_df2 = walras_df[['cum bids', 'cum asks']].min(axis=1)[::-1]\n execPrice = walras_df2.idxmax()\n\n if self.verbose:\n print(f'\\n*** END OF AUCTION ***\\n Best price : {execPrice}')\n\n # We execute all bids > execPrice or asks < execPrice.\n self.process_order = self.process_order_during_continuous_trading\n old_b_tape_LOB = self.b_tape_LOB\n self._b_tape_LOB = False\n\n # First change the prices & re-order orders of the orderlist\n # according to timestamp\n # if not execPrice in self.bids.prices:\n # self.bids.create_price(execPrice)\n # if not execPrice in self.asks.prices:\n # self.asks.create_price(execPrice)\n for price in list(self.bids.prices): \n if price > execPrice:\n if self.verbose:\n print(f'\\n*** Removing bid price : {price}. 
Quantity : {self.bids.price_map[price].volume} ***')\n\n # loop in orders of this order list, add them to the \n for order in self.bids.price_map[price]: \n order.price = execPrice\n self.bids.move_order_with_time(order)\n # execPrice_bid_orderlist.append_order_with_time(order)\n\n self.bids.remove_price(price)\n \n\n # print(self.asks.price_map[execPrice])\n for price in list(self.asks.prices): \n if price < execPrice:\n # loop in orders of this order list, add them to the \n if self.verbose:\n print(f'\\n*** Removing ask price : {price}. Quantity : {self.asks.price_map[price].volume} ***')\n for order in self.asks.price_map[price]: \n order.price = execPrice\n # update bids\n self.asks.move_order_with_time(order)\n\n self.asks.remove_price(price)\n\n \n execPrice_bid_orderlist = self.bids.price_map[execPrice]\n execPrice_ask_orderlist = self.asks.price_map[execPrice]\n\n if walras_df.loc[execPrice, 'cum asks']>walras_df.loc[execPrice, 'cum bids']: \n #execute bids\n if self.verbose:\n print(f'\\n*** Executing all bids @{execPrice} ***')\n \n\n for order in execPrice_bid_orderlist:\n # TODO : Should handle the timestamp (not self.time automatically)\n quote = {'order_id' : order.order_id,\n 'trader_id': order.trader_id}\n quantity_to_trade, new_trades = self.process_order_list('ask', \n execPrice_ask_orderlist, \n order.quantity,\n quote)\n self.bids.remove_price(execPrice)\n else:\n print(self.bids.price_map[execPrice])\n print(self.asks.price_map[execPrice])\n \n #execute asks\n if self.verbose:\n print(f'\\n*** Executing all asks @{execPrice} ***')\n for order in execPrice_ask_orderlist:\n quantity_to_trade, new_trades = self.process_order_list('bid', \n execPrice_bid_orderlist, \n order.quantity,\n {'order_id' : order.order_id,\n 'trader_id': order.trader_id})\n self.asks.remove_price(execPrice)\n self._b_tape_LOB = old_b_tape_LOB\n\n # it means we stop live trading, and all orders are just\n # added to the LOB without execution.\n # elif not self.b_auction and b_auction:\n self._b_auction = b_auction\n\n if self.b_auction:\n self.process_order = self.process_order_during_auction\n else:\n self.process_order = self.process_order_during_continuous_trading\n\n def addAgent(self, agent):\n if type(agent)==dict:\n self.agentList[agent['id']] = agent['address'] #fix address\n else:\n self.agentList[agent.id] = agent\n\n def removeAgent(self, agent):\n del self.agentList[agent.id]\n\n def resetLOB(self):\n self._bids = OrderTree()\n self._asks = OrderTree()\n\n def reset(self):\n self.tape = deque(maxlen=None) \n self.pricetape = []\n self.qttytape = []\n self.tstape = []\n self.bids = OrderTree()\n self.asks = OrderTree()\n self._agentList = {}\n\n#########################################\n# Order Processin Methods \n#########################################\n def notify_cancelation(self, side, trader_id, order_id):\n if trader_id in self.agentList:\n if self._distant: # ping the agent's fix server\n params = {'side' : side,\n 'order_id' : order_id}\n return requests.get(f\"{self.agentList[trader_id]}/notify_order_cancelation\",\n json=params).json()\n else:\n self.agentList[trader_id].notify_order_cancelation(side, order_id)\n\n def notify_modification(self, order_update):\n trader_id = order_update['trader_id']\n if trader_id in self.agentList:\n if self._distant: # ping the agent's fix server\n return requests.get(f\"{self.agentList[trader_id]}/notify_order_modification\",\n json=order_update).json()\n else:\n self.agentList[trader_id].notify_order_modification(order_update)\n\n def 
notify_agents(self, trades, order_in_book):\n if order_in_book:\n if order_in_book['trader_id'] in self.agentList:\n if self._distant: # ping the agent's fix server\n trader_id = order_in_book['trader_id']\n return requests.get(f\"{self.agentList[trader_id]}/notify_orders_in_book\",\n json=order_in_book).json()\n else:\n self.agentList[order_in_book['trader_id']].notify_orders_in_book(order_in_book)\n if trades:\n for trade in trades:\n if trade['party1_id'] in self.agentList:\n if self._distant: # ping the agent's fix server\n trader_id = trade['party1_id']\n params = {'trade' : trade,\n 'check_pending' : True}\n return requests.get(f\"{self.agentList[trader_id]}/notify_trades\",\n json=params).json()\n else:\n self.agentList[trade['party1_id']].notify_trades(trade)\n\n if trade['party2_id'] in self.agentList:\n if self._distant: # ping the agent's fix server\n trader_id = trade['party2_id']\n params = {'trade' : trade,\n 'check_pending' : False}\n return requests.get(f\"{self.agentList[trader_id]}/notify_trades\",\n json=params).json()\n else:\n self.agentList[trade['party2_id']].notify_trades(trade, False)\n\n\n def process_order_during_auction(self, quote):\n order_type = quote['type']\n order_in_book = None\n if 'timestamp' not in quote:\n quote['timestamp'] = self.time\n if not 'order_id' in quote:\n quote['order_id'] = self.next_order_id\n\n self._last_order_timestamp = quote['timestamp']\n\n if quote['quantity'] <= 0:\n sys.exit('process_order() given order of quantity <= 0')\n\n # no market order during auctions\n if order_type == 'market':\n # Tape LOB state before processing order : \n if self.b_tape_LOB:\n self.LOBtape.append(self.getCurrentLOB('market', 'MO', quote))\n \n if self.verbose:\n print(f'\\n**** Error : **** \\n: Market Order during auction mode {str(quote)}')\n\n elif order_type == 'limit':\n quote['price'] = Decimal(quote['price'])\n side = quote['side']\n\n # Tape LOB state before processing order : \n if self.b_tape_LOB:\n self.LOBtape.append(self.getCurrentLOB('limit', 'LO', quote))\n \n if side=='bid':\n if not 'order_id' in quote:\n quote['order_id'] = self.next_order_id\n self.bids.insert_order(quote)\n order_in_book = quote\n elif side=='ask':\n if not 'order_id' in quote:\n quote['order_id'] = self.next_order_id\n self.asks.insert_order(quote)\n order_in_book = quote\n else:\n sys.exit('process_limit_order() given neither \"bid\" nor \"ask\"')\n else:\n sys.exit(\"order_type for process_order() is neither 'market' or 'limit'\")\n \n self.notify_agents(None, order_in_book)\n return None, order_in_book\n\n def process_order_during_continuous_trading(self, quote):\n order_type = quote['type']\n order_in_book = None\n if not 'timestamp' in quote:\n quote['timestamp'] = self.time\n if not 'order_id' in quote:\n quote['order_id'] = self.next_order_id\n\n self._last_order_timestamp = quote['timestamp']\n if quote['quantity'] <= 0:\n sys.exit('process_order() given order of quantity <= 0')\n\n if order_type == 'market':\n # Tape LOB state before processing order : \n if self.b_tape_LOB:\n self.LOBtape.append(self.getCurrentLOB('market', 'MO', quote))\n\n trades = self.process_market_order(quote)\n\n elif order_type == 'limit':\n quote['price'] = Decimal(quote['price'])\n\n # Tape LOB state before processing order : \n if self.b_tape_LOB:\n self.LOBtape.append(self.getCurrentLOB('limit', 'LO', quote))\n\n trades, order_in_book = self.process_limit_order(quote)\n else:\n sys.exit(\"order_type for process_order() is neither 'market' or 'limit'\")\n \n 
self.notify_agents(trades, order_in_book)\n return trades, order_in_book\n\n def process_order_list(self, side, order_list, quantity_still_to_trade, quote):\n '''\n Takes an OrderList (stack of orders at one price) and an incoming order and matches\n appropriate trades given the order's quantity.\n '''\n trades = []\n quantity_to_trade = quantity_still_to_trade\n while len(order_list) > 0 and quantity_to_trade > 0:\n head_order = order_list.get_head_order()\n traded_price = head_order.price\n counter_party = head_order.trader_id\n new_book_quantity = None\n if quantity_to_trade < head_order.quantity:\n traded_quantity = quantity_to_trade\n # Do the transaction\n new_book_quantity = head_order.quantity - quantity_to_trade\n head_order.update_quantity(new_book_quantity, head_order.timestamp)\n quantity_to_trade = 0\n elif quantity_to_trade == head_order.quantity:\n traded_quantity = quantity_to_trade\n if side == 'bid':\n self.bids.remove_order_by_id(head_order.order_id)\n else:\n self.asks.remove_order_by_id(head_order.order_id)\n quantity_to_trade = 0\n else: # quantity to trade is larger than the head order\n traded_quantity = head_order.quantity\n if side == 'bid':\n self.bids.remove_order_by_id(head_order.order_id)\n else:\n self.asks.remove_order_by_id(head_order.order_id)\n quantity_to_trade -= traded_quantity\n\n transaction_record = {\n # 'timestamp': self.time,\n 'traded_price': traded_price,\n 'traded_quantity': traded_quantity,\n 'time': quote['timestamp'] if 'timestamp' in quote else self.time\n }\n\n party2_orderid=quote['order_id'] if 'order_id' in quote else None \n\n if side == 'bid': \n transaction_record['party1_id'] = counter_party\n transaction_record['party1_side'] = 'bid'\n transaction_record['party1_order_id'] = head_order.order_id\n\n # transaction_record['party1_newbookquantity'] = [counter_party, 'bid', head_order.order_id, new_book_quantity]\n transaction_record['party2_id'] = quote['trader_id']\n transaction_record['party2_side'] = 'ask'\n transaction_record['party2_order_id'] = party2_orderid # None means that the sender of order is party 2\n self.lastTradeSign = 'ask'\n else:\n transaction_record['party1_id'] = counter_party\n transaction_record['party1_side'] = 'ask'\n transaction_record['party1_order_id'] = head_order.order_id\n\n transaction_record['party2_id'] = quote['trader_id']\n transaction_record['party2_side'] = 'ask'\n transaction_record['party2_order_id'] = party2_orderid # None means that the sender of order is party 2\n self.lastTradeSign = 'bid'\n\n if self.verbose:\n print(f'\\n**** New Trade : **** \\n: {str(transaction_record)}')\n\n self.lastTradePrice = traded_price \n if self.b_tape: \n self.tape.append(transaction_record)\n\n #FDR\n self.tstape += [quote['timestamp'] if 'timestamp' in quote else self.time]\n self.pricetape += [traded_price]\n self.qttytape += [traded_quantity]\n \n trades.append(transaction_record)\n\n return quantity_to_trade, trades\n \n def process_market_order(self, quote):\n if self.verbose:\n print(f'\\n**** I received this market order **** \\n: {str(quote)}')\n\n trades = []\n quantity_to_trade = quote['quantity']\n side = quote['side']\n if side == 'bid':\n while quantity_to_trade > 0 and self.asks:\n best_price_asks = self.asks.min_price_list()\n quantity_to_trade, new_trades = self.process_order_list('ask', best_price_asks, quantity_to_trade, quote)\n trades += new_trades\n elif side == 'ask':\n while quantity_to_trade > 0 and self.bids:\n best_price_bids = self.bids.max_price_list()\n quantity_to_trade, 
new_trades = self.process_order_list('bid', best_price_bids, quantity_to_trade, quote)\n trades += new_trades\n else:\n sys.exit('process_market_order() recieved neither \"bid\" nor \"ask\"')\n return trades\n\n def process_limit_order(self, quote):\n if self.verbose:\n print(f'\\n**** I received this limit order **** \\n: {str(quote)}')\n\n order_in_book = None\n trades = []\n quantity_to_trade = quote['quantity']\n side = quote['side']\n price = quote['price']\n if side == 'bid':\n while (self.asks and price >= self.asks.min_price() and quantity_to_trade > 0):\n best_price_asks = self.asks.min_price_list()\n quantity_to_trade, new_trades = self.process_order_list('ask', best_price_asks, quantity_to_trade, quote)\n trades += new_trades\n # If volume remains, need to update the book with new quantity\n if quantity_to_trade > 0:\n quote['quantity'] = quantity_to_trade\n self.bids.insert_order(quote)\n order_in_book = quote\n elif side == 'ask':\n while (self.bids and price <= self.bids.max_price() and quantity_to_trade > 0):\n best_price_bids = self.bids.max_price_list()\n quantity_to_trade, new_trades = self.process_order_list('bid', best_price_bids, quantity_to_trade, quote)\n trades += new_trades\n # If volume remains, need to update the book with new quantity\n if quantity_to_trade > 0:\n quote['quantity'] = quantity_to_trade\n self.asks.insert_order(quote)\n order_in_book = quote\n else:\n sys.exit('process_limit_order() given neither \"bid\" nor \"ask\"')\n return trades, order_in_book\n\n def cancel_order(self, side, order_id):\n if self.verbose:\n print(f'\\n**** I received this cancel order **** \\n: {str(order_id)}')\n\n # Tape LOB state before processing order : \n self.cancel_order_tape(side, order_id)\n\n # Cancel Order\n trader_id = None\n if side == 'bid':\n if self.bids.order_exists(order_id):\n trader_id = self.bids.remove_order_by_id(order_id)\n else:\n if self.verbose:\n print(f'\\n**** Cancel Error : Order does not exist **** \\n: {str(order_id)}')\n\n elif side == 'ask':\n if self.asks.order_exists(order_id):\n trader_id = self.asks.remove_order_by_id(order_id)\n else:\n if self.verbose:\n print(f'\\n**** Cancel Error : Order does not exist **** \\n: {str(order_id)}')\n\n else:\n sys.exit('cancel_order() given neither \"bid\" nor \"ask\"')\n \n if trader_id:\n self.notify_cancelation(side, trader_id, order_id)\n\n # add the cancel order to the tape\n def cancel_order_tape(self, side, order_id):\n if self.b_tape_LOB:\n if side == 'bid':\n if self.bids.order_exists(order_id):\n order = self.bids.order_map[order_id]\n self.LOBtape.append(self.getCurrentLOB('limit', 'cancel', order))\n else:\n self.LOBtape.append(self.getCurrentLOB('limit', 'cancel', {'order_id':order_id}))\n elif side == 'ask':\n if self.asks.order_exists(order_id):\n order = self.asks.order_map[order_id]\n self.LOBtape.append(self.getCurrentLOB('limit', 'cancel', order))\n else:\n self.LOBtape.append(self.getCurrentLOB('limit', 'cancel', {'order_id':order_id}))\n else:\n sys.exit('cancel_order() given neither \"bid\" nor \"ask\"')\n \n ######################################################################\n # Order cancelation rules :\n # 4202/4 Modification and cancellation.\n # Any order entered into the Central Order Book may be modified or \n # cancelled prior to its execution. 
Any increase in the order \n # quantity or change in the limit price shall cause the forfeiture \n # of time priority.\n ######################################################################\n \n def modify_order(self, order_id, order_update):\n if self.verbose:\n print(f'\\n**** I received this modify order **** \\n: {str(order_id)} : {str(order_update)}')\n\n side = order_update['side']\n order_update['order_id'] = order_id\n if 'timestamp' not in order_update:\n order_update['timestamp'] = self.time\n\n # Tape LOB state before processing order : \n if self.b_tape_LOB:\n self.LOBtape.append(self.getCurrentLOB('limit', 'modify', order_update))\n\n if side == 'bid':\n if self.bids.order_exists(order_update['order_id']):\n # Check if the order looses priority :\n if self.bids.update_order_looses_priority(order_update):\n # if true, delete order and re process it (if it becomes agressive)\n trader_id = self.bids.remove_order_by_id(order_id)\n \n # don't record the new order, it isn't new.\n old_b_tape_lob = self.b_tape_LOB\n self._b_tape_LOB = False\n self.process_order(order_update)\n self._b_tape_LOB = old_b_tape_lob\n \n else:\n self.bids.update_order_quantity(order_update)\n self.notify_modification(order_update)\n else:\n if self.verbose:\n print(f'\\n**** Order modification Error : order does not exist **** \\n: {str(order_id)}')\n \n elif side == 'ask':\n if self.asks.order_exists(order_update['order_id']):\n # Check if the order looses priority :\n if self.asks.update_order_looses_priority(order_update):\n # if true, delete order and re process it (if it becomes agressive)\n trader_id = self.asks.remove_order_by_id(order_id)\n\n # don't record the new order, it isn't new.\n old_b_tape_lob = self.b_tape_LOB\n self._b_tape_LOB = False\n self.process_order(order_update)\n self._b_tape_LOB = old_b_tape_lob\n else:\n self.asks.update_order_quantity(order_update)\n self.notify_modification(order_update)\n else:\n if self.verbose:\n print(f'\\n**** Order modification Error : order does not exist **** \\n: {str(order_id)}')\n else:\n sys.exit('modify_order() given neither \"bid\" nor \"ask\"')\n\n\n # def modify_order_old(self, order_id, order_update):\n # if self.verbose:\n # print(f'\\n**** I received this modify order **** \\n: {str(order_id)}')\n \n # side = order_update['side']\n # order_update['order_id'] = order_id\n # order_update['timestamp'] = self.time\n\n # # Tape LOB state before processing order : \n # if self.b_tape_LOB:\n # self.LOBtape.append(self.getCurrentLOB('limit', 'modify', order_update))\n\n # if side == 'bid':\n # if self.bids.order_exists(order_update['order_id']):\n # self.bids.update_order(order_update)\n\n # # self.notify_modification(order_id, order_update)\n # else:\n # if self.verbose:\n # print(f'\\n**** Order modification Error : order does not exist **** \\n: {str(order_id)}')\n # elif side == 'ask':\n # if self.asks.order_exists(order_update['order_id']):\n # self.asks.update_order(order_update)\n # else:\n # if self.verbose:\n # print(f'\\n**** Order modification Error : order does not exist **** \\n: {str(order_id)}')\n # else:\n # sys.exit('modify_order() given neither \"bid\" nor \"ask\"')\n\n#########################################\n# Order Book state information \n#########################################\n # for market makers\n def get_head_order_at_price(self, side, price):\n price = Decimal(price)\n if side == 'bid':\n order = None\n if self.bids.price_exists(price):\n order = self.bids.get_price_list(price).tail_order\n return order\n elif side 
== 'ask':\n order = None\n if self.asks.price_exists(price):\n order = self.asks.get_price_list(price).tail_order\n return order\n else:\n sys.exit('get_head_order_at_price() given neither \"bid\" nor \"ask\"')\n\n\n\n def get_volume_at_price(self, side, price):\n price = Decimal(price)\n if side == 'bid':\n volume = 0\n if self.bids.price_exists(price):\n volume = self.bids.get_price_list(price).volume\n return volume\n elif side == 'ask':\n volume = 0\n if self.asks.price_exists(price):\n volume = self.asks.get_price_list(price).volume\n return volume\n else:\n sys.exit('get_volume_at_price() given neither \"bid\" nor \"ask\"')\n\n def get_best_bid(self):\n return self.bids.max_price()\n\n def get_worst_bid(self):\n return self.bids.min_price()\n\n def get_best_ask(self):\n return self.asks.min_price()\n\n def get_worst_ask(self):\n return self.asks.max_price()\n\n#########################################\n# Order Book Purging\n#########################################\n def tape_dump(self, filename, filemode, tapemode):\n if filename is not None:\n dumpfile = open(filename, filemode)\n for tapeitem in self.tape:\n dumpfile.write('Time: %s, Price: %s, Quantity: %s\\n' % (tapeitem['time'],\n tapeitem['price'],\n tapeitem['quantity']))\n dumpfile.close()\n if tapemode == 'wipe':\n self.tape = []\n\n\n#########################################\n# Order Book Statistical info : volatility ? imbalance ? \n#########################################\n\n \n\n\n \n#########################################\n# Fancy Outputs\n#########################################\n def getCurrentLOB(self, ordertype_, actiontype_, order, side=None):\n # gives the -i best bids & i asks and corersponding quantities as a dictionary\n # bids:\n if type(order) == dict:\n order_id = order['order_id'] if 'order_id' in order else None\n timestamp = order['timestamp'] if 'timestamp' in order else None\n price = order['price'] if 'price' in order else None\n quantity = order['quantity'] if 'quantity' in order else None\n side = order['side'] if 'side' in order else None\n else:\n order_id = order.order_id\n timestamp = order.timestamp\n price = order.price\n quantity = order.quantity\n side = order.side\n\n res = [timestamp, order_id, price, quantity, side, ordertype_, actiontype_]\n res += [0, 0, 0, 0] * self.maxEntries\n \n # if it is auction mode, don't store limits\n if not self.b_auction:\n j = 7+2*self.maxEntries-1\n try:\n if len(self.bids.prices) == 0:\n if self._lastTradeSign == 'bid':\n bestbid = self.lastTradePrice\n else:\n bestbid = self.lastTradePrice - self.tick_size\n else:\n bestbid = self.bids.prices[-1]\n\n for i in range(self.maxEntries):\n price = bestbid - i*self.tick_size\n res[j-1] = price\n res[j] = self.bids.price_map[price].volume if price in self.bids.prices else 0\n j -= 2\n except:\n if len(self.bids.prices) > 0:\n sys.exit('ERROR !') \n\n\n \n\n j = 7+2*self.maxEntries\n try:\n if len(self.bids.prices) == 0:\n if self._lastTradeSign == 'ask':\n bestask = self.lastTradePrice\n else:\n bestask = self.lastTradePrice + self.tick_size\n else:\n bestask = self.asks.prices[0]\n \n for i in range(self.maxEntries):\n price = bestask + i*self.tick_size\n res[j] = price\n res[j+1] = self.asks.price_map[price].volume if price in self.asks.prices else 0\n j += 2\n except:\n if len(self.asks.prices) > 0:\n sys.exit('ERROR !')\n \n return res\n\n\n def getLOBstate(self):\n resDF = pd.DataFrame([0])\n for _, value in self.bids.price_map.items():\n for order_ in value:\n px = float(order_.price)\n qtty = 
float(order_.quantity)\n \n if not px in resDF.index:\n resDF.loc[px] = -qtty\n else:\n resDF.loc[px] -= qtty\n \n for _, value in self.asks.price_map.items():\n for order_ in value:\n px = float(order_.price)\n qtty = float(order_.quantity)\n \n if not px in resDF.index:\n resDF.loc[px] = qtty\n else:\n resDF.loc[px] += qtty\n resDF.drop(resDF.index[0], inplace=True)\n resDF.reset_index(inplace=True)\n resDF.columns = ['Price', 'Quantity']\n return resDF\n\n def to_png(self, _cache={}):\n # create a dataframe with bis & asks\n # bids are positive quantities, asks are negative.\n import pandas as pd\n import matplotlib.pyplot as plt\n\n resDF = pd.DataFrame([np.nan])\n for _, value in self.bids.price_map.items():\n for order_ in value:\n px = float(order_.price)\n qtty = float(order_.quantity)\n \n if not px in resDF.index:\n resDF.loc[px] = qtty\n else:\n resDF.loc[px] += qtty\n \n for _, value in self.asks.price_map.items():\n for order_ in value:\n px = float(order_.price)\n qtty = float(order_.quantity)\n \n if not px in resDF.index:\n resDF.loc[px] = -qtty\n else:\n resDF.loc[px] -= qtty\n\n resDF = resDF.sort_index().dropna()\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(15, 8))\n resDF.columns = ['Quantites']\n resDF.plot.bar(rot=0, ax=ax1)\n\n # Prices\n if self.tape != None and len(self.tape) > 0:\n ax2.plot(self.tstape, self.pricetape)\n ax3.bar(self.tstape, self.qttytape)\n \n # save fig\n fig.savefig(r'static/LOB.png')\n plt.close()\n return 'DONE'\n\n def to_html(self):\n tempfile = ''\n tempfile += '***Bids***<br />'\n if self.bids != None and len(self.bids) > 0:\n for key, value in reversed(self.bids.price_map.items()):\n tempfile += f'{value}<br />'\n tempfile += \"<br />***Asks***<br />\"\n if self.asks != None and len(self.asks) > 0:\n for key, value in self.asks.price_map.items():\n tempfile += f'{value}<br />'\n return tempfile\n \n def __str__(self):\n # return self.name\n\n tempfile = StringIO()\n tempfile.write(\"***Bids***\\n\")\n if self.bids != None and len(self.bids) > 0:\n for key, value in reversed(self.bids.price_map.items()):\n tempfile.write('%s' % value)\n tempfile.write(\"\\n***Asks***\\n\")\n if self.asks != None and len(self.asks) > 0:\n for key, value in self.asks.price_map.items():\n tempfile.write('%s' % value)\n tempfile.write(\"\\n***Trades***\\n\")\n if self.tape != None and len(self.tape) > 0:\n num = 0\n for entry in self.tape:\n if num < 10: # get last 5 entries\n tempfile.write(str(entry['quantity']) + \" @ \" + str(entry['price']) + \" (\" + str(entry['timestamp']) + \") \" + str(entry['party1'][0]) + \"/\" + str(entry['party2'][0]) + \"\\n\")\n num += 1\n else:\n break\n tempfile.write(\"\\n\")\n return tempfile.getvalue()\n\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "pandas.concat" ] ]
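Illustrative usage sketch (not part of the source file above): the constructor of the order-book class is not included in this excerpt, so the helper below takes an already built book object as a parameter instead of guessing at the class name, and it only calls methods that are defined above (modify_order, get_best_bid, get_volume_at_price). The price 101.5 and quantity 30 are placeholder values.

    from decimal import Decimal

    def amend_bid(book, order_id):
        # "book" stands for an instance of the order-book class excerpted above;
        # its class name and constructor are deliberately left out because they
        # are not shown in this excerpt.
        # Per the comment in modify_order, increasing the quantity or changing
        # the limit price makes the resting order lose time priority.
        book.modify_order(order_id, {"side": "bid",
                                     "price": Decimal("101.5"),
                                     "quantity": 30})
        # Query the book state with the accessors defined above.
        return book.get_best_bid(), book.get_volume_at_price("bid", "101.5")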
Impavidity/relogic
[ "f647106e143cd603b95b63e06ea530cdd516aefe" ]
[ "relogic/main.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport json\nimport os\nimport random\nfrom types import SimpleNamespace\n\nimport numpy as np\n\nimport torch\nfrom relogic.logickit.base import utils\nfrom relogic.logickit.base.configure import configure, update_configure\nfrom relogic.logickit.training import trainer, training_progress\nfrom relogic.logickit.serving import Server\nfrom relogic.logickit.analyzer.heads_importance import compute_heads_importance, mask_heads\n\nif \"PUDB\" not in os.environ or os.environ[\"PUDB\"] == \"false\":\n import relogic.utils.crash_on_ipy\n\n\ndef train(config):\n if config.use_external_teacher:\n teacher_model_path = config.teacher_model_path\n teacher_config = os.path.join(teacher_model_path, \"general_config.json\")\n with open(teacher_config) as f:\n teacher_config = SimpleNamespace(**json.load(f))\n teacher_config.local_rank = config.local_rank\n teacher_config.no_cuda = config.no_cuda\n else:\n teacher_config = None\n # A quick fix for loading external teacher\n model_trainer = trainer.Trainer(\n config=config, teacher_config=teacher_config)\n # A quick fix for version migration\n progress = training_progress.TrainingProgress(config=config)\n if config.use_external_teacher:\n model_path = os.path.join(teacher_model_path,\n teacher_config.model_name + \".ckpt\")\n model_trainer.restore(model_path)\n model_trainer.restore_teacher(model_path)\n model_trainer.train(progress)\n\ndef finetune(config):\n general_config_path = os.path.join(config.finetune_restore_path,\n \"general_config.json\")\n with open(general_config_path) as f:\n restore_config = SimpleNamespace(**json.load(f))\n if config.model_name:\n model_path = os.path.join(config.finetune_restore_path,\n config.model_name + \".ckpt\")\n else:\n model_path = os.path.join(config.finetune_restore_path,\n restore_config.model_name + \".ckpt\")\n\n model_trainer = trainer.Trainer(config)\n model_trainer.restore(model_path)\n progress = training_progress.TrainingProgress(config=config)\n model_trainer.train(progress)\n\n\ndef eval(config):\n general_config_path = os.path.join(config.restore_path,\n \"general_config.json\")\n with open(general_config_path) as f:\n restore_config = SimpleNamespace(**json.load(f))\n if config.model_name:\n model_path = os.path.join(config.restore_path,\n config.model_name + \".ckpt\")\n else:\n model_path = os.path.join(config.restore_path,\n restore_config.model_name + \".ckpt\")\n restore_config.mode = config.mode\n restore_config.output_features = config.output_features\n restore_config.local_rank = config.local_rank\n restore_config.no_cuda = config.no_cuda\n restore_config.buckets = config.buckets\n restore_config.gold_answer_file = config.gold_answer_file\n restore_config.null_score_diff_threshold = config.null_score_diff_threshold\n restore_config.output_attentions = config.output_attentions\n restore_config.use_external_teacher = False\n if not hasattr(restore_config, \"branching_encoder\"):\n restore_config.branching_encoder = False\n # Update the evaluation dataset\n update_configure(restore_config, config)\n print(restore_config)\n utils.heading(\"RUN {} ({:})\".format(config.mode.upper(),\n restore_config.task_names))\n model_trainer = trainer.Trainer(restore_config)\n model_trainer.restore(model_path)\n if config.mode == \"serving\":\n server = Server(model_trainer)\n server.start()\n elif config.mode == \"analysis\":\n analyze(config, model_trainer)\n elif config.mode == \"feature_extraction\":\n task_ = None\n 
for task in model_trainer.tasks:\n if task.name == config.selected_task:\n task_ = task\n model_trainer.feature_extraction(\n task=task_, dump_file=config.feature_dump_file)\n else:\n model_trainer.evaluate_all_tasks()\n\ndef analyze(config, model_trainer):\n # compute_heads_importance(config, model_trainer)\n mask_heads(config, model_trainer)\n\n\n\n\ndef main():\n utils.heading(\"SETUP\")\n parser = argparse.ArgumentParser()\n\n # IO\n parser.add_argument(\n \"--mode\", default=None, choices=[\"train\", \"valid\", \"eval\", \"finetune\", \"analysis\", \"feature_extraction\"])\n parser.add_argument(\"--output_dir\", type=str, default=\"data/models\")\n parser.add_argument(\"--max_seq_length\", type=int, default=450)\n parser.add_argument(\"--max_query_length\", type=int, default=64)\n parser.add_argument(\"--doc_stride\", type=int, default=128)\n parser.add_argument(\"--do_lower_case\", default=False, action=\"store_true\")\n parser.add_argument(\"--model_name\", type=str)\n parser.add_argument(\"--restore_path\", type=str)\n parser.add_argument(\"--finetune_restore_path\", type=str)\n parser.add_argument(\"--train_file\", type=str, default=\"train.json\")\n parser.add_argument(\"--dev_file\", type=str, default=\"dev.json\")\n parser.add_argument(\"--test_file\", type=str, default=\"test.json\")\n\n # Task Definition\n parser.add_argument(\"--task_names\", type=str)\n parser.add_argument(\"--raw_data_path\", type=str)\n parser.add_argument(\"--label_mapping_path\", type=str)\n parser.add_argument(\"--unsupervised_data\", type=str)\n parser.add_argument(\"--lang\", type=str, default=\"en\")\n parser.add_argument(\"--pretokenized\", action=\"store_true\", default=False)\n parser.add_argument(\"--topk\", default=1)\n parser.add_argument(\"--gold_answer_file\", default=\"data/preprocessed_data/squad20.json\")\n parser.add_argument(\"--dump_to_files_dict\", default=\"\")\n\n parser.add_argument(\"--output_attentions\", default=False, action=\"store_true\")\n parser.add_argument(\"--span_inference\", default=False, action=\"store_true\")\n parser.add_argument(\"--metrics\", default=\"\", type=str)\n\n # Task related configuration\n\n # Sequence Labeling\n parser.add_argument(\"--sequence_labeling_use_cls\", default=False, action=\"store_true\")\n\n # Relation Extraction\n parser.add_argument(\"--no_entity_surface\", dest=\"entity_surface_aware\", default=True, action=\"store_false\")\n parser.add_argument(\"--use_dependency_feature\", dest=\"use_dependency_feature\", default=False, action=\"store_true\")\n parser.add_argument(\"--rel_extraction_module_type\", type=str, default=\"hybrid\")\n\n # Semantic Role Labeling\n parser.add_argument(\"--no_predicate_surface\", dest=\"predicate_surface_aware\", default=True, action=\"store_false\")\n parser.add_argument(\"--no_span_annotation\", dest=\"use_span_annotation\", default=True, action=\"store_false\")\n parser.add_argument(\"--use_span_candidates\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_module_type\", type=str, default=\"sequence_labeling\")\n parser.add_argument(\"--label_embed_dim\", type=int, default=100)\n parser.add_argument(\"--external_vocab_embed_dim\", type=int, default=300)\n parser.add_argument(\"--external_embeddings\", type=str)\n parser.add_argument(\"--use_description\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_label_format\", default=\"srl_label_span_based\", type=str)\n parser.add_argument(\"--num_width_embeddings\", type=int, default=300)\n 
parser.add_argument(\"--span_width_embedding_dim\", type=int, default=100)\n parser.add_argument(\"--srl_candidate_loss\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_arg_span_repr\", default=\"ave\")\n parser.add_argument(\"--srl_pred_span_repr\", default=\"ave\")\n parser.add_argument(\"--srl_use_label_embedding\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_compute_pos_tag_loss\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_use_gold_predicate\", default=False, action=\"store_true\")\n parser.add_argument(\"--srl_use_gold_argument\", default=False, action=\"store_true\")\n parser.add_argument(\"--predicate_reveal_method\", default=None, type=str)\n parser.add_argument(\"--indicator_embedding_size\", default=10, type=int)\n\n # Dependency Parsing\n parser.add_argument(\"--dep_parsing_mlp_dim\", default=300, type=int)\n parser.add_argument(\"--dropout\", default=0.3, type=float)\n\n # Parallel Mapping\n parser.add_argument(\"--parallel_mapping_mode\", default=\"alignment\", type=str)\n\n # Reading Comprehension\n parser.add_argument(\"--null_score_diff_threshold\", default=1.0)\n\n # Information Retrieval\n parser.add_argument(\"--qrels_file_path\", type=str, default=None)\n parser.add_argument(\"--regression\", default=False, action=\"store_true\")\n parser.add_argument(\"--word_level_interaction\", default=False, action=\"store_true\")\n parser.add_argument(\"--ir_siamese\", default=False, action=\"store_true\")\n\n # CNN model\n parser.add_argument(\"--output_channel\", type=int, default=150)\n parser.add_argument(\"--kernel_size\", type=int, default=2)\n parser.add_argument(\"--word_embed_dim\", type=int, default=300)\n\n # Modeling\n parser.add_argument(\"--use_gcn\", dest=\"use_gcn\", default=False, action=\"store_true\")\n parser.add_argument(\"--fix_embedding\", default=False, action=\"store_true\")\n\n # Model\n parser.add_argument(\"--bert_model\", type=str)\n parser.add_argument(\"--encoder_type\", type=str, default=\"bert\", choices=[\"bert\", \"xlm\", \"xlmr\", \"lstm\", \"embedding\"])\n parser.add_argument(\"--hidden_size\", type=int, default=768)\n parser.add_argument(\"--projection_size\", type=int, default=300)\n parser.add_argument(\n \"--initializer_range\", type=float,\n default=0.02) # initialization for task module\n # follow the initialization range of bert\n parser.add_argument(\"--no_bilstm\", default=True, dest=\"use_bilstm\", action=\"store_false\")\n parser.add_argument(\"--repr_size\", default=300, type=int)\n parser.add_argument(\"--branching_encoder\", default=False, action=\"store_true\")\n parser.add_argument(\"--routing_config_file\", type=str)\n parser.add_argument(\"--selected_non_final_layers\", type=str, default=\"none\", help=\"split by ; among tasks\")\n parser.add_argument(\"--dataset_type\", type=str, default=\"bucket\")\n parser.add_argument(\"--language_id_file\", type=str, default=None)\n\n # Semi-Supervised\n parser.add_argument(\"--is_semisup\", default=False, action=\"store_true\")\n parser.add_argument(\"--partial_view_sources\", type=str)\n parser.add_argument(\"--use_external_teacher\", default=False, action=\"store_true\")\n parser.add_argument(\"--teacher_model_path\", default=None, type=str)\n\n # Training\n parser.add_argument(\"--seed\", type=int, default=3435)\n parser.add_argument(\"--no_cuda\", action=\"store_true\")\n parser.add_argument(\"--local_rank\", type=int, default=-1)\n parser.add_argument(\"--learning_rate\", type=float, default=5e-5)\n 
parser.add_argument(\"--warmup_proportion\", type=float, default=0.1)\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\n \"Number of updates steps to accumulate before performing a backward/update pass\"\n )\n parser.add_argument(\"--print_every\", type=int, default=25)\n parser.add_argument(\"--eval_dev_every\", default=2000, type=int)\n parser.add_argument(\"--train_batch_size\", type=str, default=\"8\")\n parser.add_argument(\"--test_batch_size\", type=str, default=\"8\")\n parser.add_argument(\"--grad_clip\", type=float, default=1.0)\n parser.add_argument(\"--epoch_number\", type=int, default=20)\n parser.add_argument(\"--self_attention_head_size\", default=64, type=int)\n parser.add_argument(\"--schedule_method\", default=\"warmup_linear\")\n parser.add_argument(\n \"--no_schedule_lr\", dest=\"schedule_lr\", default=True, action=\"store_false\")\n parser.add_argument(\"--word_dropout\", default=False, action=\"store_true\")\n parser.add_argument(\"--word_dropout_prob\", default=0.6, type=float)\n parser.add_argument(\"--max_margin\", type=float, default=3)\n parser.add_argument(\"--warmup_epoch_number\", type=int, default=0)\n parser.add_argument(\"--sgd_learning_rate\", type=float, default=0.1)\n parser.add_argument(\"--adam_learning_rate\", type=float, default=0.003)\n parser.add_argument(\"--sep_optim\", dest=\"sep_optim\", default=False, action=\"store_true\")\n parser.add_argument(\"--multi_gpu\", dest=\"multi_gpu\", default=False, action=\"store_true\")\n parser.add_argument(\"--ignore_parameters\", default=\"\", type=str)\n parser.add_argument(\"--fix_bert\", default=False, action=\"store_true\")\n parser.add_argument(\"--two_stage_optim\", default=False, action=\"store_true\")\n parser.add_argument(\"--training_scheme\", default=None, type=str)\n parser.add_argument(\"--training_scheme_file\", default=None, type=str)\n parser.add_argument(\"--num_train_optimization_steps\", default=0, type=int)\n parser.add_argument(\"--early_stop_at\", default=0, type=int)\n parser.add_argument(\"--loss_weight\", type=str, default='1')\n parser.add_argument(\"--select_index_method\", type=str, default=\"cls\")\n parser.add_argument(\"--use_cosine_loss\", default=False, action=\"store_true\")\n parser.add_argument(\"--adversarial_training\", default=None, type=str)\n parser.add_argument(\"--no_bucket\", default=False, action=\"store_true\")\n parser.add_argument(\"--param_initialization\", default=None, type=str)\n # We allow to set same training steps for different dataset\n # Need to combine to CUDA_VISIBLE_DEVICES\n parser.add_argument(\"--only_adam\", default=False, action=\"store_true\")\n\n # Analysis\n parser.add_argument(\"--head_to_mask_file\", type=str, default=\"\")\n\n\n # Configuration\n parser.add_argument(\"--config_file\", type=str, default=None)\n parser.add_argument(\"--trainer_config\", type=str, default=None)\n parser.add_argument(\"--module_config\", type=str, default=None)\n parser.add_argument(\"--task_config\", type=str, default=None)\n\n #\n parser.add_argument(\"--selected_task\", type=str)\n parser.add_argument(\"--feature_dump_file\", type=str)\n parser.add_argument(\"--output_features\", default=False, action=\"store_true\")\n\n args = parser.parse_args()\n\n if not args.mode:\n raise ValueError(\"You need to specify the mode\")\n if args.output_dir:\n if os.path.exists(args.output_dir) and os.listdir(\n args.output_dir) and args.mode == \"train\":\n raise ValueError(\n \"Output directory ({}) already exists and is not 
empty.\".format(\n args.output_dir))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\n \"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".\n format(args.gradient_accumulation_steps))\n # args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n # num_train_optimization_steps = len(train_examples) / batch_size * epoch_number\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n configure(args)\n\n print(args)\n\n if args.mode == \"train\":\n utils.heading(\"START TRAINING ({:})\".format(args.task_names))\n train(args)\n elif args.mode == \"valid\":\n eval(args)\n elif args.mode == \"eval\":\n eval(args)\n elif args.mode == \"finetune\":\n finetune(args)\n elif args.mode == \"serving\":\n eval(args)\n elif args.mode == \"analysis\":\n eval(args)\n elif args.mode == \"feature_extraction\":\n eval(args)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.cuda.manual_seed_all" ] ]
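A hedged sketch of driving the command-line entry point defined above. The flag names (--mode, --task_names, --output_dir, --bert_model) are taken from the argparse setup in main(); the task name "ner", the output directory and the BERT checkpoint are placeholder values, not values from the repository.

    import sys
    from relogic.main import main

    # Placeholder arguments; only the flag names come from the file above.
    sys.argv = ["main.py",
                "--mode", "train",
                "--task_names", "ner",
                "--output_dir", "data/models/example_run",
                "--bert_model", "bert-base-cased"]
    main()  # dispatches to train() because --mode is "train"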
kh11kim/spatialmath-python
[ "3e75e62da29d909d1a5d0ba5d4cdb97659397263" ]
[ "spatialmath/base/transforms2d.py" ]
[ "# Part of Spatial Math Toolbox for Python\n# Copyright (c) 2000 Peter Corke\n# MIT Licence, see details in top-level file: LICENCE\n\n\"\"\"\nThis modules contains functions to create and transform SO(2) and SE(2) matrices,\nrespectively 2D rotation matrices and homogeneous tranformation matrices.\n\nVector arguments are what numpy refers to as ``array_like`` and can be a list,\ntuple, numpy array, numpy row vector or numpy column vector.\n\n\"\"\"\n\n# pylint: disable=invalid-name\n\nimport sys\nimport math\nimport numpy as np\nimport scipy.linalg\nfrom spatialmath import base\n\n_eps = np.finfo(np.float64).eps\n\ntry: # pragma: no cover\n # print('Using SymPy')\n import sympy\n\n _symbolics = True\n\nexcept ImportError: # pragma: no cover\n _symbolics = False\n\n# ---------------------------------------------------------------------------------------#\ndef rot2(theta, unit=\"rad\"):\n \"\"\"\n Create SO(2) rotation\n\n :param theta: rotation angle\n :type theta: float\n :param unit: angular units: 'rad' [default], or 'deg'\n :type unit: str\n :return: SO(2) rotation matrix\n :rtype: ndarray(2,2)\n\n - ``rot2(ฮธ)`` is an SO(2) rotation matrix (2x2) representing a rotation of ฮธ radians.\n - ``rot2(ฮธ, 'deg')`` as above but ฮธ is in degrees.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> rot2(0.3)\n >>> rot2(45, 'deg')\n \"\"\"\n theta = base.getunit(theta, unit)\n ct = base.sym.cos(theta)\n st = base.sym.sin(theta)\n # fmt: off\n R = np.array([\n [ct, -st],\n [st, ct]])\n # fmt: on\n return R\n\n\n# ---------------------------------------------------------------------------------------#\ndef trot2(theta, unit=\"rad\", t=None):\n \"\"\"\n Create SE(2) pure rotation\n\n :param theta: rotation angle about X-axis\n :type ฮธ: float\n :param unit: angular units: 'rad' [default], or 'deg'\n :type unit: str\n :param t: 2D translation vector, defaults to [0,0]\n :type t: array_like(2)\n :return: 3x3 homogeneous transformation matrix\n :rtype: ndarray(3,3)\n\n - ``trot2(ฮธ)`` is a homogeneous transformation (3x3) representing a rotation of\n ฮธ radians.\n - ``trot2(ฮธ, 'deg')`` as above but ฮธ is in degrees.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> trot2(0.3)\n >>> trot2(45, 'deg', t=[1,2])\n\n .. note:: By default, the translational component is zero but it can be\n set to a non-zero value.\n\n :seealso: xyt2tr\n \"\"\"\n T = np.pad(rot2(theta, unit), (0, 1), mode=\"constant\")\n if t is not None:\n T[:2, 2] = base.getvector(t, 2, \"array\")\n T[2, 2] = 1 # integer to be symbolic friendly\n return T\n\n\ndef xyt2tr(xyt, unit=\"rad\"):\n \"\"\"\n Create SE(2) pure rotation\n\n :param xyt: 2d translation and rotation\n :type xyt: array_like(3)\n :param unit: angular units: 'rad' [default], or 'deg'\n :type unit: str\n :return: 3x3 homogeneous transformation matrix\n :rtype: ndarray(3,3)\n\n - ``xyt2tr([x,y,ฮธ])`` is a homogeneous transformation (3x3) representing a rotation of\n ฮธ radians and a translation of (x,y).\n\n .. 
runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> xyt2tr([1,2,0.3])\n >>> xyt2tr([1,2,45], 'deg')\n\n :seealso: tr2xyt\n \"\"\"\n xyt = base.getvector(xyt, 3)\n T = np.pad(rot2(xyt[2], unit), (0, 1), mode=\"constant\")\n T[:2, 2] = xyt[0:2]\n T[2, 2] = 1.0\n return T\n\n\ndef tr2xyt(T, unit=\"rad\"):\n \"\"\"\n Convert SE(2) to x, y, theta\n\n :param T: SE(2) matrix\n :type T: ndarray(3,3)\n :param unit: angular units: 'rad' [default], or 'deg'\n :type unit: str\n :return: [x, y, ฮธ]\n :rtype: ndarray(3)\n\n - ``tr2xyt(T)`` is a vector giving the equivalent 2D translation and\n rotation for this SO(2) matrix.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> T = xyt2tr([1, 2, 0.3])\n >>> T\n >>> tr2xyt(T)\n\n :seealso: trot2\n \"\"\"\n\n if T.dtype == \"O\" and _symbolics:\n angle = sympy.atan2(T[1, 0], T[0, 0])\n else:\n angle = math.atan2(T[1, 0], T[0, 0])\n return np.r_[T[0, 2], T[1, 2], angle]\n\n\n# ---------------------------------------------------------------------------------------#\ndef transl2(x, y=None):\n \"\"\"\n Create SE(2) pure translation, or extract translation from SE(2) matrix\n\n\n **Create a translational SE(2) matrix**\n\n :param x: translation along X-axis\n :type x: float\n :param y: translation along Y-axis\n :type y: float\n :return: SE(2) transform matrix or the translation elements of a homogeneous\n transform :rtype: ndarray(3,3)\n\n - ``T = transl2([X, Y])`` is an SE(2) homogeneous transform (3x3)\n representing a pure translation.\n - ``T = transl2( V )`` as above but the translation is given by a 2-element\n list, dict, or a numpy array, row or column vector.\n\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> import numpy as np\n >>> transl2(3, 4)\n >>> transl2([3, 4])\n >>> transl2(np.array([3, 4]))\n\n **Extract the translational part of an SE(2) matrix**\n\n :param x: SE(2) transform matrix\n :type x: ndarray(3,3)\n :return: translation elements of SE(2) matrix\n :rtype: ndarray(2)\n\n - ``t = transl2(T)`` is the translational part of the SE(3) matrix ``T`` as a\n 2-element NumPy array.\n\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> import numpy as np\n >>> T = np.array([[1, 0, 3], [0, 1, 4], [0, 0, 1]])\n >>> transl2(T)\n\n .. note:: This function is compatible with the MATLAB version of the Toolbox. It\n is unusual/weird in doing two completely different things inside the one\n function.\n \"\"\"\n\n if base.isscalar(x) and base.isscalar(y):\n # (x, y) -> SE(2)\n t = np.r_[x, y]\n elif base.isvector(x, 2):\n # R2 -> SE(2)\n t = base.getvector(x, 2)\n elif base.ismatrix(x, (3, 3)):\n # SE(2) -> R2\n return x[:2, 2]\n else:\n raise ValueError(\"bad argument\")\n\n if t.dtype != \"O\":\n t = t.astype(\"float64\")\n T = np.identity(3, dtype=t.dtype)\n T[:2, 2] = t\n return T\n\n\ndef ishom2(T, check=False):\n \"\"\"\n Test if matrix belongs to SE(2)\n\n :param T: SE(2) matrix to test\n :type T: ndarray(3,3)\n :param check: check validity of rotation submatrix\n :type check: bool\n :return: whether matrix is an SE(2) homogeneous transformation matrix\n :rtype: bool\n\n - ``ishom2(T)`` is True if the argument ``T`` is of dimension 3x3\n - ``ishom2(T, check=True)`` as above, but also checks orthogonality of the\n rotation sub-matrix and validitity of the bottom row.\n\n .. 
runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> import numpy as np\n >>> T = np.array([[1, 0, 3], [0, 1, 4], [0, 0, 1]])\n >>> ishom2(T)\n >>> T = np.array([[1, 1, 3], [0, 1, 4], [0, 0, 1]]) # invalid SE(2)\n >>> ishom2(T) # a quick check says it is an SE(2)\n >>> ishom2(T, check=True) # but if we check more carefully...\n >>> R = np.array([[1, 0], [0, 1]])\n >>> ishom2(R)\n\n :seealso: isR, isrot2, ishom, isvec\n \"\"\"\n return (\n isinstance(T, np.ndarray)\n and T.shape == (3, 3)\n and (\n not check\n or (base.isR(T[:2, :2]) and np.all(T[2, :] == np.array([0, 0, 1])))\n )\n )\n\n\ndef isrot2(R, check=False):\n \"\"\"\n Test if matrix belongs to SO(2)\n\n :param R: SO(2) matrix to test\n :type R: ndarray(3,3)\n :param check: check validity of rotation submatrix\n :type check: bool\n :return: whether matrix is an SO(2) rotation matrix\n :rtype: bool\n\n - ``isrot2(R)`` is True if the argument ``R`` is of dimension 2x2\n - ``isrot2(R, check=True)`` as above, but also checks orthogonality of the rotation matrix.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> import numpy as np\n >>> T = np.array([[1, 0, 3], [0, 1, 4], [0, 0, 1]])\n >>> isrot2(T)\n >>> R = np.array([[1, 0], [0, 1]])\n >>> isrot2(R)\n >>> R = np.array([[1, 1], [0, 1]]) # invalid SO(2)\n >>> isrot2(R) # a quick check says it is an SO(2)\n >>> isrot2(R, check=True) # but if we check more carefully...\n\n :seealso: isR, ishom2, isrot\n \"\"\"\n return (\n isinstance(R, np.ndarray) and R.shape == (2, 2) and (not check or base.isR(R))\n )\n\n\n# ---------------------------------------------------------------------------------------#\n\n\ndef trinv2(T):\n r\"\"\"\n Invert an SE(2) matrix\n\n :param T: SE(2) matrix\n :type T: ndarray(3,3)\n :return: inverse of SE(2) matrix\n :rtype: ndarray(3,3)\n :raises ValueError: bad arguments\n\n Computes an efficient inverse of an SE(2) matrix:\n\n :math:`\\begin{pmatrix} {\\bf R} & t \\\\ 0\\,0 & 1 \\end{pmatrix}^{-1} = \\begin{pmatrix} {\\bf R}^T & -{\\bf R}^T t \\\\ 0\\, 0 & 1 \\end{pmatrix}`\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> T = trot2(0.3, t=[4,5])\n >>> trinv2(T)\n >>> T @ trinv2(T)\n\n :SymPy: supported\n \"\"\"\n if not ishom2(T):\n raise ValueError(\"expecting SE(2) matrix\")\n # inline this code for speed, don't use tr2rt and rt2tr\n R = T[:2, :2]\n t = T[:2, 2]\n Ti = np.zeros((3, 3), dtype=T.dtype)\n Ti[:2, :2] = R.T\n Ti[:2, 2] = -R.T @ t\n Ti[2, 2] = 1\n return Ti\n\n\ndef trlog2(T, check=True, twist=False):\n \"\"\"\n Logarithm of SO(2) or SE(2) matrix\n\n :param T: SE(2) or SO(2) matrix\n :type T: ndarray(3,3) or ndarray(2,2)\n :param check: check that matrix is valid\n :type check: bool\n :param twist: return a twist vector instead of matrix [default]\n :type twist: bool\n :return: logarithm\n :rtype: ndarray(3,3) or ndarray(3); or ndarray(2,2) or ndarray(1)\n :raises ValueError: bad argument\n\n An efficient closed-form solution of the matrix logarithm for arguments that\n are SO(2) or SE(2).\n\n - ``trlog2(R)`` is the logarithm of the passed rotation matrix ``R`` which\n will be 2x2 skew-symmetric matrix. The equivalent vector from ``vex()``\n is parallel to rotation axis and its norm is the amount of rotation about\n that axis.\n - ``trlog(T)`` is the logarithm of the passed homogeneous transformation\n matrix ``T`` which will be 3x3 augumented skew-symmetric matrix. The\n equivalent vector from ``vexa()`` is the twist vector (6x1) comprising [v\n w].\n\n .. 
runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> trlog2(trot2(0.3))\n >>> trlog2(trot2(0.3), twist=True)\n >>> trlog2(rot2(0.3))\n >>> trlog2(rot2(0.3), twist=True)\n\n :seealso: :func:`~trexp`, :func:`~spatialmath.base.transformsNd.vex`,\n :func:`~spatialmath.base.transformsNd.vexa`\n \"\"\"\n\n if ishom2(T, check=check):\n # SE(2) matrix\n\n if base.iseye(T):\n # is identity matrix\n if twist:\n return np.zeros((3,))\n else:\n return np.zeros((3, 3))\n else:\n if twist:\n return base.vexa(scipy.linalg.logm(T))\n else:\n return scipy.linalg.logm(T)\n\n elif isrot2(T, check=check):\n # SO(2) rotation matrix\n if twist:\n return base.vex(scipy.linalg.logm(T))\n else:\n return scipy.linalg.logm(T)\n else:\n raise ValueError(\"Expect SO(2) or SE(2) matrix\")\n\n\n# ---------------------------------------------------------------------------------------#\n\n\ndef trexp2(S, theta=None, check=True):\n \"\"\"\n Exponential of so(2) or se(2) matrix\n\n :param S: se(2), so(2) matrix or equivalent velctor\n :type T: ndarray(3,3) or ndarray(2,2)\n :param theta: motion\n :type theta: float\n :return: matrix exponential in SE(2) or SO(2)\n :rtype: ndarray(3,3) or ndarray(2,2)\n :raises ValueError: bad argument\n\n An efficient closed-form solution of the matrix exponential for arguments\n that are se(2) or so(2).\n\n For se(2) the results is an SE(2) homogeneous transformation matrix:\n\n - ``trexp2(ฮฃ)`` is the matrix exponential of the se(2) element ``ฮฃ`` which is\n a 3x3 augmented skew-symmetric matrix.\n - ``trexp2(ฮฃ, ฮธ)`` as above but for an se(3) motion of ฮฃฮธ, where ``ฮฃ``\n must represent a unit-twist, ie. the rotational component is a unit-norm skew-symmetric\n matrix.\n - ``trexp2(S)`` is the matrix exponential of the se(3) element ``S`` represented as\n a 3-vector which can be considered a screw motion.\n - ``trexp2(S, ฮธ)`` as above but for an se(2) motion of Sฮธ, where ``S``\n must represent a unit-twist, ie. the rotational component is a unit-norm skew-symmetric\n matrix.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> trexp2(skew(1))\n >>> trexp2(skew(1), 2) # revolute unit twist\n >>> trexp2(1)\n >>> trexp2(1, 2) # revolute unit twist\n\n For so(2) the results is an SO(2) rotation matrix:\n\n - ``trexp2(ฮฉ)`` is the matrix exponential of the so(3) element ``ฮฉ`` which is a 2x2\n skew-symmetric matrix.\n - ``trexp2(ฮฉ, ฮธ)`` as above but for an so(3) motion of ฮฉฮธ, where ``ฮฉ`` is\n unit-norm skew-symmetric matrix representing a rotation axis and a rotation magnitude\n given by ``ฮธ``.\n - ``trexp2(ฯ‰)`` is the matrix exponential of the so(2) element ``ฯ‰`` expressed as\n a 1-vector.\n - ``trexp2(ฯ‰, ฮธ)`` as above but for an so(3) motion of ฯ‰ฮธ where ``ฯ‰`` is a\n unit-norm vector representing a rotation axis and a rotation magnitude\n given by ``ฮธ``. ``ฯ‰`` is expressed as a 1-vector.\n\n .. 
runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> trexp2(skewa([1, 2, 3]))\n >>> trexp2(skewa([1, 0, 0]), 2) # prismatic unit twist\n >>> trexp2([1, 2, 3])\n >>> trexp2([1, 0, 0], 2)\n\n :seealso: trlog, trexp2\n \"\"\"\n\n if base.ismatrix(S, (3, 3)) or base.isvector(S, 3):\n # se(2) case\n if base.ismatrix(S, (3, 3)):\n # augmentented skew matrix\n if check and not base.isskewa(S):\n raise ValueError(\"argument must be a valid se(2) element\")\n tw = base.vexa(S)\n else:\n # 3 vector\n tw = base.getvector(S)\n\n if base.iszerovec(tw):\n return np.eye(3)\n\n if theta is None:\n (tw, theta) = base.unittwist2_norm(tw)\n elif not base.isunittwist2(tw):\n raise ValueError(\"If theta is specified S must be a unit twist\")\n\n t = tw[0:2]\n w = tw[2]\n\n R = base.rodrigues(w, theta)\n\n skw = base.skew(w)\n V = (\n np.eye(2) * theta\n + (1.0 - math.cos(theta)) * skw\n + (theta - math.sin(theta)) * skw @ skw\n )\n\n return base.rt2tr(R, V @ t)\n\n elif base.ismatrix(S, (2, 2)) or base.isvector(S, 1):\n # so(2) case\n if base.ismatrix(S, (2, 2)):\n # skew symmetric matrix\n if check and not base.isskew(S):\n raise ValueError(\"argument must be a valid so(2) element\")\n w = base.vex(S)\n else:\n # 1 vector\n w = base.getvector(S)\n\n if theta is not None and not base.isunitvec(w):\n raise ValueError(\"If theta is specified S must be a unit twist\")\n\n # do Rodrigues' formula for rotation\n return base.rodrigues(w, theta)\n else:\n raise ValueError(\" First argument must be SO(2), 1-vector, SE(2) or 3-vector\")\n\n\ndef adjoint2(T):\n # http://ethaneade.com/lie.pdf\n if T.shape == (3, 3):\n # SO(2) adjoint\n return np.identity(2)\n elif T.shape == (3, 3):\n # SE(2) adjoint\n (R, t) = base.tr2rt(T)\n # fmt: off\n return np.block([\n [R, np.c_[t[1], -t[0]].T], \n [0, 0, 1]\n ])\n # fmt: on\n else:\n raise ValueError(\"bad argument\")\n\n\ndef tr2jac2(T):\n r\"\"\"\n SE(2) Jacobian matrix\n\n :param T: SE(2) matrix\n :type T: ndarray(3,3)\n :return: Jacobian matrix\n :rtype: ndarray(3,3)\n\n Computes an Jacobian matrix that maps spatial velocity between two frames defined by\n an SE(2) matrix.\n\n ``tr2jac2(T)`` is a Jacobian matrix (3x3) that maps spatial velocity or\n differential motion from frame {B} to frame {A} where the pose of {B}\n elative to {A} is represented by the homogeneous transform T = :math:`{}^A {\\bf T}_B`.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> T = trot2(0.3, t=[4,5])\n >>> tr2jac2(T)\n\n :Reference: Robotics, Vision & Control: Second Edition, P. 
Corke, Springer 2016; p65.\n :SymPy: supported\n \"\"\"\n\n if not ishom2(T):\n raise ValueError(\"expecting an SE(2) matrix\")\n\n J = np.eye(3, dtype=T.dtype)\n J[:2, :2] = base.t2r(T)\n return J\n\n\ndef trinterp2(start, end, s=None):\n \"\"\"\n Interpolate SE(2) or SO(2) matrices\n\n :param start: initial SE(2) or SO(2) matrix value when s=0, if None then identity is used\n :type start: ndarray(3,3) or ndarray(2,2) or None\n :param end: final SE(2) or SO(2) matrix, value when s=1\n :type end: ndarray(3,3) or ndarray(2,2)\n :param s: interpolation coefficient, range 0 to 1\n :type s: float\n :return: interpolated SE(2) or SO(2) matrix value\n :rtype: ndarray(3,3) or ndarray(2,2)\n :raises ValueError: bad arguments\n\n - ``trinterp2(None, T, S)`` is a homogeneous transform (3x3) interpolated\n between identity when S=0 and T (3x3) when S=1.\n - ``trinterp2(T0, T1, S)`` as above but interpolated\n between T0 (3x3) when S=0 and T1 (3x3) when S=1.\n - ``trinterp2(None, R, S)`` is a rotation matrix (2x2) interpolated\n between identity when S=0 and R (2x2) when S=1.\n - ``trinterp2(R0, R1, S)`` as above but interpolated\n between R0 (2x2) when S=0 and R1 (2x2) when S=1.\n\n .. note:: Rotation angle is linearly interpolated.\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> T1 = transl2(1, 2)\n >>> T2 = transl2(3, 4)\n >>> trinterp2(T1, T2, 0)\n >>> trinterp2(T1, T2, 1)\n >>> trinterp2(T1, T2, 0.5)\n >>> trinterp2(None, T2, 0)\n >>> trinterp2(None, T2, 1)\n >>> trinterp2(None, T2, 0.5)\n\n :seealso: :func:`~spatialmath.base.transforms3d.trinterp`\n\n \"\"\"\n if base.ismatrix(end, (2, 2)):\n # SO(2) case\n if start is None:\n # \tTRINTERP2(T, s)\n\n th0 = math.atan2(end[1, 0], end[0, 0])\n\n th = s * th0\n else:\n # \tTRINTERP2(T1, start= s)\n if start.shape != end.shape:\n raise ValueError(\"start and end matrices must be same shape\")\n\n th0 = math.atan2(start[1, 0], start[0, 0])\n th1 = math.atan2(end[1, 0], end[0, 0])\n\n th = th0 * (1 - s) + s * th1\n\n return rot2(th)\n elif base.ismatrix(end, (3, 3)):\n if start is None:\n # \tTRINTERP2(T, s)\n\n th0 = math.atan2(end[1, 0], end[0, 0])\n p0 = transl2(end)\n\n th = s * th0\n pr = s * p0\n else:\n # \tTRINTERP2(T0, T1, s)\n if start.shape != end.shape:\n raise ValueError(\"both matrices must be same shape\")\n\n th0 = math.atan2(start[1, 0], start[0, 0])\n th1 = math.atan2(end[1, 0], end[0, 0])\n\n p0 = transl2(start)\n p1 = transl2(end)\n\n pr = p0 * (1 - s) + s * p1\n th = th0 * (1 - s) + s * th1\n\n return base.rt2tr(rot2(th), pr)\n else:\n return ValueError(\"Argument must be SO(2) or SE(2)\")\n\n\ndef trprint2(T, label=None, file=sys.stdout, fmt=\"{:.3g}\", unit=\"deg\"):\n \"\"\"\n Compact display of SE(2) or SO(2) matrices\n\n :param T: matrix to format\n :type T: ndarray(3,3) or ndarray(2,2)\n :param label: text label to put at start of line\n :type label: str\n :param file: file to write formatted string to\n :type file: file object\n :param fmt: conversion format for each number\n :type fmt: str\n :param unit: angular units: 'rad' [default], or 'deg'\n :type unit: str\n :return: formatted string\n :rtype: str\n\n The matrix is formatted and written to ``file`` and the\n string is returned. 
To suppress writing to a file, set ``file=None``.\n\n - ``trprint2(R)`` displays the SO(2) rotation matrix in a compact\n single-line format and returns the string::\n\n [LABEL:] ฮธ UNIT\n\n - ``trprint2(T)`` displays the SE(2) homogoneous transform in a compact\n single-line format and returns the string::\n\n [LABEL:] [t=X, Y;] ฮธ UNIT\n\n .. runblock:: pycon\n\n >>> from spatialmath.base import *\n >>> T = transl2(1,2) @ trot2(0.3)\n >>> trprint2(T, file=None, label='T')\n >>> trprint2(T, file=None, label='T', fmt='{:8.4g}')\n\n\n .. notes::\n\n - Default formatting is for compact display of data\n - For tabular data set ``fmt`` to a fixed width format such as\n ``fmt='{:.3g}'``\n\n :seealso: trprint\n \"\"\"\n\n s = \"\"\n\n if label is not None:\n s += \"{:s}: \".format(label)\n\n # print the translational part if it exists\n if ishom2(T):\n s += \"t = {};\".format(_vec2s(fmt, transl2(T)))\n\n angle = math.atan2(T[1, 0], T[0, 0])\n if unit == \"deg\":\n angle *= 180.0 / math.pi\n s += \" {}ยฐ\".format(_vec2s(fmt, [angle]))\n else:\n s += \" {} rad\".format(_vec2s(fmt, [angle]))\n\n if file:\n print(s, file=file)\n return s\n\n\ndef _vec2s(fmt, v):\n v = [x if np.abs(x) > 100 * _eps else 0.0 for x in v]\n return \", \".join([fmt.format(x) for x in v])\n\n\ndef points2tr2(p1, p2):\n \"\"\"\n SE(2) transform from corresponding points\n\n :param p1: first set of points\n :type p1: array_like(2,N)\n :param p2: second set of points\n :type p2: array_like(2,N)\n :return: transform from ``p1`` to ``p2``\n :rtype: ndarray(3,3)\n\n Compute an SE(2) matrix that transforms the point set ``p1`` to ``p2``.\n p1 and p2 must have the same number of columns, and columns correspond\n to the same point.\n \"\"\"\n\n # first find the centroids of both point clouds\n p1_centroid = np.mean(p1, axis=0)\n p2_centroid = np.mean(p2, axis=0)\n\n # get the point clouds in reference to their centroids\n p1_centered = p1 - p1_centroid\n p2_centered = p2 - p2_centroid\n\n # compute moment matrix\n M = np.dot(p2_centered.T, p1_centered)\n\n # get singular value decomposition of the cross covariance matrix\n U, W, VT = np.linalg.svd(M)\n\n # get rotation between the two point clouds\n R = U @ VT\n # special reflection case\n if np.linalg.det(R) < 0:\n VT[-1, :] *= -1\n R = VT.T @ U.T\n\n # get the translation\n t = np.expand_dims(p2_centroid, 0).T - np.dot(R, np.expand_dims(p1_centroid, 0).T)\n\n # assemble translation and rotation into a transformation matrix\n T = np.identity(3)\n T[:2, 2] = np.squeeze(t)\n T[:2, :2] = R\n\n return T\n\n\ndef trplot2(\n T,\n color=\"blue\",\n frame=None,\n axislabel=True,\n axissubscript=True,\n textcolor=None,\n labels=(\"X\", \"Y\"),\n length=1,\n arrow=True,\n rviz=False,\n ax=None,\n block=False,\n dims=None,\n wtl=0.2,\n width=1,\n d1=0.1,\n d2=1.15,\n **kwargs\n):\n \"\"\"\n Plot a 2D coordinate frame\n\n :param T: an SE(3) or SO(3) pose to be displayed as coordinate frame\n :type: ndarray(3,3) or ndarray(2,2)\n :param color: color of the lines defining the frame\n :type color: str\n :param textcolor: color of text labels for the frame, default color of lines above\n :type textcolor: str\n :param frame: label the frame, name is shown below the frame and as subscripts on the frame axis labels\n :type frame: str\n :param axislabel: display labels on axes, default True\n :type axislabel: bool\n :param axissubscript: display subscripts on axis labels, default True\n :type axissubscript: bool\n :param labels: labels for the axes, defaults to X and Y\n :type labels: 2-tuple of 
strings\n :param length: length of coordinate frame axes, default 1\n :type length: float\n :param arrow: show arrow heads, default True\n :type arrow: bool\n :param ax: the axes to plot into, defaults to current axes\n :type ax: Axes3D reference\n :param block: run the GUI main loop until all windows are closed, default True\n :type block: bool\n :param dims: dimension of plot volume as [xmin, xmax, ymin, ymax]\n :type dims: array_like(4)\n :param wtl: width-to-length ratio for arrows, default 0.2\n :type wtl: float\n :param rviz: show Rviz style arrows, default False\n :type rviz: bool\n :param projection: 3D projection: ortho [default] or persp\n :type projection: str\n :param width: width of lines, default 1\n :type width: float\n :param d1: distance of frame axis label text from origin, default 0.05\n :type d1: float\n :param d2: distance of frame label text from origin, default 1.15\n :type d2: float\n :return: axes containing the frame\n :rtype: AxesSubplot\n :raises ValueError: bad argument\n\n Adds a 2D coordinate frame represented by the SO(2) or SE(2) matrix to the current axes.\n\n The appearance of the coordinate frame depends on many parameters:\n\n - coordinate axes depend on:\n - ``color`` of axes\n - ``width`` of line\n - ``length`` of line\n - ``arrow`` if True [default] draw the axis with an arrow head\n - coordinate axis labels depend on:\n - ``axislabel`` if True [default] label the axis, default labels are X, Y, Z\n - ``labels`` 2-list of alternative axis labels\n - ``textcolor`` which defaults to ``color``\n - ``axissubscript`` if True [default] add the frame label ``frame`` as a subscript\n for each axis label\n - coordinate frame label depends on:\n - `frame` the label placed inside {} near the origin of the frame\n - a dot at the origin\n - ``originsize`` size of the dot, if zero no dot\n - ``origincolor`` color of the dot, defaults to ``color``\n - If no current figure, one is created\n - If current figure, but no axes, a 3d Axes is created\n\n Examples:\n\n trplot2(T, frame='A')\n trplot2(T, frame='A', color='green')\n trplot2(T1, 'labels', 'AB');\n\n :SymPy: not supported\n\n :seealso: :func:`tranimate2` :func:`plotvol2` :func:`axes_logic`\n \"\"\"\n\n # TODO\n # animation\n # style='line', 'arrow', 'rviz'\n\n # check input types\n if isrot2(T, check=True):\n T = base.r2t(T)\n elif not ishom2(T, check=True):\n raise ValueError(\"argument is not valid SE(2) matrix\")\n\n ax = base.axes_logic(ax, 2)\n\n try:\n if not ax.get_xlabel():\n ax.set_xlabel(labels[0])\n if not ax.get_ylabel():\n ax.set_ylabel(labels[0])\n except AttributeError:\n pass # if axes are an Animate object\n\n if not hasattr(ax, \"_plotvol\"):\n ax.set_aspect(\"equal\")\n\n if dims is not None:\n ax.axis(base.expand_dims(dims))\n elif not hasattr(ax, \"_plotvol\"):\n ax.autoscale(enable=True, axis=\"both\")\n\n # create unit vectors in homogeneous form\n o = T @ np.array([0, 0, 1])\n x = T @ np.array([length, 0, 1])\n y = T @ np.array([0, length, 1])\n\n # draw the axes\n\n if rviz:\n ax.plot([o[0], x[0]], [o[1], x[1]], color=\"red\", linewidth=5 * width)\n ax.plot([o[0], y[0]], [o[1], y[1]], color=\"lime\", linewidth=5 * width)\n elif arrow:\n ax.quiver(\n o[0],\n o[1],\n x[0] - o[0],\n x[1] - o[1],\n angles=\"xy\",\n scale_units=\"xy\",\n scale=1,\n linewidth=width,\n facecolor=color,\n edgecolor=color,\n )\n ax.quiver(\n o[0],\n o[1],\n y[0] - o[0],\n y[1] - o[1],\n angles=\"xy\",\n scale_units=\"xy\",\n scale=1,\n linewidth=width,\n facecolor=color,\n edgecolor=color,\n )\n # plot an 
invisible point at the end of each arrow to allow auto-scaling to work\n ax.scatter(x=[o[0], x[0], y[0]], y=[o[1], x[1], y[1]], s=[20, 0, 0])\n else:\n ax.plot([o[0], x[0]], [o[1], x[1]], color=color, linewidth=width)\n ax.plot([o[0], y[0]], [o[1], y[1]], color=color, linewidth=width)\n\n # label the frame\n if frame:\n if textcolor is not None:\n color = textcolor\n\n o1 = T @ np.array([-d1, -d1, 1])\n ax.text(\n o1[0],\n o1[1],\n r\"$\\{\" + frame + r\"\\}$\",\n color=color,\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n )\n\n if axislabel:\n # add the labels to each axis\n x = (x - o) * d2 + o\n y = (y - o) * d2 + o\n\n if frame is None or not axissubscript:\n format = \"${:s}$\"\n else:\n format = \"${:s}_{{{:s}}}$\"\n\n ax.text(\n x[0],\n x[1],\n format.format(labels[0], frame),\n color=color,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n ax.text(\n y[0],\n y[1],\n format.format(labels[1], frame),\n color=color,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n\n if block:\n # calling this at all, causes FuncAnimation to fail so when invoked from tranimate2 skip this bit\n plt.show(block=block)\n return ax\n\n\ndef tranimate2(T, **kwargs):\n \"\"\"\n Animate a 2D coordinate frame\n\n :param T: an SE(2) or SO(2) pose to be displayed as coordinate frame\n :type: ndarray(3,3) or ndarray(2,2)\n :param nframes: number of steps in the animation [defaault 100]\n :type nframes: int\n :param repeat: animate in endless loop [default False]\n :type repeat: bool\n :param interval: number of milliseconds between frames [default 50]\n :type interval: int\n :param movie: name of file to write MP4 movie into\n :type movie: str\n\n Animates a 2D coordinate frame moving from the world frame to a frame represented by the SO(2) or SE(2) matrix to the current axes.\n\n - If no current figure, one is created\n - If current figure, but no axes, a 3d Axes is created\n\n\n Examples:\n\n tranimate2(transl(1,2)@trot2(1), frame='A', arrow=False, dims=[0, 5])\n tranimate2(transl(1,2)@trot2(1), frame='A', arrow=False, dims=[0, 5], movie='spin.mp4')\n \"\"\"\n anim = base.animate.Animate2(**kwargs)\n try:\n del kwargs[\"dims\"]\n except KeyError:\n pass\n\n anim.trplot2(T, **kwargs)\n anim.run(**kwargs)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n import pathlib\n\n # trplot2( transl2(1,2), frame='A', rviz=True, width=1)\n # trplot2( transl2(3,1), color='red', arrow=True, width=3, frame='B')\n # trplot2( transl2(4, 3)@trot2(math.pi/3), color='green', frame='c')\n # plt.grid(True)\n\n exec(\n open(\n pathlib.Path(__file__).parent.parent.parent.absolute()\n / \"tests\"\n / \"base\"\n / \"test_transforms2d.py\"\n ).read()\n ) # pylint: disable=exec-used\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "numpy.block", "numpy.linalg.det", "numpy.mean", "numpy.eye", "numpy.identity", "numpy.finfo", "numpy.linalg.svd", "numpy.abs", "numpy.squeeze", "numpy.expand_dims" ] ]
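A short usage sketch for the SE(2)/SO(2) helpers defined above; the import line follows the docstring examples in the file and the numeric values are arbitrary.

    import numpy as np
    from spatialmath.base import rot2, trot2, transl2, xyt2tr, tr2xyt

    R = rot2(0.3)                    # 2x2 SO(2) rotation, angle in radians
    T = transl2(1, 2) @ trot2(0.3)   # 3x3 SE(2): translation (1, 2) plus rotation
    assert np.allclose(T, xyt2tr([1, 2, 0.3]))
    print(tr2xyt(T))                 # -> [1.  2.  0.3]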
tobias-liaudat/ModOpt
[ "913ce6e06340ef03db925be2b1301181836c119d" ]
[ "modopt/base/transform.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"DATA TRANSFORM ROUTINES\n\nThis module contains methods for transforming data.\n\n:Author: Samuel Farrens <[email protected]>\n\n\"\"\"\n\nimport numpy as np\n\n\ndef cube2map(data_cube, layout):\n r\"\"\"Cube to Map\n\n This method transforms the input data from a 3D cube to a 2D map with a\n specified layout\n\n Parameters\n ----------\n data_cube : numpy.ndarray\n Input data cube, 3D array of 2D images\n Layout : tuple\n 2D layout of 2D images\n\n Returns\n -------\n numpy.ndarray\n 2D map\n\n Raises\n ------\n ValueError\n For invalid data dimensions\n ValueError\n For invalid layout\n\n Examples\n --------\n >>> from modopt.base.transform import cube2map\n >>> a = np.arange(16).reshape((4, 2, 2))\n >>> cube2map(a, (2, 2))\n array([[ 0, 1, 4, 5],\n [ 2, 3, 6, 7],\n [ 8, 9, 12, 13],\n [10, 11, 14, 15]])\n\n See Also\n --------\n map2cube : complimentary function\n\n \"\"\"\n\n if data_cube.ndim != 3:\n raise ValueError('The input data must have 3 dimensions.')\n\n if data_cube.shape[0] != np.prod(layout):\n raise ValueError('The desired layout must match the number of input '\n 'data layers.')\n\n return np.vstack([np.hstack(data_cube[slice(layout[1] * i, layout[1] *\n (i + 1))]) for i in range(layout[0])])\n\n\ndef map2cube(data_map, layout):\n r\"\"\"Map to cube\n\n This method transforms the input data from a 2D map with given layout to\n a 3D cube\n\n Parameters\n ----------\n data_map : numpy.ndarray\n Input data map, 2D array\n layout : tuple\n 2D layout of 2D images\n\n Returns\n -------\n numpy.ndarray\n 3D cube\n\n Raises\n ------\n ValueError\n For invalid layout\n\n Examples\n --------\n >>> from modopt.base.transform import map2cube\n >>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],\n [10, 11, 14, 15]])\n >>> map2cube(a, (2, 2))\n array([[[ 0, 1],\n [ 2, 3]],\n [[ 4, 5],\n [ 6, 7]],\n [[ 8, 9],\n [10, 11]],\n [[12, 13],\n [14, 15]]])\n\n See Also\n --------\n cube2map : complimentary function\n\n \"\"\"\n\n if np.all(np.array(data_map.shape) % np.array(layout)):\n raise ValueError('The desired layout must be a multiple of the number '\n 'pixels in the data map.')\n\n d_shape = np.array(data_map.shape) // np.array(layout)\n\n return np.array([data_map[(slice(i * d_shape[0], (i + 1) * d_shape[0]),\n slice(j * d_shape[1], (j + 1) * d_shape[1]))] for i in\n range(layout[0]) for j in range(layout[1])])\n\n\ndef map2matrix(data_map, layout):\n r\"\"\"Map to Matrix\n\n This method transforms a 2D map to a 2D matrix\n\n Parameters\n ----------\n data_map : numpy.ndarray\n Input data map, 2D array\n layout : tuple\n 2D layout of 2D images\n\n Returns\n -------\n numpy.ndarray\n 2D matrix\n\n Raises\n ------\n ValueError\n For invalid layout\n\n Examples\n --------\n >>> from modopt.base.transform import map2matrix\n >>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],\n [10, 11, 14, 15]])\n >>> map2matrix(a, (2, 2))\n array([[ 0, 4, 8, 12],\n [ 1, 5, 9, 13],\n [ 2, 6, 10, 14],\n [ 3, 7, 11, 15]])\n\n See Also\n --------\n matrix2map : complimentary function\n\n \"\"\"\n\n layout = np.array(layout)\n\n # Select n objects\n n_obj = np.prod(layout)\n\n # Get the shape of the images\n image_shape = (np.array(data_map.shape) // layout)[0]\n\n # Stack objects from map\n data_matrix = []\n\n for i in range(n_obj):\n lower = (image_shape * (i // layout[1]),\n image_shape * (i % layout[1]))\n upper = (image_shape * (i // layout[1] + 1),\n image_shape * (i % layout[1] + 1))\n data_matrix.append((data_map[lower[0]:upper[0],\n 
lower[1]:upper[1]]).reshape(image_shape ** 2))\n\n return np.array(data_matrix).T\n\n\ndef matrix2map(data_matrix, map_shape):\n r\"\"\"Matrix to Map\n\n This method transforms a 2D matrix to a 2D map\n\n Parameters\n ----------\n data_matrix : numpy.ndarray\n Input data matrix, 2D array\n map_shape : tuple\n 2D shape of the output map\n\n Returns\n -------\n numpy.ndarray\n 2D map\n\n Raises\n ------\n ValueError\n For invalid layout\n\n Examples\n --------\n >>> from modopt.base.transform import matrix2map\n >>> a = np.array([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14],\n [3, 7, 11, 15]])\n >>> matrix2map(a, (2, 2))\n array([[ 0, 1, 4, 5],\n [ 2, 3, 6, 7],\n [ 8, 9, 12, 13],\n [10, 11, 14, 15]])\n\n See Also\n --------\n map2matrix : complimentary function\n\n \"\"\"\n\n map_shape = np.array(map_shape)\n\n # Get the shape and layout of the images\n image_shape = np.sqrt(data_matrix.shape[0]).astype(int)\n layout = np.array(map_shape // np.repeat(image_shape, 2), dtype='int')\n\n # Map objects from matrix\n data_map = np.zeros(map_shape)\n\n temp = data_matrix.reshape(image_shape, image_shape, data_matrix.shape[1])\n\n for i in range(data_matrix.shape[1]):\n lower = (image_shape * (i // layout[1]),\n image_shape * (i % layout[1]))\n upper = (image_shape * (i // layout[1] + 1),\n image_shape * (i % layout[1] + 1))\n data_map[lower[0]:upper[0], lower[1]:upper[1]] = temp[:, :, i]\n\n return data_map.astype(int)\n\n\ndef cube2matrix(data_cube):\n r\"\"\"Cube to Matrix\n\n This method transforms a 3D cube to a 2D matrix\n\n Parameters\n ----------\n data_cube : numpy.ndarray\n Input data cube, 3D array\n\n Returns\n -------\n numpy.ndarray\n 2D matrix\n\n Examples\n --------\n >>> from modopt.base.transform import cube2matrix\n >>> a = np.arange(16).reshape((4, 2, 2))\n >>> cube2matrix(a)\n array([[ 0, 4, 8, 12],\n [ 1, 5, 9, 13],\n [ 2, 6, 10, 14],\n [ 3, 7, 11, 15]])\n\n See Also\n --------\n matrix2cube : complimentary function\n\n \"\"\"\n\n return data_cube.reshape([data_cube.shape[0]] +\n [np.prod(data_cube.shape[1:])]).T\n\n\ndef matrix2cube(data_matrix, im_shape):\n r\"\"\"Matrix to Cube\n\n This method transforms a 2D matrix to a 3D cube\n\n Parameters\n ----------\n data_matrix : numpy.ndarray\n Input data cube, 2D array\n im_shape : tuple\n 2D shape of the individual images\n\n Returns\n -------\n numpy.ndarray\n 3D cube\n\n Examples\n --------\n >>> from modopt.base.transform import matrix2cube\n >>> a = np.array([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14],\n [3, 7, 11, 15]])\n >>> matrix2cube(a, (2, 2))\n array([[[ 0, 1],\n [ 2, 3]],\n [[ 4, 5],\n [ 6, 7]],\n [[ 8, 9],\n [10, 11]],\n [[12, 13],\n [14, 15]]])\n\n See Also\n --------\n cube2matrix : complimentary function\n\n \"\"\"\n\n return data_matrix.T.reshape([data_matrix.shape[1]] + list(im_shape))\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.prod", "numpy.sqrt", "numpy.repeat" ] ]
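A round-trip sketch for the reshaping helpers defined above; the shapes mirror the docstring examples (a stack of four 2x2 images and a 2x2 layout).

    import numpy as np
    from modopt.base.transform import cube2map, map2cube, cube2matrix, matrix2cube

    cube = np.arange(16).reshape((4, 2, 2))   # four 2x2 images
    layout = (2, 2)

    mosaic = cube2map(cube, layout)           # 4x4 map laid out 2-by-2
    assert np.array_equal(map2cube(mosaic, layout), cube)

    matrix = cube2matrix(cube)                # shape (4, 4), one column per image
    assert np.array_equal(matrix2cube(matrix, (2, 2)), cube)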
NZ99/dm_mathematics_ita
[ "f3ed1bcce530f659d3abf233a91388b1ffd08a25" ]
[ "mathematics_dataset/modules/numbers.py" ]
[ "# Copyright 2018 DeepMind Technologies Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Number-related questions, e.g., \"write seventy-two as a number\".\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport math\nimport random\n\n# Dependency imports\nfrom mathematics_dataset import example\nfrom mathematics_dataset.sample import number\nfrom mathematics_dataset.util import composition\nfrom mathematics_dataset.util import display\nimport numpy as np\nimport six\nfrom six.moves import range\nimport sympy\n\n\n_ENTROPY_TRAIN = (3, 10)\n_ENTROPY_INTERPOLATE = (8, 8)\n_ENTROPY_EXTRAPOLATE = (12, 12)\n\n\n# Number of module compositions appearing in train/test, and extrapolation data.\n_NUM_MODULES_COMPOSED = [2, 4]\n\n\ndef _make_modules(entropy, num_modules_composed):\n \"\"\"Returns modules given \"difficulty\" parameters.\"\"\"\n fns = {\n 'gcd': gcd,\n 'lcm': lcm,\n 'div_remainder': div_remainder,\n 'is_prime': is_prime,\n 'is_factor': is_factor,\n 'round_number': round_number,\n 'place_value': place_value,\n 'list_prime_factors': list_prime_factors,\n }\n\n # These modules don't have both pure and composed.\n modules = {\n 'base_conversion': functools.partial(base_conversion, *entropy),\n }\n\n sample_args_pure = composition.PreSampleArgs(1, 1, *entropy)\n sample_args_composed = composition.PreSampleArgs(\n num_modules_composed[0], num_modules_composed[1], *entropy)\n\n for name, module in six.iteritems(fns):\n modules[name] = functools.partial(module, None, sample_args_pure)\n modules[name + '_composed'] = functools.partial(\n module, None, sample_args_composed)\n\n return modules\n\n\ndef train(entropy_fn):\n \"\"\"Returns dict of training modules.\"\"\"\n return _make_modules(\n entropy=entropy_fn(_ENTROPY_TRAIN),\n num_modules_composed=_NUM_MODULES_COMPOSED)\n\n\ndef test():\n \"\"\"Returns dict of testing modules.\"\"\"\n return _make_modules(\n entropy=_ENTROPY_INTERPOLATE,\n num_modules_composed=_NUM_MODULES_COMPOSED)\n\n\ndef test_extra():\n \"\"\"Returns dict of extrapolation testing modules.\"\"\"\n sample_args_pure = composition.PreSampleArgs(1, 1, *_ENTROPY_EXTRAPOLATE)\n return {\n 'round_number_big': functools.partial(\n round_number, None, sample_args_pure),\n 'place_value_big': functools.partial(place_value, None, sample_args_pure),\n }\n\n\ndef place_value(value, sample_args, context=None):\n \"\"\"E.g., \"Q: What is the tens digit of 31859? 
A: 5.\"\"\"\n del value # unused for now\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n integer = number.integer(entropy, signed=False, min_abs=1)\n (entity,) = context.sample(sample_args, [integer])\n\n integer_as_string = str(integer)\n num_digits = len(integer_as_string)\n\n firsts = ['', 'elle decine d', 'elle centinaia d']\n seconds = [\n 'i migliaia', 'i milioni', 'i miliardi', 'i bilioni', 'i migliaia di bilioni',\n 'i trilioni', 'i triliardi', 'i quadrilioni', 'i quadriliardi', 'i migliaia di quadriliardi',\n 'i milioni di quadriliardi',\n ]\n place_names = ['elle unitร ', 'elle decine', 'elle centinaia', 'elle migliaia']\n for second in seconds:\n for first in firsts:\n place_names.append(first + second)\n\n place = random.randint(1, num_digits) # 1 = units, 2 = tens, etc.\n place_name = place_names[place - 1]\n answer = sympy.Integer(integer_as_string[num_digits - place])\n\n return example.Problem(\n question=example.question(\n context,\n 'Qual รจ la cifra d{place_name} in {integer}?',\n place_name=place_name, integer=entity.expression_else_handle),\n answer=answer)\n\n\n# TODO(b/124040078): add to composition system?\ndef round_number(value, sample_args, context=None):\n \"\"\"Question for rounding integers and decimals.\"\"\"\n del value # unused for now\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n\n # This is the power of 10 to round to. E.g., power == 0 corresponds to\n # rounding to the nearest integer; power == -2 corresponds to rounding to two\n # decimal places, and power == 3 corresponds to rounding to the nearest 1000.\n power = random.randint(-7, 6)\n\n answer_entropy = 1 + random.uniform(0, entropy / 2)\n entropy = max(1, entropy - answer_entropy)\n value_integer = number.integer(answer_entropy, signed=True)\n\n remainder_divisor = 10 ** int(math.ceil(entropy))\n remainder_range_lower = -remainder_divisor / 2\n remainder_range_upper = remainder_divisor / 2\n\n if value_integer <= 0:\n remainder_range_lower += 1\n if value_integer >= 0:\n remainder_range_upper -= 1\n\n remainder = random.randint(remainder_range_lower, remainder_range_upper)\n input_ = value_integer + sympy.Rational(remainder, remainder_divisor)\n scale = 10**power if power >= 0 else sympy.Rational(1, 10**(-power))\n input_ = input_ * scale\n value = value_integer * scale\n if not number.is_integer(input_):\n input_ = display.Decimal(input_)\n if not number.is_integer(value):\n value = display.Decimal(value)\n\n (input_,) = context.sample(sample_args, [input_])\n\n if power > 0:\n # Rounding to a power of ten.\n round_to = 10**power\n #if random.choice([False, True]):\n # Write the rounding value as a word instead.\n # round_to = display.StringNumber(round_to,\n # join_number_words_with_hyphens=False)\n #else:\n description = 'al piรน vicino multiplo di {round_to}'.format(round_to=round_to)\n elif power == 0:# and random.choice([False, True]):\n # Round to nearest integer.\n description = 'all\\'intero piรน vicino alla'\n else:\n # Round to decimal places.\n description = 'alla {dps}a cifra decimale dopo la virgola'\n #if power != -1:\n # # Plural\n # description += 's'\n dps = display.StringOrdinal(-power)\n #if random.choice([False, True]):\n # dps = display.StringNumber(dps)\n description = description.format(dps=dps)\n\n template = random.choice([\n 'Si approssimi {input} {description}.',\n 'Quanto fa {input} approssimato {description}?',\n ])\n\n return example.Problem(\n 
question=example.question(\n context, template, input=input_, description=description),\n answer=value)\n\n\ndef _semi_prime(entropy):\n \"\"\"Generates a semi-prime with the given entropy.\"\"\"\n # Add on extra entropy to account for the sparsity of the primes; we don't\n # actually use the integers sampled, but rather a random prime close to them;\n # thus some entropy is lost, which we must account for\n entropy += math.log10(max(1, entropy * math.log(10)))\n\n # We intentionally uniformy sample the \"entropy\" (i.e., approx number digits)\n # of the two factors.\n entropy_1, entropy_2 = entropy * np.random.dirichlet([1, 1])\n\n # Need >= 2 for randprime to always work (Betrand's postulate).\n approx_1 = number.integer(entropy_1, signed=False, min_abs=2)\n approx_2 = number.integer(entropy_2, signed=False, min_abs=2)\n\n factor_1 = sympy.ntheory.generate.randprime(approx_1 / 2, approx_1 * 2)\n factor_2 = sympy.ntheory.generate.randprime(approx_2 / 2, approx_2 * 2)\n\n return factor_1 * factor_2\n\n\ndef is_prime(value, sample_args, context=None):\n \"\"\"Questions asking about primality.\"\"\"\n del value # unused for now\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n composite = _semi_prime(entropy)\n\n if random.choice([False, True]):\n # Use the composite\n integer = composite\n is_prime_ = False\n else:\n # Take the next prime after the composite, to ensure the same distribution\n # as composites. Do \"composite - 4\" so we occasionally see \"2\" as a prime.\n integer = sympy.ntheory.generate.nextprime(composite - 4)\n is_prime_ = True\n\n (integer_entity,) = context.sample(sample_args, [integer])\n\n if random.choice([False, True]) and integer != 1:\n answer = 'Si' if not is_prime_ else 'No'\n #attribute_name = random.choice(['composite', 'a composite number'])\n attribute_name = 'un numero composto'\n else:\n answer = 'Si' if is_prime_ else 'No'\n #attribute_name = random.choice(['primo', 'a prime number'])\n attribute_name = 'un numero primo'\n\n return example.Problem(\n question=example.question(\n context, 'รˆ {integer} {attribute}?',\n integer=integer_entity.expression_else_handle,\n attribute=attribute_name),\n answer=answer)\n\n\ndef is_factor(value, sample_args, context=None):\n \"\"\"E.g., \"Is 5 a factor of 48?\".\"\"\"\n del value # unused\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n\n entropy_factor = 1 + random.uniform(0, entropy/3)\n entropy = max(0, entropy - entropy_factor)\n maybe_factor = number.integer(entropy_factor, False, min_abs=2)\n\n integer = maybe_factor * number.integer(entropy, False, min_abs=1)\n # Produce balanced classes.\n if random.choice([False, True]):\n # The following makes it not a factor.\n integer += random.randint(1, maybe_factor - 1)\n\n (entity,) = context.sample(sample_args, [integer])\n\n templates = [\n 'รˆ {maybe_factor} un divisore di {value}?',\n 'รˆ {value} un multiplo di {maybe_factor}?',\n ]\n if maybe_factor == 2:\n templates += [\n 'รˆ {value} pari?',\n ]\n template = random.choice(templates)\n\n answer = 'Si' if integer % maybe_factor == 0 else 'No'\n return example.Problem(\n question=example.question(\n context, template, maybe_factor=maybe_factor,\n value=entity.expression_else_handle),\n answer=answer)\n\n\ndef list_prime_factors(value, sample_args, context=None):\n \"\"\"E.g., \"What are the prime factors of 36?\".\"\"\"\n del value # unused for now\n if context is None:\n context = 
composition.Context()\n\n entropy, sample_args = sample_args.peel()\n entropy = max(1, entropy)\n\n integer = number.integer(entropy, signed=False, min_abs=2)\n\n (entity,) = context.sample(sample_args, [integer])\n prime_factors = sorted(sympy.factorint(integer).keys())\n template = random.choice([\n 'Quali sono i fattori primi di {integer}?',\n 'Si elenchino i fattori primi di {integer}.',\n ])\n return example.Problem(\n question=example.question(\n context, template, integer=entity.expression_else_handle),\n answer=display.NumberList(prime_factors))\n\n\ndef _pair_with_large_hidden_factor(entropy):\n \"\"\"Returns pair of numbers with possibly large common factor hidden.\"\"\"\n entropy_p, entropy_q, _ = entropy * np.random.dirichlet([1, 1, 1])\n # Min entropy on p and q to minimize trivial solutions.\n entropy_p = max(1, entropy_p)\n entropy_q = max(1, entropy_q)\n entropy_mult = max(0, entropy - entropy_p - entropy_q)\n\n p = number.integer(entropy_p, False, min_abs=1)\n q = number.integer(entropy_q, False, min_abs=1)\n mult = number.integer(entropy_mult, False, min_abs=1)\n p *= mult\n q *= mult\n return p, q\n\n\ndef lcm(value, sample_args, context=None):\n \"\"\"Question for least common multiple of p and q.\"\"\"\n del value # unused\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n\n p, q = _pair_with_large_hidden_factor(entropy)\n answer = sympy.lcm(p, q)\n\n if random.choice([False, True]):\n p, q = context.sample(sample_args, [p, q])\n # Ask the question directly.\n #adjective = random.choice(['minimo', 'lowest', 'smallest'])\n template = random.choice([\n 'Si calcoli il minimo comune multiplo di {p} e {q}.',\n 'Qual รจ il minimo comune multiplo di {p} e {q}?',\n ])\n return example.Problem(\n question=example.question(\n context, template, p=p.expression_else_handle,\n q=q.expression_else_handle),\n answer=answer)\n else:\n # Phrase the question as finding the common denominator of two fractions.\n p = number.integer(2, signed=True, coprime_to=p) / p\n q = number.integer(2, signed=True, coprime_to=q) / q\n p, q = context.sample(sample_args, [p, q])\n\n template = random.choice([\n 'Qual รจ il comune denominatore di {p} e {q}?',\n 'Si trovi il comune denominatore di {p} e {q}.',\n 'Si calcoli il comune denominatore di {p} e {q}.',\n ])\n return example.Problem(\n question=example.question(\n context, template, p=p.expression_else_handle,\n q=q.expression_else_handle),\n answer=answer)\n\n\ndef _random_coprime_pair(entropy):\n \"\"\"Returns a pair of random coprime integers.\"\"\"\n coprime_product = number.integer(entropy, False, min_abs=1)\n factors = sympy.factorint(coprime_product)\n def take():\n prime = random.choice(list(factors.keys()))\n power = factors[prime]\n del factors[prime]\n return prime ** power\n\n if random.random() < 0.8 and len(factors) >= 2:\n # Disallow trivial factoring where possible.\n count_left = random.randint(1, len(factors) - 1)\n count_right = len(factors) - count_left\n else:\n count_left = random.randint(0, len(factors))\n count_right = len(factors) - count_left\n\n left = sympy.prod([take() for _ in range(count_left)])\n right = sympy.prod([take() for _ in range(count_right)])\n assert left * right == coprime_product\n return left, right\n\n\n# @composition.module(number.is_positive_integer)\ndef gcd(value, sample_args, context=None):\n \"\"\"Question for greatest common divisor of p and q.\"\"\"\n is_question = context is None\n if context is None:\n context = composition.Context()\n\n 
entropy, sample_args = sample_args.peel()\n if value is None:\n value_entropy = 1 + random.uniform(0, entropy/3)\n entropy = max(1, entropy - value_entropy)\n value = number.integer(value_entropy, False, min_abs=1)\n\n p_mult, q_mult = _random_coprime_pair(entropy)\n\n p = value * p_mult\n q = value * q_mult\n assert sympy.gcd(p, q) == value\n\n p, q = context.sample(sample_args, [p, q])\n\n #adjective = (random.choice(['greatest', 'highest']) + ' common '\n # + random.choice(['divisor', 'factor']))\n\n if is_question:\n template = random.choice([\n 'Si calcoli il massimo comune divisore di {p} e {q}.',\n 'Qual รจ il massimo comune divisore di {p} e {q}?',\n ])\n return example.Problem(\n question=example.question(\n context, template, p=p, q=q),\n answer=value)\n else:\n return composition.Entity(\n context=context,\n value=value,\n description='Sia {self} il massimo comune divisore di {p} e {q}.', p=p, q=q)\n\n\n# @composition.module(number.is_positive_integer)\ndef div_remainder(value, sample_args, context=None):\n \"\"\"E.g., \"What is the remainder when 27 is divided by 5?\".\"\"\"\n is_question = context is None\n if context is None:\n context = composition.Context()\n\n entropy, sample_args = sample_args.peel()\n\n if value is None:\n entropy_value = 1 + random.uniform(0, entropy/3)\n entropy = max(0, entropy - entropy_value)\n value = number.integer(entropy_value, signed=False)\n\n entropy_a, entropy_q = entropy * np.random.dirichlet([1, 1])\n a = number.integer(entropy_a, signed=False, min_abs=1)\n q = value + number.integer(entropy_q, signed=False, min_abs=1)\n\n p = a * q + value\n assert p % q == value\n p, q = context.sample(sample_args, [p, q])\n\n if is_question:\n template = random.choice([\n 'Si calcoli il resto di {p} diviso {q}.',\n 'Qual รจ il resto di {p} diviso {q}?',\n ])\n return example.Problem(\n question=example.question(\n context, template, p=p.expression_else_handle,\n q=q.expression_else_handle),\n answer=value)\n else:\n return composition.Entity(\n context=context,\n value=value,\n description='Sia {self} il resto di {p} diviso {q}.',\n p=p, q=q)\n\n\ndef base_conversion(min_entropy, max_entropy):\n \"\"\"E.g., \"What is 17 base 8 in base 10?\".\"\"\"\n context = composition.Context()\n\n from_base = random.randint(2, 16)\n while True:\n to_base = random.randint(2, 16)\n if to_base != from_base:\n break\n\n # Entropy used up in selecting bases.\n entropy_used = math.log10(16 * 15)\n entropy = random.uniform(\n min_entropy - entropy_used, max_entropy - entropy_used)\n\n value = number.integer(entropy, signed=True)\n template = random.choice([\n '{from_str}, se convertito da base {from_base} a base {to_base}, รจ uguale a',\n 'Si converta {from_str} da base {from_base} a base {to_base}.',\n 'Qual รจ {from_str} (in base {from_base}) convertito a base {to_base}?',\n ])\n return example.Problem(\n question=example.question(\n context, template,\n from_str=display.NumberInBase(value, from_base),\n from_base=from_base,\n to_base=to_base),\n answer=display.NumberInBase(value, to_base))\n" ]
[ [ "numpy.random.dirichlet" ] ]
Letifery/income-analysis-USA
[ "2d2f7cdc5b4caecbbb7c3ae446f2102f2437b0d3" ]
[ "utils/model-alteration.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.naive_bayes import ComplementNB\r\n\r\nclass ModelAlteration():\r\n def strat_kfold_evaluation(\r\n\t\tself,\r\n\t\tdf, \r\n\t\tmodel, \r\n\t\ttarget:int, \r\n\t\tfolds:int, \r\n\t\tshuffle:bool=True, \r\n\t\trandom_state:int=None) -> [float, ([],[])]:\r\n '''\r\n\t\tImplements some centroid based clustering algorithms on n-dimensional data\r\n\t\t\t\r\n\t\tParameters\r\n\t\t------------\r\n\t\tdf\t\t\t: Your dataframe\r\n\t\tmodel\t\t: A scikitlearn model used to classify labels\r\n\t\ttarget \t\t: The index of your target column\r\n\t\tfolds\t\t: How often your dataframe should be split\r\n\t\tshuffle\t\t: Specifies if the samples should be shuffled\r\n\t\trandom_state: If shuffle=True, random_state specifies the used seed.\r\n\t\t\t\t\tif None, shuffle will always be random.\r\n\t\t\t\t\t\t\r\n\t\tReturns\r\n\t\t------------\r\n\t\taccuracy\t: A list which contains the accuracy of the model over each folds\r\n\t\tbest_fold\t: The fold with the highest accuracy with the used model\r\n\t'''\r\n\t\r\n data, target = df.loc[:, df.columns!=target].values, df[target].values \r\n skf = StratifiedKFold(n_splits=folds, shuffle=shuffle, random_state=random_state)\r\n accuracy = [0 for _ in range(folds)]\r\n best_fold = []\r\n for i, index in enumerate(skf.split(data, target)):\r\n x_train, x_test = data[index[0]], data[index[1]]\r\n y_train, y_test = target[index[0]], target[index[1]]\r\n model.fit(x_train, y_train)\r\n accuracy[i] = (model.score(x_test, y_test))*100\r\n if accuracy[i] >= max(accuracy[:-1]): best_fold = index\r\n return(accuracy, best_fold)\r\n\r\n def plot_accuracy(self, acc:[[float]], xlab:str, legend:[str], xaxis:[]=[]):\r\n '''\r\n Plots all permutation of the parameters. \r\n ------------\r\n acc :[[float]]\r\n Contains the accuracy of all folds.\r\n xlab :String \r\n Contains the name for the x-axis.\r\n legend :[String]\r\n Contains the values for the plot legend.\r\n xaxis :[int] or [float] \r\n Contains values for the x-axis.\r\n '''\r\n plt.xlabel(xlab)\r\n plt.ylabel('Accuracy [%]')\r\n acc = acc if len(acc)>0 else [acc]\r\n if not xaxis:\r\n for i, accuracy in enumerate(acc):\r\n plt.plot(range(len(accuracy)), accuracy, label = legend[i])\r\n else:\r\n for i, accuracy in enumerate(acc):\r\n plt.plot(xaxis, accuracy, label = legend[i]) \r\n plt.legend(loc=\"upper left\")\r\n plt.show()\r\n\r\n def optimize_knn(self, \r\n df, \r\n target:int,\r\n neighbours:[int] = list(range(1,11)), \r\n metric:[int]=[1,2,3],\r\n folds:int = 10,\r\n plot:bool=True):\r\n '''\r\n Attempts to find the most optimal model parameters for the k-nearest-\r\n neighbours (kNN) classifier by finding the best fold for each permutation \r\n of the parameters. The best fold is determined by strat_kfold_evaluation(). 
\r\n The accuracy of all best folds is then compared and the parameters of \r\n the best fold are returned (in addition to the fold itself)\r\n Parameters\r\n ------------\r\n df : dataframe\r\n Your datatable\r\n target : int \r\n The index of your target column\r\n neighbours : [int]\r\n A list which contains the number of neighbors which should be used in kNN.\r\n metric : [int] \r\n Which metric should be used for kNN\r\n 1 - Manhattan\r\n 2 - Euclidean\r\n 3>= - Minkowski\r\n folds : int \r\n How often your dataframe should be split in strat_kfold_evaluation\r\n plot : bool\r\n Plots the accuracies over each fold\r\n Returns\r\n ------------\r\n best_fold: (np.array(int), {model_parameters})\r\n\tl \t : An indexlist of the fold which has performed best overall\r\n dic\t : And a dict with the model parameters for the best fold \r\n '''\r\n best_acc, best_model, fold_acc = 0, 0, [[None for _ in neighbours] for _ in metric]\r\n epoch, end = 1, len(neighbours)*len(metric)\r\n for i,m in enumerate(metric):\r\n for j,k in enumerate(neighbours):\r\n model = KNeighborsClassifier(n_neighbors=k, p = m)\r\n fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))\r\n if fold_acc[i][j] > best_acc: \r\n best_acc = fold_acc[i][j]\r\n best_model = (tmp_fold, {\"n_neighbors\" : k, \"p\" : m})\r\n print(\"Epoch %s/%s | neighbours=%s, metric=%s, Accuracy=%s\" % (epoch, end, k, m, fold_acc[i][j]))\r\n epoch += 1\r\n if plot: self.plot_accuracy(fold_acc, \"Number of neighbours\", list(map(lambda x: \"Metric \" + x, list(map(str, metric)))), neighbours)\r\n return(best_model)\r\n\r\n def optimize_perceptron(self, \r\n df, \r\n target:int,\r\n learning_rate:[float] = np.linspace(1, 20, num=20), \r\n penalty:[int]=[0,1,2,3],\r\n folds:int = 10,\r\n plot:bool=True):\r\n '''\r\n Attempts to find the most optimal model parameters for the perceptron \r\n classifier by finding the best fold for each permutation of the \r\n parameters. The best fold is determined by strat_kfold_evaluation(). 
\r\n The accuracy of all best folds is then compared and the parameters of \r\n the best fold are returned (in addition to the fold itself)\r\n Parameters\r\n ------------\r\n df : dataframe\r\n Your datatable\r\n target : int \r\n The index of your target column\r\n learning_rate : [float]\r\n A list containing the number of learning_rates the algorithm should\r\n try out\r\n penalty : [int] \r\n Which penalty should be used\r\n 0 - None\r\n 1 - l1\r\n 2 - l2\r\n 3 - elasticnet\r\n folds : int \r\n How often your dataframe should be split in strat_kfold_evaluation\r\n plot : bool\r\n Plots the accuracies over each fold\r\n Returns\r\n ------------\r\n best_fold: (np.array(int), {model_parameters})\r\n\tl \t : An indexlist of the fold which has performed best overall\r\n dic\t : And a dict with the model parameters for the best fold \r\n '''\r\n best_acc, best_model, fold_acc = 0, 0, [[None for _ in learning_rate] for _ in penalty]\r\n epoch, end = 1, len(learning_rate)*len(penalty)\r\n penalty = list(map((lambda x, d={0:None, 1:\"l1\", 2:\"l2\", 3:\"elasticnet\"}: d[x]), penalty)) \r\n\r\n for i, m in enumerate(penalty):\r\n for j, k in enumerate(learning_rate):\r\n model = Perceptron(eta0=k, penalty=m)\r\n fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))\r\n if fold_acc[i][j] > best_acc: \r\n best_acc = fold_acc[i][j]\r\n best_model = (tmp_fold, { \"eta0\" : k, \"penalty\" : m})\r\n print(\"Epoch %s/%s | learning_rate=%s, penalty=%s, Accuracy=%s\" % (epoch, end, k, m, fold_acc[i][j]))\r\n epoch += 1\r\n if plot: self.plot_accuracy(fold_acc, \"Used learning_rate\", list(map(lambda x: \"penalty: \" + str(x), penalty)), list(learning_rate))\r\n return(best_model)\r\n\r\n def optimize_SVM(self, \r\n df, \r\n target:int,\r\n regularization:[float] = np.linspace(1, 10, num=10), \r\n kernel:[int]=[1,2,3],\r\n folds:int = 10,\r\n plot:bool=True):\r\n '''\r\n Attempts to find the most optimal model parameters for the SVM\r\n classifier by finding the best fold for each permutation of the \r\n parameters. The best fold is determined by strat_kfold_evaluation(). 
\r\n The accuracy of all best folds is then compared and the parameters of \r\n the best fold are returned (in addition to the fold itself)\r\n Parameters\r\n ------------\r\n df : dataframe\r\n Your datatable\r\n target : int \r\n The index of your target column\r\n regularization: [float]\r\n A list containing all penalties which should be tried out on the \r\n respective kernel function\r\n kernel : [int] \r\n Which kernel functions should be used (refers to sklearn.svm.SVC)\r\n 0 - Linear (Takes a long time without dimension reduction)\r\n 1 - Poly\r\n 2 - rbf\r\n 3 - sigmoid\r\n 4 - precomputed (Look at Sklearns documentary first if you want to use it)\r\n folds : int \r\n How often your dataframe should be split in strat_kfold_evaluation\r\n plot : bool\r\n Plots the accuracies over each fold if True\r\n Returns\r\n ------------\r\n best_fold: (np.array(int), {model_parameters})\r\n\tl \t : An indexlist of the fold which has performed best overall\r\n dic\t : And a dict with the model parameters for the best fold \r\n '''\r\n best_acc, best_model, fold_acc = 0, 0, [[None for _ in regularization] for _ in kernel]\r\n epoch, end = 1, len(regularization)*len(kernel)\r\n kernel = list(map((lambda x, d={0:\"linear\", 1:\"poly\", 2:\"rbf\", 3:\"sigmoid\"}: d[x]), kernel)) \r\n\r\n for i, kern in enumerate(kernel):\r\n for j, reg in enumerate(regularization):\r\n model = SVC(C=reg, kernel=kern)\r\n fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))\r\n if fold_acc[i][j] > best_acc: \r\n best_acc = fold_acc[i][j]\r\n best_model = (tmp_fold, {\"C\" :reg, \"kernel\" :kern})\r\n print(\"Epoch %s/%s | regularization = %s, kernel = %s, Accuracy = %s\" % (epoch, end, reg, kern, fold_acc[i][j]))\r\n epoch += 1\r\n if plot: self.plot_accuracy(fold_acc, \"Used regularization\", list(map(lambda x: \"kernel: \" + str(x), kernel)), list(regularization))\r\n return(best_model)\r\n\r\n\r\n def optimize_decision_tree(self, \r\n df, \r\n target:int,\r\n criterion = [\"gini\", \"entropy\"], \r\n max_depth:[int]= np.linspace(1, 10, num=10),\r\n splitter = [\"best\", \"random\"],\r\n folds:int = 10,\r\n plot:bool=True):\r\n '''\r\n Attempts to find the most optimal model parameters for the decision tree \r\n classifier by finding the best fold for each permutation of the \r\n parameters. The best fold is determined by strat_kfold_evaluation(). 
\r\n The accuracy of all best folds is then compared and the parameters of \r\n the best fold are returned (in addition to the fold itself)\r\n Parameters\r\n ------------\r\n df : dataframe\r\n Your datatable\r\n target : int \r\n The index of your target column\r\n criterion : [String]\r\n A list containing \"gini\" and \"entropy\"\r\n max_depth : [int] \r\n A list containing the number of max_depth the algorithm should\r\n try out\r\n splitter : [String] \r\n A list containing \"best\" and \"random\"\r\n folds : int \r\n How often your dataframe should be split in strat_kfold_evaluation\r\n plot : bool\r\n Plots the accuracies over each fold\r\n Returns\r\n ------------\r\n best_fold: (np.array(int), {model_parameters})\r\n\tl \t : An indexlist of the fold which has performed best overall\r\n dic\t : And a dict with the model parameters for the best fold \r\n '''\r\n best_acc, best_model, fold_acc = 0, 0, [[[None for _ in max_depth] for _ in splitter] for _ in criterion]\r\n epoch, end = 1, len(criterion)*len(splitter)*len(max_depth)\r\n\r\n for i, cri in enumerate(criterion):\r\n for j, split in enumerate(splitter):\r\n for k, max_d in enumerate(max_depth):\r\n model = DecisionTreeClassifier(criterion = cri, splitter = split, max_depth = max_d)\r\n fold_acc[i][j][k], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))\r\n if fold_acc[i][j][k] > best_acc: \r\n best_acc = fold_acc[i][j][k]\r\n best_model = (tmp_fold, {\"criterion\": cri, \"splitter\": split, \"max_depth\": max_d})\r\n print(\"Epoch %s/%s | criterion = %s, splitter = %s, max_depth = %s, Accuracy = %s\" % (epoch, end, cri, split, max_d, fold_acc[i][j][k]))\r\n epoch += 1\r\n if plot: \r\n tmp, fold_acc = [], [x for y in fold_acc for x in y]\r\n for i, _ in enumerate(criterion):\r\n tmp += list(map(lambda x, y : \"crit: \" + str(x) + \" split: \" + str(y), [criterion[i]]*len(criterion), splitter))\r\n self.plot_accuracy(fold_acc, \"Used max depth\", tmp, list(max_depth)) \r\n return(best_model)\r\n\r\n\r\n def optimize_NB(self, \r\n df, \r\n target:int,\r\n alpha:[float]= np.linspace(1, 10, num=10),\r\n fit_prior:[bool] = [True, False], \r\n folds:int = 10,\r\n plot:bool=True):\r\n '''\r\n Attempts to find the most optimal model parameters for the NB \r\n classifier by finding the best fold for each permutation of the \r\n parameters. The best fold is determined by strat_kfold_evaluation(). 
\r\n The accuracy of all best folds is then compared and the parameters of \r\n the best fold are returned (in addition to the fold itself)\r\n Parameters\r\n ------------\r\n df : dataframe\r\n Your datatable\r\n target : int \r\n The index of your target column\r\n fit_prior : [bool]\r\n A list of True and False\r\n alpha : [int] \r\n A list containing the number of alpha, the algorithm should\r\n try out\r\n folds : int \r\n How often your dataframe should be split in strat_kfold_evaluation\r\n plot : bool\r\n Plots the accuracies over each fold\r\n Returns\r\n ------------\r\n best_fold: (np.array(int), {model_parameters})\r\n\tl \t : An indexlist of the fold which has performed best overall\r\n dic\t : And a dict with the model parameters for the best fold \r\n '''\r\n\r\n best_acc, best_model, fold_acc = 0, 0, [[None for _ in alpha ] for _ in fit_prior]\r\n epoch, end = 1, len(alpha)*len(fit_prior)\r\n\r\n for i, fit_p in enumerate(fit_prior):\r\n for j, alp in enumerate(alpha):\r\n model = ComplementNB(alpha = alp, fit_prior = fit_p)\r\n fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))\r\n if fold_acc[i][j] > best_acc: \r\n best_acc = fold_acc[i][j]\r\n best_model = (tmp_fold, {\"alpha\" : alp, \"fit_prior\" : fit_p})\r\n print(\"Epoch %s/%s | fit_prior = %s, alpha = %s, Accuracy = %s\" % (epoch, end, fit_p, alp, fold_acc[i][j]))\r\n epoch += 1\r\n if plot: self.plot_accuracy(fold_acc, \"Used alpha\", list(map(lambda x: \"fit_prior: \" + str(x), fit_prior)), list(alpha))\r\n return(best_model)\r\n" ]
[ [ "sklearn.model_selection.StratifiedKFold", "sklearn.naive_bayes.ComplementNB", "matplotlib.pyplot.xlabel", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "sklearn.svm.SVC", "sklearn.linear_model.Perceptron", "matplotlib.pyplot.ylabel", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.show", "numpy.linspace" ] ]
AlonViz/IML.HUJI
[ "107f7c20b8bd64d41452e4a5b66abe843af7eb18" ]
[ "Tests/Ex1_Test.py" ]
[ "from scipy.stats import norm\nfrom IMLearn.learners.gaussian_estimators import UnivariateGaussian\nimport numpy as np\n\nsamples_ = np.random.normal(0, 2, 10)\nlgpdf_true = norm.logpdf(samples_, loc=0, scale=2)\ncalcpdf = lambda x: UnivariateGaussian.log_likelihood(0, 4, x)\ncalcpdfvec = np.vectorize(calcpdf)\nlgpdf_mine = calcpdfvec(samples_)\nlgpdf_mine = np.around(lgpdf_mine, 2)\nlgpdf_true = np.around(lgpdf_true, 2)\nassert (np.array_equal(lgpdf_mine, lgpdf_true))\n" ]
[ [ "numpy.random.normal", "scipy.stats.norm.logpdf", "numpy.array_equal", "numpy.vectorize", "numpy.around" ] ]
adamleon/kuka
[ "cac2880ff9bf1fb798029280a9baf51450195fc4" ]
[ "kuka_driver/src/kuka_driver/kuka_rsi_driver.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2014, Norwegian University of Science and Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n# Author: Lars Tingelstad\n# Maintainer: Lars Tingelstad <[email protected]>\n\nimport time\nimport threading\n\nimport numpy as np\n\n# ROS imports\nimport rospy\nimport actionlib\nfrom sensor_msgs.msg import JointState\nfrom control_msgs.msg import FollowJointTrajectoryAction\nfrom control_msgs.msg import FollowJointTrajectoryFeedback\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\n\nfrom kuka_robot_sensor_interface_connection import RobotSensorInterfaceConnection\n\n\nclass KUKADriverNode(object):\n\n def __init__(self, host, port):\n self._host = host\n self._port = port\n\n self._lock = threading.Lock()\n\n # Publish rate (Hz) - Set to half of the rsi control rate\n self._pub_rate = rospy.get_param('pub_rate', 125)\n rospy.loginfo(\"Setting publish rate (hz) based on parameter: %f\", self._pub_rate)\n\n # Joint names\n def_joint_names = [\"joint_1\", \"joint_2\", \"joint_3\", \"joint_4\", \"joint_5\", \"joint_6\"]\n self._joint_names = rospy.get_param('controller_joint_names', def_joint_names)\n if len(self._joint_names) == 0:\n rospy.logwarn(\"Joint list is empty, did you set controller_joint_name?\")\n rospy.loginfo(\"Setting joint names based on parameter: %s\", str(self.joint_names))\n\n # RSI connection\n self._rsi_connection = RobotSensorInterfaceConnection(host, port)\n\n # Publishers\n self._joint_state_pub = rospy.Publisher('joint_states',\n JointState)\n self._joint_feedback_pub = rospy.Publisher('feedback_states',\n FollowJointTrajectoryFeedback)\n # Subscribers\n self._joint_trajectory_sub = rospy.Subscriber('joint_path_command',\n JointTrajectory,\n self._on_trajectory)\n self._joint_trajectory_point_sub = rospy.Subscriber('joint_command',\n JointTrajectoryPoint,\n self._on_trajectory_point)\n\n # Timed task (started automatically)\n period = rospy.Duration(1.0/self._pub_rate)\n rospy.loginfo('Setting up publish worker with period (sec): %s', 
str(period.to_sec()))\n rospy.Timer(period, self._publish_worker)\n\n def start(self):\n rospy.loginfo('Starting kuka_driver')\n self._rsi_connection.connect()\n\n def _on_trajectory(self, msg):\n try:\n rospy.loginfo('Received trajectory with %s points, executing callback', str(len(msg.points)))\n\n points = msg.points\n for i in range(len(points) - 1):\n self._rsi_connection.set_desired_joint_states(np.array(points[i].positions))\n time.sleep(points[i+1].time_from_start.to_sec() - points[i].time_from_start.to_sec())\n\n self._rsi_connection.set_desired_joint_states(np.array(points[-1].positions))\n\n except Exception as e:\n rospy.logerr('Unexpected exception: %s', e)\n\n def _on_trajectory_point(self, msg):\n try:\n #rospy.loginfo('Received trajectory point, executing callback')\n joint_position_command = np.array(msg.positions)\n self._rsi_connection.set_desired_joint_states(joint_position_command)\n except Exception as e:\n rospy.logerr('Unexpected exception: %s', e)\n\n\n def _publish_worker(self, event):\n self._state_publisher()\n\n def _state_publisher(self):\n try:\n joint_state_msg = JointState()\n joint_fb_msg = FollowJointTrajectoryFeedback()\n\n with self._lock:\n\n time = rospy.Time.now()\n\n q_actual, q_desired, q_error = self._rsi_connection.get_joint_states()\n\n #Joint states\n joint_state_msg.header.stamp = time\n joint_state_msg.name = self._joint_names\n joint_state_msg.position = q_actual\n\n self._joint_state_pub.publish(joint_state_msg)\n\n #Joint feedback\n joint_fb_msg.header.stamp = time\n joint_fb_msg.joint_names = self._joint_names\n joint_fb_msg.actual.positions = q_actual\n joint_fb_msg.desired.positions = q_desired\n joint_fb_msg.error.positions = q_error\n\n self._joint_feedback_pub.publish(joint_fb_msg)\n\n except Exception as e:\n rospy.logerr('Unexpected exception in joint state publisher: %s', e)\n\ndef main():\n args = rospy.myargv()[1:]\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('robot_ip', type=str, default='',\n help='Please specify the robot (RSI) hostname')\n parser.add_argument('--robot_port', type=int, default=10000,\n help='Please specify the robot (RSI) port number')\n\n args = parser.parse_args(args)\n robot_host = args.robot_ip\n robot_port = args.robot_port\n\n rospy.init_node('kuka_driver')\n\n driver = KUKADriverNode(robot_host, robot_port)\n driver.start()\n\n\n try:\n rospy.spin()\n except:\n rospy.ROSInterruptException()\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
appendixisu/zi2zi
[ "7c78df630d4630de7b688bdfa77a829372bd1166" ]
[ "model/preprocessing_helper.py" ]
[ "import os\nimport pdb\n\nimport PIL\nimport numpy as np\nfrom PIL import Image, ImageFont\nfrom PIL import ImageDraw\n\nfrom model.utils import save_concat_images\n\nCANVAS_SIZE = 256\nCHAR_SIZE = 220\nEMBEDDING_DIM = 128\n\n\ndef _draw_single_char(font, ch, width, height):\n img = Image.new(\"L\", (width, height), 255)\n draw = ImageDraw.Draw(img)\n draw.text((0, -30), ch, fill=0, font=font)\n return img\n\n\ndef get_textsize(font, ch):\n img = Image.new(\"L\", (1, 1), 255)\n draw = ImageDraw.Draw(img)\n char_size = draw.textsize(ch, font=font)\n return char_size\n\n\ndef draw_single_char(img, canvas_size, char_size):\n width, height = img.size\n factor = width * 1.0 / char_size\n\n max_height = canvas_size * 2\n if height / factor > max_height: # too long\n img = img.crop((0, 0, width, int(max_height * factor)))\n if height / factor > char_size + 5: # CANVAS_SIZE/CHAR_SIZE is a benchmark, height should be less\n factor = height * 1.0 / char_size\n\n img = img.resize((int(width / factor), int(height / factor)), resample=PIL.Image.LANCZOS)\n\n bg_img = Image.new(\"L\", (canvas_size, canvas_size), 255)\n offset = ((canvas_size - img.size[0]) // 2, (canvas_size - img.size[1]) // 2)\n bg_img.paste(img, offset)\n return bg_img\n\n\ndef draw_single_char_by_font(ch, font, canvas_size, char_size):\n width, height = get_textsize(font, ch)\n # print(ch, 'font size =', width, height )\n char_img = _draw_single_char(font, ch, width, height)\n\n return draw_single_char(char_img, canvas_size, char_size)\n\n\ndef save_imgs(imgs, count, save_dir):\n p = os.path.join(save_dir, \"inferred_%04d.png\" % count)\n save_concat_images(imgs, img_path=p)\n print(\"generated images saved at %s\" % p)\n\n\ndef draw_paired_image(src_img, dst_img, canvas_size):\n assert src_img.size == (canvas_size, canvas_size)\n assert dst_img.size == (canvas_size, canvas_size)\n\n example_img = Image.new(\"L\", (canvas_size * 2, canvas_size), 255)\n example_img.paste(dst_img, (0, 0))\n example_img.paste(src_img, (canvas_size, 0))\n return example_img\n\n\ndef draw_example(ch, src_font, dst_font, canvas_size, filter_hashes, char_size):\n src_img = draw_single_char_by_font(ch, src_font, canvas_size, char_size)\n dst_img = draw_single_char_by_font(ch, dst_font, canvas_size, char_size)\n\n # check the filter example in the hashes or not\n dst_hash = hash(dst_img.tobytes())\n if dst_hash in filter_hashes or np.min(src_img) == 255 or np.min(dst_img) == 255:\n return None\n\n return draw_paired_image(src_img, dst_img, canvas_size)\n\n\ndef draw_example_src_only(ch, src_font, dst_img, canvas_size, char_size):\n src_img = draw_single_char_by_font(ch, src_font, canvas_size, char_size)\n\n assert dst_img.size == (canvas_size, canvas_size), pdb.set_trace()\n\n if np.min(src_img) == 255 or np.min(dst_img) == 255:\n return None\n\n example_img = Image.new(\"L\", (canvas_size * 2, canvas_size), 255)\n example_img.paste(dst_img, (0, 0))\n example_img.paste(src_img, (canvas_size, 0))\n return example_img\n\n\nif __name__ == '__main__':\n src_font = \"/Users/wei-chilan/Documents/python/zi2zi/data/raw_fonts/NotoSansCJKtc-Regular.otf\"\n print(os.path.isfile(src_font))\n src_font = ImageFont.truetype(src_font, size=CHAR_SIZE)\n src_img = draw_single_char_by_font('ๆˆ‘', src_font, CANVAS_SIZE, CHAR_SIZE)\n src_img.show()\n" ]
[ [ "numpy.min" ] ]
nagasudhirpulla/data_quality_stats_generator
[ "afe65c728dca41afb529e16140b46663e9bff9fd" ]
[ "index.py" ]
[ "# %%\nimport pandas as pd\nimport datetime as dt\nimport logging\nfrom dataFetcher import fetchPntHistData\nimport numpy as np\nfrom appUtils import addMonths\nimport argparse\nlogger = logging.getLogger(__name__)\n# %%\n# python index.py --file input/voltage_cs.xlsx --avg --max --min --sum --random\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--file\", help=\"Input file path\",\n default=\"input/pnts.xlsx\")\nparser.add_argument(\n '--avg', help=\"Average row to be included\", action='store_true')\nparser.add_argument(\n '--sum', help=\"Sum row to be included\", action='store_true')\nparser.add_argument('--min', help=\"Min row to be included\",\n action='store_true')\nparser.add_argument('--max', help=\"Max row to be included\",\n action='store_true')\nparser.add_argument('--random', help=\"Use random data instead of real data\",\n action='store_true')\nargs = parser.parse_args()\nfPath = args.file\nisDumpAvg = args.avg\nisDumpSum = args.sum\nisDumpMax = args.max\nisDumpMin = args.min\nisRandom = args.random\n\npntsDf = pd.read_excel(fPath)\npntTags = pntsDf.iloc[:, 2].drop_duplicates()\n# %%\n# set start and end times for this month\nnowDt = dt.datetime.now()\nstartDt = dt.datetime(nowDt.year, nowDt.month, 1)\nendDt = addMonths(startDt, 1)-dt.timedelta(days=1)\n\n# uncomment this if absolute times required\n# startDtStr = dt.datetime(2020, 5, 1)\n# endDtStr = dt.datetime(2020, 5, 31)\n\ndateBinDays = 1\ndateBinIntrvl = dt.timedelta(days=dateBinDays)\ndateBins = pd.date_range(start=startDt, end=endDt, freq=dateBinIntrvl)\n# %%\npntsQualityPercSumm = pd.DataFrame()\nfor pntItr in range(len(pntsDf)):\n pntId = pntsDf[pntsDf.columns[0]][pntItr].strip()\n pntName = pntsDf[pntsDf.columns[1]][pntItr].strip()\n pntGrp = pntsDf[pntsDf.columns[2]][pntItr].strip()\n print(\"itr = {0}, id = {1}, name = {2}, grp = {3}\".format(\n pntItr+1, pntId, pntName, pntGrp))\n pntQualSumm = pd.DataFrame()\n for currDt in dateBins:\n # print(\"date = {0}\".format(currDt))\n pntSampls = fetchPntHistData(\n pntId, currDt, currDt+dateBinIntrvl-dt.timedelta(seconds=1), logger=logger, isRandom=isRandom)\n pntQuals = [v['status'] for v in pntSampls]\n numSampls = len(pntQuals)\n if numSampls == 0:\n goodSamplsPerc = 0\n else:\n goodSamplsPerc = len([k for k in pntQuals if k in [\n 'GOOD_LIMVIOL', 'GOOD']])*100/numSampls\n # goodSamplsPerc = round(goodSamplsPerc, 2)\n binQualSumm = pd.DataFrame(columns=['date', 'good_perc'], data=[\n [currDt, goodSamplsPerc]])\n pntQualSumm = pntQualSumm.append(binQualSumm, ignore_index=True)\n pntQualSumm['station'] = pntGrp\n # print(pntQualSumm)\n pntsQualityPercSumm = pntsQualityPercSumm.append(\n pntQualSumm, ignore_index=True)\n\n# print(pntsQualityPercSumm)\n# %%\npntsQualitySummary = pntsQualityPercSumm.pivot_table(\n index=\"date\", columns=\"station\", values=\"good_perc\", aggfunc=np.max, fill_value=0)\nsummaryTags = [x for x in pntTags if x in pntsQualitySummary.columns]\npntsQualitySummary = pntsQualitySummary[summaryTags]\npntsQualitySummary.columns.name = None\n\nreportDf = pntsQualitySummary\n\nif isDumpAvg:\n # calculate average row\n avgRow = pd.DataFrame(pntsQualitySummary.mean(axis=0)).T\n reportDf = reportDf.append(avgRow)\n newIndex = reportDf.index.tolist()\n newIndex[-1] = \"AVG\"\n reportDf.index = newIndex\n\nif isDumpSum:\n # calculate sum row\n sumRow = pd.DataFrame(pntsQualitySummary.sum(axis=0)).T\n reportDf = reportDf.append(sumRow)\n newIndex = reportDf.index.tolist()\n newIndex[-1] = \"SUM\"\n reportDf.index = newIndex\n\nif 
isDumpMax:\n # calculate max row\n maxRow = pd.DataFrame(pntsQualitySummary.max(axis=0)).T\n reportDf = reportDf.append(maxRow)\n newIndex = reportDf.index.tolist()\n newIndex[-1] = \"MAX\"\n reportDf.index = newIndex\n\nif isDumpMin:\n # calculate min row\n minRow = pd.DataFrame(pntsQualitySummary.min(axis=0)).T\n reportDf = reportDf.append(minRow)\n newIndex = reportDf.index.tolist()\n newIndex[-1] = \"MIN\"\n reportDf.index = newIndex\n\nprint(reportDf)\n\n# %%\n# nowTimeStr = dt.datetime.strftime(dt.datetime.now(), \"%Y-%m-%d-%H-%M-%S-%f\")\nmonthTimeStr = dt.datetime.strftime(startDt, \"%m-%Y\")\ndumpFilename = 'output/measQualSumm_{}.xlsx'.format(monthTimeStr)\n\nwith pd.ExcelWriter(dumpFilename) as writer:\n reportDf.to_excel(writer, index=True, sheet_name='data_avail')\n\n# %%\nprint(\"Processing complete...\")\n" ]
[ [ "pandas.DataFrame", "pandas.read_excel", "pandas.date_range", "pandas.ExcelWriter" ] ]
yfletberliac/MERL
[ "6eca6c3c9fa0fbd766a82ef9a85fa383b8f649c9" ]
[ "baselines_merl/ddpg/ddpg.py" ]
[ "import os\nimport time\nfrom collections import deque\nimport pickle\n\nfrom baselines_merl.ddpg.ddpg_learner import DDPG\nfrom baselines_merl.ddpg.models import Actor, Critic\nfrom baselines_merl.ddpg.memory import Memory\nfrom baselines_merl.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise\nfrom baselines_merl.common import set_global_seeds\nimport baselines_merl.common.tf_util as U\n\nfrom baselines_merl import logger\nimport numpy as np\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\ndef learn(network, env,\n seed=None,\n total_timesteps=None,\n nb_epochs=None, # with default settings, perform 1M steps total\n nb_epoch_cycles=20,\n nb_rollout_steps=100,\n reward_scale=1.0,\n render=False,\n render_eval=False,\n noise_type='adaptive-param_0.2',\n normalize_returns=False,\n normalize_observations=True,\n critic_l2_reg=1e-2,\n actor_lr=1e-4,\n critic_lr=1e-3,\n popart=False,\n gamma=0.99,\n clip_norm=None,\n nb_train_steps=50, # per epoch cycle and MPI worker,\n nb_eval_steps=100,\n batch_size=64, # per MPI worker\n tau=0.01,\n eval_env=None,\n param_noise_adaption_interval=50,\n **network_kwargs):\n\n set_global_seeds(seed)\n\n if total_timesteps is not None:\n assert nb_epochs is None\n nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)\n else:\n nb_epochs = 500\n\n if MPI is not None:\n rank = MPI.COMM_WORLD.Get_rank()\n else:\n rank = 0\n\n nb_actions = env.action_space.shape[-1]\n assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.\n\n memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)\n critic = Critic(network=network, **network_kwargs)\n actor = Actor(nb_actions, network=network, **network_kwargs)\n\n action_noise = None\n param_noise = None\n if noise_type is not None:\n for current_noise_type in noise_type.split(','):\n current_noise_type = current_noise_type.strip()\n if current_noise_type == 'none':\n pass\n elif 'adaptive-param' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))\n elif 'normal' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n elif 'ou' in current_noise_type:\n _, stddev = current_noise_type.split('_')\n action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))\n else:\n raise RuntimeError('unknown noise type \"{}\"'.format(current_noise_type))\n\n max_action = env.action_space.high\n logger.info('scaling actions by {} before executing in env'.format(max_action))\n\n agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,\n gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,\n batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,\n actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,\n reward_scale=reward_scale)\n logger.info('Using agent with the following configuration:')\n logger.info(str(agent.__dict__.items()))\n\n eval_episode_rewards_history = deque(maxlen=100)\n episode_rewards_history = deque(maxlen=100)\n sess = U.get_session()\n # Prepare everything.\n agent.initialize(sess)\n 
sess.graph.finalize()\n\n agent.reset()\n\n obs = env.reset()\n if eval_env is not None:\n eval_obs = eval_env.reset()\n nenvs = obs.shape[0]\n\n episode_reward = np.zeros(nenvs, dtype = np.float32) #vector\n episode_step = np.zeros(nenvs, dtype = int) # vector\n episodes = 0 #scalar\n t = 0 # scalar\n\n epoch = 0\n\n\n\n start_time = time.time()\n\n epoch_episode_rewards = []\n epoch_episode_steps = []\n epoch_actions = []\n epoch_qs = []\n epoch_episodes = 0\n for epoch in range(nb_epochs):\n for cycle in range(nb_epoch_cycles):\n # Perform rollouts.\n if nenvs > 1:\n # if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each\n # of the environments, so resetting here instead\n agent.reset()\n for t_rollout in range(nb_rollout_steps):\n # Predict next action.\n action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True)\n\n # Execute next action.\n if rank == 0 and render:\n env.render()\n\n # max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch\n new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])\n # note these outputs are batched from vecenv\n\n t += 1\n if rank == 0 and render:\n env.render()\n episode_reward += r\n episode_step += 1\n\n # Book-keeping.\n epoch_actions.append(action)\n epoch_qs.append(q)\n agent.store_transition(obs, action, r, new_obs, done) #the batched data will be unrolled in memory.py's append.\n\n obs = new_obs\n\n for d in range(len(done)):\n if done[d]:\n # Episode done.\n epoch_episode_rewards.append(episode_reward[d])\n episode_rewards_history.append(episode_reward[d])\n epoch_episode_steps.append(episode_step[d])\n episode_reward[d] = 0.\n episode_step[d] = 0\n epoch_episodes += 1\n episodes += 1\n if nenvs == 1:\n agent.reset()\n\n\n\n # Train.\n epoch_actor_losses = []\n epoch_critic_losses = []\n epoch_adaptive_distances = []\n for t_train in range(nb_train_steps):\n # Adapt param noise, if necessary.\n if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:\n distance = agent.adapt_param_noise()\n epoch_adaptive_distances.append(distance)\n\n cl, al = agent.train()\n epoch_critic_losses.append(cl)\n epoch_actor_losses.append(al)\n agent.update_target_net()\n\n # Evaluate.\n eval_episode_rewards = []\n eval_qs = []\n if eval_env is not None:\n nenvs_eval = eval_obs.shape[0]\n eval_episode_reward = np.zeros(nenvs_eval, dtype = np.float32)\n for t_rollout in range(nb_eval_steps):\n eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)\n eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])\n if render_eval:\n eval_env.render()\n eval_episode_reward += eval_r\n\n eval_qs.append(eval_q)\n for d in range(len(eval_done)):\n if eval_done[d]:\n eval_episode_rewards.append(eval_episode_reward[d])\n eval_episode_rewards_history.append(eval_episode_reward[d])\n eval_episode_reward[d] = 0.0\n\n if MPI is not None:\n mpi_size = MPI.COMM_WORLD.Get_size()\n else:\n mpi_size = 1\n\n # Log stats.\n # XXX shouldn't call np.mean on variable length lists\n duration = time.time() - start_time\n stats = agent.get_stats()\n combined_stats = stats.copy()\n combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)\n combined_stats['rollout/return_std'] = np.std(epoch_episode_rewards)\n 
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)\n combined_stats['rollout/return_history_std'] = np.std(episode_rewards_history)\n combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)\n combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)\n combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)\n combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)\n combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)\n combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)\n combined_stats['total/duration'] = duration\n combined_stats['total/steps_per_second'] = float(t) / float(duration)\n combined_stats['total/episodes'] = episodes\n combined_stats['rollout/episodes'] = epoch_episodes\n combined_stats['rollout/actions_std'] = np.std(epoch_actions)\n # Evaluation statistics.\n if eval_env is not None:\n combined_stats['eval/return'] = eval_episode_rewards\n combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)\n combined_stats['eval/Q'] = eval_qs\n combined_stats['eval/episodes'] = len(eval_episode_rewards)\n def as_scalar(x):\n if isinstance(x, np.ndarray):\n assert x.size == 1\n return x[0]\n elif np.isscalar(x):\n return x\n else:\n raise ValueError('expected scalar, got %s'%x)\n\n combined_stats_sums = np.array([ np.array(x).flatten()[0] for x in combined_stats.values()])\n if MPI is not None:\n combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)\n\n combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}\n\n # Total statistics.\n combined_stats['total/epochs'] = epoch + 1\n combined_stats['total/steps'] = t\n\n for key in sorted(combined_stats.keys()):\n logger.record_tabular(key, combined_stats[key])\n\n if rank == 0:\n logger.dump_tabular()\n logger.info('')\n logdir = logger.get_dir()\n if rank == 0 and logdir:\n if hasattr(env, 'get_state'):\n with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:\n pickle.dump(env.get_state(), f)\n if eval_env and hasattr(eval_env, 'get_state'):\n with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:\n pickle.dump(eval_env.get_state(), f)\n\n\n return agent\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones", "numpy.mean", "numpy.std", "numpy.isscalar", "numpy.abs" ] ]
ehtec/pie-chart-ocr
[ "ea36e29e9e585bd7a1779d7563578f190a6e0e65" ]
[ "piechartocr/color_processer_wrapper.py" ]
[ "from ctypes import cdll, c_double\n# from ctypes.util import find_library\nimport ctypes\n# from numpy.ctypeslib import as_ctypes, as_array, as_ctypes_type\nimport numpy as np\nfrom numpy.ctypeslib import ndpointer\nimport copy\nimport logging\nfrom .basefunctions import find_lib, get_root_path\nimport os\n\n\n# relative path to colorprocesser library\nRELATIVE_LIBRARY_PATH = \"build/\"\n\n\n# lib_path = find_library('colorprocesser')\nlib_path = find_lib(os.path.join(get_root_path(), RELATIVE_LIBRARY_PATH), 'libcolorprocesser')\nif not bool(lib_path):\n raise FileNotFoundError(\"colorprocesser library not found!\")\n# lib_full_path = os.path.join(get_root_path(), RELATIVE_LIBRARY_PATH, lib_path)\nlib_full_path = lib_path\nlogging.info(\"colorprocesser library path: {0}\".format(lib_full_path))\nlib = cdll.LoadLibrary(lib_full_path)\n# set output types for ColorProcesser methods\nlib.ColorProcesser_helloworld.restype = c_double\nlib.ColorProcesser_test_calc.restype = c_double\n\nlib.ColorProcesser_color_distance.restype = c_double\n\n\nclass ColorProcesser(object):\n def __init__(self):\n self.obj = lib.ColorProcesser_new()\n # fun = lib.ColorProcesser_new\n # fun.argtypes = []\n # fun.restype = ctypes.c_void_p\n # self.obj = fun()\n #\n # def __del__(self):\n # fun = lib.ColorProcesser_delete\n # fun.argtypes = [ctypes.c_void_p]\n # fun.restype = None\n # fun(self.obj)\n\n def helloworld(self):\n return lib.ColorProcesser_helloworld(self.obj)\n\n def test_calc(self):\n return lib.ColorProcesser_test_calc(self.obj)\n\n def color_distance(self, c1, c2):\n\n return lib.ColorProcesser_color_distance(\n self.obj,\n ctypes.c_double(c1[0]),\n ctypes.c_double(c1[1]),\n ctypes.c_double(c1[2]),\n ctypes.c_double(c2[0]),\n ctypes.c_double(c2[1]),\n ctypes.c_double(c2[2])\n )\n\n def array_color_distance(self, the_color, color_array):\n\n input_shape = color_array.shape\n\n # print(input_shape)\n\n m = np.prod(input_shape[:-1])\n\n # print(m)\n\n new_color_array = color_array.reshape(m, 3)\n\n # print(new_color_array)\n\n r1 = the_color[0]\n g1 = the_color[1]\n b1 = the_color[2]\n\n r2 = np.ascontiguousarray(new_color_array[:, 0], dtype=np.double)\n g2 = np.ascontiguousarray(new_color_array[:, 1], dtype=np.double)\n b2 = np.ascontiguousarray(new_color_array[:, 2], dtype=np.double)\n\n # the_color_distances = np.ascontiguousarray(np.zeros((m,)), dtype=np.double)\n\n lib.ColorProcesser_array_color_distance.restype = ndpointer(dtype=c_double, shape=(m, ))\n\n res_array = lib.ColorProcesser_array_color_distance(\n self.obj,\n c_double(r1),\n c_double(g1),\n c_double(b1),\n ctypes.c_void_p(r2.ctypes.data),\n ctypes.c_void_p(g2.ctypes.data),\n ctypes.c_void_p(b2.ctypes.data),\n # ctypes.c_void_p(the_color_distances.ctypes.data),\n ctypes.c_int(m)\n )\n\n # res_array_content = ctypes.cast(res_array, ndpointer(dtype=c_double, shape=(m, )))\n\n res_array_copy = copy.deepcopy(res_array)\n\n # res_array_p = ctypes.cast(res_array, ctypes.POINTER(c_double))\n\n res_array_p = res_array.ctypes.data_as(ctypes.POINTER(c_double))\n\n lib.free_double_array(res_array_p)\n\n return res_array_copy.reshape(input_shape[:-1])\n" ]
[ [ "numpy.ascontiguousarray", "numpy.prod", "numpy.ctypeslib.ndpointer" ] ]
mbarbetti/CaloChallenge
[ "d7b78f6b974cdc12bf4b0410a99ff4b0be1b4bde" ]
[ "code/XMLHandler.py" ]
[ "# pylint: disable=invalid-name\n\"\"\"\n Helperclass that reads the binning xml file\n\"\"\"\n\nimport math\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\nclass XMLHandler:\n\n def __init__(self, particle_name, filename='binning.xml'):\n\n tree = ET.parse(filename)\n root = tree.getroot()\n\n self.r_bins = []\n self.a_bins = []\n self.nBinAlphaPerlayer = []\n self.alphaListPerLayer = []\n\n self.r_edges = []\n self.r_midvalue = []\n self.r_midvalueCorrected = []\n self.relevantlayers = []\n self.layerWithBinningInAlpha = []\n\n self.eta_edges = []\n self.phi_edges = []\n self.eta_bins = []\n self.phi_bins = []\n\n self.etaRegion = 0\n\n found_particle = False\n for particle in root:\n if particle.attrib[\"name\"] == particle_name:\n found_particle = True\n for layer in particle:\n self.ReadPolarCoordinates(layer)\n if not found_particle:\n raise ValueError('Particle {} not found in {}'.format(particle_name, filename))\n\n self.totalBins = 0\n self.bin_number = []\n\n self.eta_all_layers = []\n self.phi_all_layers = []\n\n self.SetEtaAndPhiFromPolar()\n self.bin_edges = [0]\n for i in range(len(self.bin_number)):\n self.bin_edges.append(self.bin_number[i] + self.bin_edges[i])\n\n def ReadPolarCoordinates(self, subelem):\n bins = 0\n r_list = []\n str_r = subelem.attrib.get('r_edges')\n r_list = [float(s) for s in str_r.split(',')]\n bins = len(r_list) - 1\n\n self.r_edges.append(r_list)\n self.r_bins.append(bins)\n layer = subelem.attrib.get('id')\n\n bins_in_alpha = int(subelem.attrib.get('n_bin_alpha'))\n self.a_bins.append(bins_in_alpha)\n self.r_midvalue.append(self.get_midpoint(r_list))\n if bins_in_alpha > 1:\n self.layerWithBinningInAlpha.append(int(layer))\n\n def fill_r_a_lists(self, layer):\n no_of_rbins = self.r_bins[layer]\n list_mid_values = self.r_midvalue[layer]\n list_a_values = self.alphaListPerLayer[layer]\n r_list = []\n a_list = []\n actual_no_alpha_bins = self.nBinAlphaPerlayer[layer][0]\n for j0 in range(0, actual_no_alpha_bins):\n for i0 in range(0, no_of_rbins):\n r_list.append(list_mid_values[i0])\n a_list.append(list_a_values[i0][j0])\n return r_list, a_list\n\n def get_midpoint(self, arr):\n middle_points = []\n for i in range(len(arr)-1):\n middle_value = arr[i] + float((arr[i+1] - arr[i]))/2\n middle_points.append(middle_value)\n return middle_points\n\n def SetEtaAndPhiFromPolar(self):\n self.minAlpha = -math.pi\n self.SetNumberOfBins()\n\n r_all_layers = []\n alpha_all_layers = []\n\n for layer in range(len(self.r_bins)):\n r_list, a_list = self.fill_r_a_lists(layer)\n r_all_layers.append(r_list)\n alpha_all_layers.append(a_list)\n\n for layer in range(len(self.r_bins)):\n eta = r_all_layers[layer] * np.cos(alpha_all_layers[layer])\n self.eta_all_layers.append(eta)\n phi = r_all_layers[layer] * np.sin(alpha_all_layers[layer])\n self.phi_all_layers.append(phi)\n\n def SetNumberOfBins(self):\n for layer in range(len(self.r_bins)):\n bin_no = 0\n alphaBinList = []\n nBinAlpha = []\n\n bin_no = self.r_bins[layer]*self.a_bins[layer]\n centres_alpha = self.get_midpoint(np.linspace(self.minAlpha,\n math.pi, self.a_bins[layer]+1))\n for _ in range(self.r_bins[layer]):\n alphaBinList.append(centres_alpha)\n nBinAlpha.append(self.a_bins[layer])\n\n self.totalBins += bin_no\n self.bin_number.append(bin_no)\n if self.r_bins[layer] > 0:\n self.relevantlayers.append(layer)\n self.alphaListPerLayer.append(alphaBinList)\n self.nBinAlphaPerlayer.append(nBinAlpha)\n else:\n self.alphaListPerLayer.append([0])\n self.nBinAlphaPerlayer.append([0])\n\n def 
GetTotalNumberOfBins(self):\n return self.totalBins\n\n def GetBinEdges(self):\n return self.bin_edges\n\n def GetEtaPhiAllLayers(self):\n return self.eta_all_layers, self.phi_all_layers\n\n def GetRelevantLayers(self):\n return self.relevantlayers\n\n def GetLayersWithBinningInAlpha(self):\n return self.layerWithBinningInAlpha\n\n def GetEtaRegion(self):\n return self.etaRegion\n" ]
[ [ "numpy.sin", "numpy.linspace", "numpy.cos" ] ]
tpoiii/dipole_tracer_ac6
[ "d23287e72536d3aadf457362367a3d018551ca35" ]
[ "odc_util.py" ]
[ "\"\"\"\nApproximate translation of IRBEM-Lib open diffusion code (odc) utility library from Matlab\nContributors: Alexa Halford, Paul O'Brien\n\"\"\"\n\nimport numpy as np\n# global shared constants\n#SI/mks constants\nmks = {}\nmks['e']= 1.602176487e-19 # Coulombs per fundamental charge\nmks['c'] = 299792458 # m/s\nmks['eV'] = mks['e'] # Joules\nmks['keV'] = mks['eV']*1e3 # Joules\nmks['MeV'] = mks['eV']*1e6 # Joules\nmks['GeV'] = mks['eV']*1e9 # Joules\nmks['epsilon0'] = 8.854187817e-12 # % F/m = C^2 s^2 / kg / m^3 - permitivity of free space\nmks['mu0'] = 4*np.pi*1e-7 # % H/m = N/A^2 = kg m/C^2 - permeability of free space\nmks['R_E'] = 6371.2e3 # m, IAU 1966? IRBEM GDZ value\nmks['electron'] = {}\nmks['electron']['q'] = -mks['e'] # charge\nmks['electron']['m0'] = 9.10938215e-31 # rest mass, kg\nmks['proton'] = {}\nmks['proton']['q'] = mks['e'] # charge\nmks['proton']['m0'] = 1.672621637e-27 # rest mass, kg\n\n# Schulz & Lanzerotti [1974] constants\nSL = {};\n \nSL['T0'] = 1+1/(2*np.sqrt(3))*np.log(2+np.sqrt(3)) # S&L 1.28a\nSL['T1'] = np.pi/6*np.sqrt(2) # S&L 1.28b\nSL['B0'] = 31e3*1e-9 # T dipole field at lambda=0,L=1 (S&L value)\nSL['a'] = 6371.2e3 # Earth Radius, meters (S&L value)\nSL['Q0'] = -27.1266694 # S&L 1.78c\nSL['Q1'] = -90*SL['T1'] # S&L 1.78b\nSL['Qp1'] = (15/2)*(9*SL['T0'] - 41*SL['T1']) # S&L 1.78d\nSL['Y0'] = 2*SL['T0'] # S&L 1.31 (limiting case in subsequent text)\n\nconstants = {'mks':mks,'SL':SL}\n\ndef alphaL2K(alpha,L,Bunit='nT'):\n \"\"\"\n returns K in RE*sqrt(nT) for given equatorial pitch angle\n alpha, in deg, and L shell, in RE\n Bunit - optional, if 'G' assumes K in RE*sqrt(G). Otherwise K in RE*sqrt(nT)\n global odc_constants\n \"\"\"\n \n y = np.sin(np.radians(alpha))\n if Bunit == 'nT':\n Bm = SL['B0']*1e9/L**3./y**2 # mirror field, nT\n elif Bunit=='G':\n Bm = (SL['B0']*1e9/L**3./y**2 )/1e5 # nT to G\n K = SL_Y(y)*L*np.sqrt(Bm)\n return K\n\nglobal _KL2alpha_table # private persistent variable\n_KL2alpha_table = None # private persistent variable\ndef KL2alpha(K,L,Bunit='nT'):\n \"\"\"\n returns equatorial pitch angle alpha in degrees\n for given K, L. K in RE*sqrt(nT)\n Bunit - optional, if 'G' returns K in RE*sqrt(G). Otherwise K in\n RE*sqrt(nT)\n Not sure if table should be a dictionary like I've set it up for now, or not. 
\n \"\"\"\n global _KL2alpha_table\n if _KL2alpha_table is None:\n table = {}\n dy = 1e-4\n #Start at dy:go in steps of dy: up to 1\n table['y'] = np.arange(dy,1+dy/2,dy)\n table['Yy'] = SL_Y(table['y'])/table['y']\n if Bunit=='G':\n K = K*np.sqrt(1e5) # RE*sqrt(G) to RE*sqrt(nT)\n # K*sqrt(L)/sqrt(B0) = Y(y)/y\n KLB = K*np.sqrt(L)/np.sqrt(SL['B0']*1e9)\n y = np.interp(KLB,_KL2alpha_table['y'],_KL2alpha_table['Yy'],KLB)\n alpha = np.arcsin(y)\n return alpha\n\ndef fce2B(fce):\n \"\"\"\n returns B in nT for electron gyro given in Hz\n shorthand\n \"\"\"\n electron = mks['electron']\n B = fce*2*np.pi*electron['m0']/np.abs(electron['q'])*1e9 # nT\n return B\n\ndef SL_T(y, returndT = False):\n \"\"\"\n Schulz & Lanzerotti's T(y)\n and dT = dT/dy if requested\n \"\"\"\n T = SL['T0']-0.5*(SL['T0']-SL['T1'])*(y+np.sqrt(y)) # 1/4 bounce integral of 1\n if returndT:\n dT = -0.5*(SL['T0']-SL['T1'])*(1+1./np.sqrt(y)/2) # dT/dy\n return T, dT\n else:\n return T\n\ndef SL_Q(y):\n \"\"\"\n computes Q(y) from S&L, 1.79\n \"\"\"\n Q = SL['Q0']+(2*SL['Q1'] - 2*SL['Q0'] - (1/4)*SL['Qp1'])*y**4 + (SL['Q0']-SL['Q1']+(1/4)*SL['Qp1'])*y**8 # Q(y) from S&L, 1.79\n return Q\n\ndef SL_D(y):\n \"\"\"\n computes D(y) from S&L, 1.36\n \"\"\"\n D = SL_T(y)/2-SL_Y(y)/12\n return D\n\ndef SL_Y(y):\n \"\"\"\n computes Y(y) from S&L 1.31\n \"\"\"\n Y = 2*(1-y)*SL['T0'] + (SL['T0']-SL['T1'])*(y*np.log(y) + 2*y-2*y**0.5) # S&L 1.31\n if np.isscalar(y):\n if y == 0:\n Y = SL['Y0']\n else:\n Y[y==0] = SL['Y0']\n return Y\n\ndef dipole_mirror_latitude(alpha0,units = 'deg'):\n \"\"\"\n mirror_lat = dipole_mirror_latitude(alpha0)\n compute dipolar mirror latitude of particle with equatorial pitch angle\n alpha0, angles in degrees\n mirror_lat = dipole_mirror_latitude(alpha0,'rad')\n angles in radians\n \"\"\"\n \n if units.lower().startswith('d'):\n torad = np.pi/180\n elif units.lower().startswith('r'):\n torad = 1\n else:\n raise Exception('do not know what units were ment, the accpetible unit calls are deg or rad')\n\n sina0 = np.sin(alpha0*torad)\n if sina0==0:\n mirror_lat = np.inf\n return mirror_lat\n\n # note, below fixes error in Shprits thesis, eqn F13\n # which has sina0**2. 
Instead use sina0**4 from Shprits 2006 eqn 10.\n mirror_lat = np.zeros(len(sina0))*np.nan\n for i in range(len(sina0)):\n Px = [1, 0, 0, 0, 0, 3*sina0[i]**4, -4*sina0[i]**4]\n xroots = np.roots(Px)\n #xroot = xroots((imag(xroots)==0) & (real(xroots)>0))\n xroot = xroots[(np.abs(xroots.imag)<1e-30) & (xroots.real>0)] \n mirror_lat[i] = np.degrees(np.arccos(np.sqrt(xroot))) # mirror latitude\n return mirror_lat\n\nglobal _maglat_table\n_maglat_table = None\ndef BB0toMagLat(BB0, unit = 'deg'):\n \"\"\"\n maglat = BB0toMagLat(BB0) # returns maglat in deg\n maglat = BB0toMagLat(BB0,'rad') # returns maglat in radians\n \n BB0 is B / Bmin\n where B is the local magnetic field strength\n and Bmin is the minimum (i.e., equatorial) magnetic field strength on the\n same field line.\n \n maglat is the unsigned dipole latitude\n \"\"\"\n \n # we invert the B/Bmin vs maglat equation via a look-up table\n # we use a persistent variable so we don't have to regenerate the table\n # for each call to this function\n \n global _maglat_table\n if _maglat_table is None:\n _maglat_table = {}\n _maglat_table['deg'] = np.arange(0,90.0005,0.001)# matlab: (0:0.01:90)\n _maglat_table['rads'] = np.radians(_maglat_table['deg'])\n # this next bit is the expression for B/Bmin vs magnetic latitude for a dipole\n _maglat_table['bb0'] = (1+3*np.sin(_maglat_table['rads'])**2)**(1/2)/np.cos(_maglat_table['rads'])**6\n\n if unit.lower().startswith('d'):\n maglat = np.interp(BB0,_maglat_table['deg'], _maglat_table['bb0'])\n elif unit.lower().startswith('r'):\n maglat = np.interp(BB0,_maglat_table['rads'], _maglat_table['bb0'])\n else:\n raise Exception('Unknown unit ' + unit + ' please use either deg or rad');\n\n return maglat\n\ndef DriftPeriod(Species,Energy,PitchAngle,L, unit = 'deg'):\n \"\"\" \n calculates particle drift period (seconds), dipole\n Species: 'e' for electrons, 'p' for protons\n Energy: in MeV\n PitchAngle: in degrees (equatorial pitch angle)\n L: dimensionless dipole L value\n \"\"\"\n\n Species = SelectSpecies(Species)\n m0 = mks[Species]['m0']\n q = mks[Species]['q']\n c = mks['c'] # m/s \n a = SL['a'] # RE, meters\n B0 = SL['B0'] # T\n\n gamma,v,m = MeVtogamma(Energy,Species)\n if unit.lower().startswith('d'):\n y = np.sin(np.radians(PitchAngle))\n elif unit.lower().startswith('r'):\n y = np.sin(PitchAngle)\n Ty = SL_T(y)\n Dy = SL_D(y)\n\n # S&L eq 1.35, w/o minus sign, assume always want positive drift velocity\n f = (3*L/2/np.pi/gamma)*(gamma**2-1)*(c/a)**2*(m0*c/abs(q)/B0)*(Dy/Ty)/c # extra 1/c for SI\n Td = 1.0/f # seconds\n return Td\n\ndef BouncePeriod(Species,Energy,PitchAngle,L,unit='deg'):\n \"\"\"\n function Tb = BouncePeriod(Species,Energy,PitchAngle,L,unit='deg')\n calculates particle bounce period Tb (seconds), dipole\n Species: 'e' for electrons, 'p' for protons\n Energy: in MeV\n PitchAngle: in degrees or radians (equatorial pitch angle)\n unit: specify 'rad' for PitchAngle in radians\n L: dimensionless dipole L value\n \"\"\"\n\n a = SL['a'] # Earth Radius, meters\n #y = sind(PitchAngle);\n if unit.lower().startswith('d'):\n y = np.sin(np.radians(PitchAngle))\n elif unit.lower().startswith('r'):\n y = np.sin(PitchAngle)\n (gamma,v,m) = MeVtogamma(Energy,Species)\n Ty = SL_T(y)\n Tb = 4*L*a/v*Ty\n return Tb\n\ndef GyroPeriod(Species,Energy,MagLat,L):\n \"\"\"\n function Tg = GyroPeriod(Species,Energy,MagLat,L,unit='deg')\n calculates particle gyro period Tg (seconds), dipole\n Species: 'e' for electrons, 'p' for protons\n Energy: in MeV\n MagLat: in degrees\n L: dimensionless dipole L 
value\n \n Calculation done in .* so that vector and matrix input can work,\n if all inputs are the same size (or scalar)\n \"\"\"\n\n Species = SelectSpecies(Species)\n q = mks[Species]['q'] # C\n (gamma,v,m) = MeVtogamma(Energy,Species)\n\n B = dipoleB(L,MagLat)/1e9 # T\n\n f = abs(q)*B/(2*np.pi*m) # no \"c\" in denominator in SI units\n Tg = 1./f\n return Tg\n\ndef dipoleB(L,MagLat,phi_deg,nargout=1):\n \"\"\"\n B = dipoleB(L,MagLat,phi_deg,nargout=1)\n (B,Bx,By,Bz,X,Y,Z) = dipoleB(L,MagLat,phi_deg,nargout=7)\n computes magnitude of dipole field, nT\n returns components and positions (in RE) if requested\n MagLat: in degrees\n L: dimensionless dipole L value\n phi_deg: azimuth angle, degrees\n \"\"\"\n\n Beq = SL['B0']/L**3*1e9 # T to nT\n smlat = np.sin(np.radians(MagLat))\n cmlat = np.cos(np.radians(MagLat))\n cmlat6 = cmlat**6\n B = Beq*np.sqrt(1+3*smlat**2)/cmlat6\n if nargout==1:\n return B\n\n phi = np.radians(phi_deg)\n cphi = np.cos(phi)\n sphi = np.sin(phi)\n \n # angular part of Bx, By, Bz\n Btmp = Beq/cmlat6\n Bx = -3*cphi*cmlat*smlat*Btmp\n By = -3*sphi*cmlat*smlat*Btmp\n Bz = -1.*(3*smlat**2 - 1)*Btmp\n \n R = L*cmlat**2\n X = R*cmlat*cphi\n Y = R*cmlat*sphi\n Z = R*smlat\n return B,Bx,By,Bz,X,Y,Z\n\ndef SelectSpecies(Species):\n \"\"\"\n convert various alternatives to standard species name: electron or proton\n \"\"\"\n if Species.lower() in ('e','e-','electron','beta'):\n Species = 'electron'\n elif Species.lower() in ('p','p+','h+','proton','h','hydrogen'):\n Species = 'proton'\n else:\n raise Exception('Unknown Species: %s ' % str(Species))\n \n return Species\n\ndef MeVtogamma(MeV,species):\n \"\"\"\n compute gamma, v, m\n given energy in MeV and species 'e','p', etc\n v in m/s\n m in kg\n \"\"\"\n species = SelectSpecies(species)\n m0 = mks[species]['m0']\n\n W = MeV*mks['MeV'] # Energy, Joules\n\n gamma = 1+W/(m0*mks['c']**2) # relativistic factor \n v=mks['c']*np.sqrt(1-gamma**-2)\n m = m0*gamma\n\n return gamma,v,m\n\ndef dipoleIJ(L,y,MeV=None,species=None):\n \"\"\"\n I = util.dipoleIJ(L,y);\n (I,J) = util.dipoleIJ(L,y,MeV,species)\n computes I = L*Y(y) (in RE)\n and, if requested J = 2*p*I (in RE*MeV/c)\n for species ('e' or 'p')\n energy in MeV\n L: dimensionless dipole L value\n and y = sin(alpha_equatorial)\n \"\"\"\n \n I = L*SL_Y(y)\n \n if MeV is None:\n return I\n \n (gamma,v,m) = MeVtogamma(MeV,species)\n p = m*v/mks['MeV']*mks['c'] # MeV/c\n J = 2.*p*I # RE*MeV/c\n \n return (I,J)\n\ndef MBtoMeV(M,B,alpha,species):\n \"\"\"\n given M in MeV/G, and B in nT, alpha in degrees, and species\n returns energy, in MeV\n \"\"\"\n Bm = B/np.sin(np.radians(alpha))**2 # Bmirror\n species = SelectSpecies(species)\n m0 = mks[species]['m0']\n c = mks['c']\n\n p2 = 2*m0*Bm*M # (kg * nT * MeV/G) = (1e5)*(kg MeV)\n p2 = 1e-5*p2*mks['MeV'] # kg J = kg**2 m**2 / s**2\n gamma = np.sqrt(1+p2/m0**2/c**2)\n E = (gamma-1)*m0*c**2 # J\n MeV = E/mks['MeV']\n\n return MeV\n\ndef rigidity(MeV,species):\n \"\"\"\n returns rigidity (p/q) in GV/c (/c is usually dropped)\n \"\"\"\n\n gamma,v,m = MeVtogamma(MeV,species);\n # v in m/s\n # m in kg\n\n R = m*v/abs(mks[species]['q'])*mks['c'] # kg m**2 /s**2 / C = V \n R = R*1e-9 # R in GV\n return R\n\ndef flux2psd(flux,energy,species,energy_unit):\n \"\"\"\n # psd = flux2psd(flux,energy,species,energy_unit)\n # psd = flux2psd(flux,energy,'e','MeV');\n # species is:\n # electrons: 'electron','e','e-'\n # protons: 'p','H+','p+'\n flux is expected to be a matrix Nt x NE\n flux is in units of #/cm**2/s/sr/(energy_unit)\n energy is expected to 
be a vector of length NE\n psd is in units of (MeV s)**(-3)\n \"\"\"\n\n c_cm = mks['c']*100 # speed of light in cm/s\n\n species = SelectSpecies(species)\n m0 = mks[species]['m0']\n m0c2 = m0*mks['c']**2/mks['MeV']\n\n inMeV = EnergyUnitInMeV(energy_unit)\n\n W = energy*inMeV # energy, MeV\n gamma = W/m0c2+1 # relativistic factor\n p2 = (gamma**2-1)*m0c2**2/c_cm**2 # p**2 = (gamma**2-1)*m0**2*c**2; units of (MeV/cm*s)**2\n psd = flux/inMeV/p2 # psd = flux/p**2\n # #/cm**2/s/sr/MeV / (MeV/cm*s)**2\n # cm**2 / (cm**2 s sr MeV MeV**2 s**2)\n # # / (MeV**3 s**3)\n\n return psd\n\ndef EnergyUnitInMeV(energy_unit):\n \"\"\"\n inMeV = EnergyUnitInMeV(energy_unit)\n returns the MeV equivalent of 1 energy_unit\n e.g., EnergyUnitInMeV('GeV') = 1000\n \"\"\"\n if energy_unit == 'eV':\n inMeV = 1e-6\n elif energy_unit == 'keV':\n inMeV = 1e-3\n elif energy_unit == 'MeV':\n inMeV = 1\n elif energy_unit == 'GeV':\n inMeV = 1e3\n else:\n print ('Unknown energy unit, please use either eV, keV, MeV, GeV')\n return inMeV\n\n\ndef GyroRadius(Species,Energy,B):\n \"\"\"\n # r = GyroRadius(Species,Energy,B)\n # Species - 'e', 'p', etc\n # Energy - particle kinetic energy MeV\n # B - local magnetic field strength, nT\n # r = gyroradius in m\n \"\"\"\n\n species = SelectSpecies(Species)\n q = mks[species]['q']\n Bsi = B/1e9 # B in TeSL['a']\n gamma,vmag,m = MeVtogamma(Energy,species) # gamma, speed (m/s), m (kg)\n r = m*vmag/np.abs(q)/Bsi # kg * m/s / C / T = m\n return r\n\ndef EBalpha2M(E,B,alpha,species):\n \"\"\"\n # for E in MeV, alpha in degrees\n # returns M in MeV/G or MeV/nT, using same B unit as input\n \"\"\"\n\n mks = constants['mks']\n species = SelectSpecies(species)\n m0 = mks[species]['m0']\n \n c = mks['c'] # m/s\n EJ = E*mks['MeV'] # J = kg (m/s)**2\n p2 = (EJ**2+2*EJ*m0*c**2)/c**2 # (kg m/s)**2\n M = p2*np.sin(np.radians(alpha))**2./(2*m0*B)/mks['MeV'] # J/G -> MeV/G\n return M\n\ndef Ealpha2MK(E,alpha,L,species,Bunit = 'G'):\n \"\"\"\n for E in MeV, alpha in degrees\n Bunit is 'nT' or 'G' (default is 'G')\n returns dipole M,K\n M in MeV/G (or MeV/nT if Bunit='nT')\n K in RE*sqrt(G)(or RE*sqrt(nT) if Bunit='nT')\n \"\"\"\n \n mks = constants['mks']\n species = SelectSpecies(species)\n m0 = mks[species]['m0']\n K = alphaL2K(alpha,L,Bunit)\n BnT = dipoleB(L,0,0)\n if Bunit =='G':\n Beq = BnT/1e5 # 1 nT = 1E-5 G\n else:\n Beq = BnT\n\n c = mks['c'] # m/s\n EJ = E*mks['MeV'] # J = kg (m/s)**2\n p2 = (EJ**2+2*EJ*m0*c**2)/c**2 # (kg m/s)**2\n M = p2*np.sin(np.radians(alpha))**2./(2*m0*Beq)/mks['MeV'] # J/G -> MeV/G\n return M,K\n\ndef MK2Ealpha(M,K,L,species,Bunit='G'):\n \"\"\"\n for M in MeV/G and K in RE*sqrt(G)\n (or MeV/nT and RE*sqrt(nT) if Bunit = 'nT')\n returns E in MeV, alpha in degrees\n using dipole field\n Bunit = 'G' by default\n \"\"\"\n alpha = KL2alpha(K,L,Bunit)\n B = dipoleB(L,0,0) # nT\n if Bunit=='G':\n MG = M\n else:\n MG = M*1e5 # MeV/nT -> MeV/G\n \n\n E = MBtoMeV(MG,B,alpha,species)\n return E,alpha\n\ndef dbydL(f,E,alpha,L,species,dL= 0.001):\n \"\"\"\n returns df/dL at constant M,K\n f is a function handle with 3 arguments: E,alpha,L\n performs numerical derivative with dL=0.001 or specified by user\n E in MeV, alpha in degrees\n f is evaluated at L and at L+dL, \n at L+dL, E and alpha are adjusted to preserve M,K\n assumes dipole field for E,alpha <-> M,K conversions\n \"\"\"\n\n f1 = f(E,alpha,L)\n M1,K1 = Ealpha2MK(E,alpha,L,species,'G')\n L2 = L+dL\n E2,alpha2 = MK2Ealpha(M1,K1,L2,species,'G')\n f2 = f(E2,alpha2,L2)\n dfdL = (f2-f1)/(L2-L)\n return dfdL\n" ]
[ [ "numpy.sin", "numpy.log", "numpy.arcsin", "numpy.roots", "numpy.interp", "numpy.radians", "numpy.isscalar", "numpy.arange", "numpy.sqrt", "numpy.cos", "numpy.abs" ] ]
aaronwalsman/ltron
[ "ac82055687e74084a6e42da5c12725c523c5c63c" ]
[ "ltron/geometry/collision.py" ]
[ "import numpy\n\nfrom scipy.ndimage import binary_erosion\n\nfrom splendor.frame_buffer import FrameBufferWrapper\nfrom splendor.camera import orthographic_matrix\nfrom splendor.image import save_image, save_depth\nfrom splendor.masks import color_byte_to_index\n\nfrom ltron.geometry.utils import unscale_transform, default_allclose\n\nfrom ltron.exceptions import ThisShouldNeverHappen\n\ndef make_collision_framebuffer(resolution):\n frame_buffer = FrameBufferWrapper(\n resolution[0],\n resolution[1],\n anti_alias=False,\n )\n \n return frame_buffer\n\nclass CollisionChecker:\n def __init__(\n self,\n scene,\n resolution=(64,64),\n max_intersection=4,\n ):\n self.scene = scene\n self.frame_buffer = make_collision_framebuffer(resolution)\n self.max_intersection = max_intersection\n \n def check_collision(\n self,\n target_instances,\n render_transform,\n scene_instances=None,\n **kwargs,\n ):\n return check_collision(\n self.scene,\n target_instances,\n render_transform,\n frame_buffer=self.frame_buffer,\n max_intersection=self.max_intersection,\n **kwargs,\n )\n \n def check_snap_collision(\n self,\n target_instances,\n snap,\n **kwargs,\n ):\n return check_snap_collision(\n self.scene,\n target_instances,\n snap,\n frame_buffer=self.frame_buffer,\n **kwargs,\n )\n\ndef build_collision_map(\n scene,\n target_instances=None,\n scene_instances=None,\n frame_buffer=None,\n resolution=(64,64),\n *args,\n **kwargs,\n):\n \n if target_instances is None:\n target_instances = set(int(i) for i in scene.instances)\n \n if scene_instances is None:\n scene_instances = set(int(i) for i in scene.instances)\n else:\n scene_instances = set(\n int(scene_instance) for scene_instance in scene_instances)\n \n if frame_buffer is None:\n frame_buffer = make_collision_framebuffer(resolution)\n \n edges = scene.get_all_edges(unidirectional=False)\n collision_map = {}\n for instance in target_instances:\n instance = scene.instances[instance]\n instance_id = instance.instance_id\n instance_name = instance.instance_name\n collision_map[instance_id] = {}\n source_edges = edges[0] == instance_id\n snaps_to_check = edges[2, source_edges]\n snap_groups = {}\n for snap_id in snaps_to_check:\n snap = instance.get_snap(snap_id)\n #feature = (snap.polarity == '+', *tuple(snap.transform[:3,1]))\n axis = snap.transform[:3,1]\n if snap.polarity == '-':\n axis *= -1\n #axis = tuple(axis)\n feature = (tuple(axis) + (snap.polarity == '+',))\n for key in snap_groups:\n if default_allclose(key, feature):\n snap_groups[key].append(snap_id)\n break\n else:\n #snap_groups[feature] = [snap_id]\n snap_groups[feature] = [snap_id]\n \n #for snap_id in snaps_to_check:\n #for feature, snap_ids in snap_groups.items():\n for feature, snap_ids in snap_groups.items():\n snap_id = snap_ids[0]\n snap = instance.get_snap(snap_id)\n #for s_id in snap_ids:\n # collision_map[instance_id][s_id] = axis\n #collision_map[instance_id][axis] = set()\n map_key = (feature[:3], feature[3], tuple(snap_ids))\n #collision_map[instance_id][tuple(snap_ids)] = set()\n collision_map[instance_id][map_key] = set()\n current_scene_instances = scene_instances - set([instance_id])\n k = 0\n while current_scene_instances:\n #if instance_id == 1:\n # dump_images = 'one_%i_%s_%i'%(snap_id, direction, k)\n #else:\n # dump_images = None\n k += 1\n colliders = check_snap_collision(\n scene,\n [instance],\n snap,\n scene_instances=current_scene_instances,\n return_colliding_instances=True,\n frame_buffer=frame_buffer,\n #dump_images=dump_images,\n *args,\n **kwargs,\n )\n if 
len(colliders):\n colliders = set(int(i) for i in colliders)\n if 0 in colliders:\n raise ThisShouldNeverHappen\n #for s_id in snap_ids:\n # collision_map[instance_id][s_id] |= (\n # colliders)\n #collision_map[instance_id][tuple(snap_ids)] |= colliders\n collision_map[instance_id][map_key] |= colliders\n current_scene_instances -= colliders\n else:\n break\n \n return collision_map\n\ndef check_snap_collision(\n scene,\n target_instances,\n snap,\n *args,\n **kwargs,\n):\n \n if snap.polarity == '+':\n sign = 1\n elif snap.polarity == '-':\n sign = -1\n \n direction_transform = numpy.array([\n [ 1, 0, 0, 0],\n [ 0, 0, sign, 0],\n [ 0, 1, 0, 0],\n [ 0, 0, 0, 1]\n ])\n \n render_transform = snap.transform @ direction_transform\n \n return check_collision(\n scene,\n target_instances,\n render_transform,\n *args,\n **kwargs,\n )\n\ndef check_collision(\n scene,\n target_instances,\n render_transform,\n scene_instances=None,\n resolution=(64,64),\n frame_buffer=None,\n max_intersection=4,\n erosion=1,\n required_clearance=24,\n tolerance_spacing=8,\n dump_images=None,\n return_colliding_instances=False,\n):\n \n # setup ====================================================================\n # make sure the scene is renderable\n assert scene.renderable\n \n # get a list of the names of the target and scene instances\n target_instance_names = set(\n str(target_instance) for target_instance in target_instances)\n if scene_instances is None:\n scene_instance_names = set(\n scene.get_all_brick_instances()) - target_instance_names\n else:\n scene_instance_names = set(\n str(scene_instance) for scene_instance in scene_instances)\n \n # build a splendor frame buffer if a shared one was not specified ----------\n if frame_buffer is None:\n frame_buffer = make_collision_framebuffer(resolution)\n \n # store the camera info and which bricks are hidden ------------------------\n original_view_matrix = scene.get_view_matrix()\n original_projection = scene.get_projection()\n \n # render the scene depth map ===============================================\n # setup the camera ---------------------------------------------------------\n camera_transform = unscale_transform(render_transform)\n render_axis = camera_transform[:3,2]\n \n # compute the extents of the tarrget instance in camera space --------------\n local_target_vertices = []\n inv_camera_transform = numpy.linalg.inv(camera_transform)\n for target_instance in target_instances:\n vertices = target_instance.brick_shape.bbox_vertices\n transform = inv_camera_transform @ target_instance.transform\n local_target_vertices.append(transform @ vertices)\n local_target_vertices = numpy.concatenate(local_target_vertices, axis=1)\n box_min = numpy.min(local_target_vertices, axis=1)\n box_max = numpy.max(local_target_vertices, axis=1)\n thickness = box_max[2] - box_min[2]\n camera_distance = thickness + required_clearance + 2 * tolerance_spacing\n near_clip = 1 * tolerance_spacing\n far_clip = thickness * 2 + required_clearance + 3 * tolerance_spacing\n \n camera_transform[:3,3] += render_axis * camera_distance\n scene.set_view_matrix(numpy.linalg.inv(camera_transform))\n orthographic_projection = orthographic_matrix(\n l = box_max[0],\n r = box_min[0],\n b = -box_max[1],\n t = -box_min[1],\n n = near_clip,\n f = far_clip)\n \n scene.set_projection(orthographic_projection)\n \n # render -------------------------------------------------------------------\n frame_buffer.enable()\n scene.viewport_scissor(0,0,frame_buffer.width, frame_buffer.height)\n 
scene.mask_render(instances=scene_instance_names, ignore_hidden=True)\n if dump_images or return_colliding_instances:\n scene_mask = frame_buffer.read_pixels()\n scene_depth_map = frame_buffer.read_pixels(\n read_depth=True, projection=orthographic_projection)\n \n # render the target depth map ==============================================\n # setup the camera ---------------------------------------------------------\n camera_transform = unscale_transform(render_transform)\n camera_transform[:3,3] -= render_axis * camera_distance\n axis_flip = numpy.array([\n [ 1, 0, 0, 0],\n [ 0, 1, 0, 0],\n [ 0, 0,-1, 0],\n [ 0, 0, 0, 1]\n ])\n camera_transform = numpy.dot(camera_transform, axis_flip)\n scene.set_view_matrix(numpy.linalg.inv(camera_transform))\n scene.set_projection(orthographic_projection)\n \n # render -------------------------------------------------------------------\n frame_buffer.enable()\n scene.mask_render(instances=target_instance_names, ignore_hidden=True)\n target_mask = frame_buffer.read_pixels()\n target_depth_map = frame_buffer.read_pixels(\n read_depth=True, projection=orthographic_projection)\n \n # restore the previous camera ==============================================\n scene.set_view_matrix(original_view_matrix)\n scene.set_projection(original_projection)\n \n # check collision ==========================================================\n valid_pixels = numpy.sum(target_mask != 0, axis=-1) != 0\n \n scene_depth_map -= camera_distance\n scene_depth_map *= -1.\n target_depth_map -= camera_distance\n offset = (scene_depth_map - target_depth_map).reshape(valid_pixels.shape)\n offset *= valid_pixels\n \n # dump images ==============================================================\n if dump_images is not None:\n save_image(scene_mask, './%s_scene_mask.png'%dump_images)\n save_image(target_mask, './%s_target_mask.png'%dump_images)\n save_depth(scene_depth_map, './%s_scene_depth.npy'%dump_images)\n save_depth(target_depth_map, './%s_target_depth.npy'%dump_images)\n \n collision_pixels = (offset > max_intersection).astype(numpy.uint8)\n collision_pixels = collision_pixels * 255\n save_image(collision_pixels, './%s_collision.png'%dump_images)\n \n if erosion or return_colliding_instances:\n collision = offset > max_intersection\n if erosion:\n collision = binary_erosion(collision, iterations=erosion)\n \n if return_colliding_instances:\n colliding_y, colliding_x = numpy.where(collision)\n colliding_colors = scene_mask[colliding_y, colliding_x]\n colliding_bricks = numpy.unique(color_byte_to_index(colliding_colors))\n return colliding_bricks\n \n else:\n if erosion:\n collision = numpy.any(collision)\n else:\n collision = numpy.max(offset) > max_intersection\n \n return collision\n\n\ndef check_collision_old(\n scene,\n target_instances,\n snap_transform,\n target_snap_polarity,\n resolution=(64,64),\n frame_buffer=None,\n max_intersection=4,\n dump_images=None,\n ):\n \n # setup ====================================================================\n # make sure the scene is renderable\n assert scene.renderable\n \n # get a list of the non-target instances\n target_names = set(\n str(target_instance) for target_instance in target_instances)\n non_target_names = set(scene.get_all_brick_instances()) - target_names\n \n # build a splendor frame buffer if a shared one was not specified ----------\n if frame_buffer is None:\n frame_buffer = FrameBufferWrapper(\n resolution[0],\n resolution[1],\n anti_alias=False)\n \n # store the camera info and which bricks are hidden 
------------------------\n original_view_matrix = scene.get_view_matrix()\n original_projection = scene.get_projection()\n #hidden_instances = {i : scene.instance_hidden(i) for i in scene.instances}\n \n # compute the camera distance, clipping plane and the orthgraphic width ----\n #camera_distance = 500 # TMP\n #orthographic_width = 100 # TMP\n #orthographic_height = 100 # TMP\n #near_clip = 1 # TMP\n #far_clip = 2000 # TMP\n \n # render the scene depth map ===============================================\n # show everything except for the target instances --------------------------\n \n # setup the camera ---------------------------------------------------------\n camera_transform = snap_transform.copy()\n render_axis = snap_transform[:3,1]\n render_axis /= numpy.linalg.norm(render_axis)\n p_direction = -1\n n_direction = 1\n p_rotate = numpy.array([\n [1, 0, 0, 0],\n [0, 0,-1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1]])\n n_rotate = numpy.array([\n [1, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1]])\n\n if target_snap_polarity == '+':\n scene_axis = render_axis * n_direction\n scene_rotate = n_rotate\n target_axis = render_axis * p_direction\n target_rotate = p_rotate\n elif target_snap_polarity == '-':\n scene_axis = render_axis * p_direction\n scene_rotate = p_rotate\n target_axis = render_axis * n_direction\n target_rotate = n_rotate\n else:\n raise NotImplementedError\n \n # compute the relevant extents ---------------------------------------------\n local_vertices = []\n inv_snap_transform = numpy.linalg.inv(snap_transform)\n # this could be done by transforming the bounding box corners\n # (bbox of transformed bbox)\n for target_instance in target_instances:\n vertices = target_instance.brick_shape.vertices\n transform = inv_snap_transform @ target_instance.transform\n local_vertices.append(transform @ vertices)\n local_vertices = numpy.concatenate(local_vertices, axis=1)\n box_min = numpy.min(local_vertices, axis=1)\n box_max = numpy.max(local_vertices, axis=1)\n thickness = box_max[1] - box_min[1]\n camera_distance = thickness + 2\n near_clip = 1\n far_clip = 2 * thickness + 3\n \n camera_transform[:3,3] += scene_axis * camera_distance\n camera_transform = numpy.dot(camera_transform, scene_rotate)\n scene.set_view_matrix(numpy.linalg.inv(camera_transform))\n orthographic_projection = orthographic_matrix(\n #l = -orthographic_width,\n #r = orthographic_width,\n #b = -orthographic_height,\n #t = orthographic_height,\n l = box_max[0],\n r = box_min[0],\n b = -box_max[2],\n t = -box_min[2],\n n = near_clip,\n f = far_clip)\n scene.set_projection(orthographic_projection)\n \n # render -------------------------------------------------------------------\n frame_buffer.enable()\n scene.mask_render(instances=non_target_names)\n scene_mask = frame_buffer.read_pixels()\n scene_depth_map = frame_buffer.read_pixels(\n read_depth=True, projection=orthographic_projection)\n \n #scene.color_render()\n #scene_color = frame_buffer.read_pixels()\n \n # render the instance depth map ============================================\n # hide everything except for the target instances --------------------------\n #scene.hide_all_instances()\n #for instance in target_instances:\n # scene.show_instance(instance)\n \n # setup the camera ---------------------------------------------------------\n camera_transform = snap_transform.copy()\n camera_transform[:3,3] += target_axis * camera_distance\n camera_transform = numpy.dot(camera_transform, target_rotate)\n 
scene.set_view_matrix(numpy.linalg.inv(camera_transform))\n scene.set_projection(orthographic_projection)\n # render -------------------------------------------------------------------\n frame_buffer.enable()\n scene.mask_render(instances=target_names)\n instance_mask = frame_buffer.read_pixels()\n instance_depth_map = frame_buffer.read_pixels(\n read_depth=True, projection=orthographic_projection)\n \n #scene.color_render()\n #instance_color = frame_buffer.read_pixels()\n \n # restore the previous camera and hidden state =============================\n scene.set_view_matrix(original_view_matrix)\n scene.set_projection(original_projection)\n #for instance, hidden in hidden_instances.items():\n # if hidden:\n # scene.hide_instance(instance)\n # else:\n # scene.show_instance(instance)\n \n # check collision ==========================================================\n valid_pixels = numpy.sum(instance_mask != 0, axis=-1) != 0\n \n scene_depth_map -= camera_distance\n scene_depth_map *= -1.\n instance_depth_map -= camera_distance\n offset = (scene_depth_map - instance_depth_map).reshape(valid_pixels.shape)\n offset *= valid_pixels\n \n collision = numpy.max(offset) > max_intersection\n \n # dump images ==============================================================\n if dump_images is not None:\n save_image(scene_mask, './%s_scene_mask.png'%dump_images)\n save_image(instance_mask, './%s_instance_mask.png'%dump_images)\n save_depth(scene_depth_map, './%s_scene_depth.npy'%dump_images)\n save_depth(instance_depth_map, './%s_instance_depth.npy'%dump_images)\n #save_image(scene_color, './%s_scene_color.png'%dump_images)\n #save_image(instance_color, './%s_instance_color.png'%dump_images)\n \n collision_pixels = (offset > max_intersection).astype(numpy.uint8)\n collision_pixels = collision_pixels * 255\n save_image(collision_pixels, './%s_collision.png'%dump_images)\n \n return collision\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.dot", "numpy.linalg.norm", "scipy.ndimage.binary_erosion", "numpy.sum", "numpy.min", "numpy.where", "numpy.any", "numpy.linalg.inv" ] ]
RohanAsnani/fingppmatch
[ "50d0d4ae51c29b0d3d4a449262397293665aa1a1" ]
[ "utils.py" ]
[ "import math\nimport numpy as np\nimport cv2 as cv\nimport urllib.request\nimport IPython\nimport base64\nimport html\n\n# Utility function to show an image\ndef show(*images, enlarge_small_images = True, max_per_row = -1, font_size = 0):\n if len(images) == 2 and type(images[1])==str:\n images = [(images[0], images[1])]\n\n def convert_for_display(img):\n if img.dtype!=np.uint8:\n a, b = img.min(), img.max()\n if a==b:\n offset, mult, d = 0, 0, 1\n elif a<0:\n offset, mult, d = 128, 127, max(abs(a), abs(b))\n else:\n offset, mult, d = 0, 255, b\n img = np.clip(offset + mult*(img.astype(float))/d, 0, 255).astype(np.uint8)\n return img\n\n def convert(imgOrTuple):\n try:\n img, title = imgOrTuple\n if type(title)!=str:\n img, title = imgOrTuple, ''\n except ValueError:\n img, title = imgOrTuple, '' \n if type(img)==str:\n data = img\n else:\n img = convert_for_display(img)\n if enlarge_small_images:\n REF_SCALE = 100\n h, w = img.shape[:2]\n if h<REF_SCALE or w<REF_SCALE:\n scale = max(1, min(REF_SCALE//h, REF_SCALE//w))\n img = cv.resize(img,(w*scale,h*scale), interpolation=cv.INTER_NEAREST)\n data = 'data:image/png;base64,' + base64.b64encode(cv.imencode('.png', img)[1]).decode('utf8')\n return data, title\n \n if max_per_row == -1:\n max_per_row = len(images)\n\n rows = [images[x:x+max_per_row] for x in range(0, len(images), max_per_row)]\n font = f\"font-size: {font_size}px;\" if font_size else \"\"\n\n html_content = \"\"\n for r in rows:\n l = [convert(t) for t in r]\n html_content += \"\".join([\"<table><tr>\"] \n + [f\"<td style='text-align:center;{font}'>{html.escape(t)}</td>\" for _,t in l] \n + [\"</tr><tr>\"] \n + [f\"<td style='text-align:center;'><img src='{d}'></td>\" for d,_ in l]\n + [\"</tr></table>\"])\n IPython.display.display(IPython.display.HTML(html_content))\n\n# Utility function to load an image from an URL\ndef load_from_url(url):\n resp = urllib.request.urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=np.uint8)\n return cv.imdecode(image, cv.IMREAD_GRAYSCALE)\n\n# Utility function to draw orientations over an image\ndef draw_orientations(fingerprint, orientations, strengths, mask, scale = 3, step = 8, border = 0):\n if strengths is None:\n strengths = np.ones_like(orientations)\n h, w = fingerprint.shape\n sf = cv.resize(fingerprint, (w*scale, h*scale), interpolation = cv.INTER_NEAREST)\n res = cv.cvtColor(sf, cv.COLOR_GRAY2BGR)\n d = (scale // 2) + 1\n sd = (step+1)//2\n c = np.round(np.cos(orientations) * strengths * d * sd).astype(int)\n s = np.round(-np.sin(orientations) * strengths * d * sd).astype(int) # minus for the direction of the y axis\n thickness = 1 + scale // 5\n for y in range(border, h-border, step):\n for x in range(border, w-border, step):\n if mask is None or mask[y, x] != 0:\n ox, oy = c[y, x], s[y, x]\n cv.line(res, (d+x*scale-ox,d+y*scale-oy), (d+x*scale+ox,d+y*scale+oy), (255,0,0), thickness, cv.LINE_AA)\n return res\n\n# Utility function to draw a set of minutiae over an image\ndef draw_minutiae(fingerprint, minutiae, termination_color = (255,0,0), bifurcation_color = (0,0,255)):\n res = cv.cvtColor(fingerprint, cv.COLOR_GRAY2BGR)\n \n for x, y, t, *d in minutiae:\n color = termination_color if t else bifurcation_color\n if len(d)==0:\n cv.drawMarker(res, (x,y), color, cv.MARKER_CROSS, 8)\n else:\n d = d[0]\n ox = int(round(math.cos(d) * 7))\n oy = int(round(math.sin(d) * 7))\n cv.circle(res, (x,y), 3, color, 1, cv.LINE_AA)\n cv.line(res, (x,y), (x+ox,y-oy), color, 1, cv.LINE_AA) \n return res\n\n# Utility function to 
generate gabor filter kernels\n\n_sigma_conv = (3.0/2.0)/((6*math.log(10))**0.5)\n# sigma is adjusted according to the ridge period, so that the filter does not contain more than three effective peaks \ndef _gabor_sigma(ridge_period):\n return _sigma_conv * ridge_period\n\ndef _gabor_size(ridge_period):\n p = int(round(ridge_period * 2 + 1))\n if p % 2 == 0:\n p += 1\n return (p, p)\n\ndef gabor_kernel(period, orientation):\n f = cv.getGaborKernel(_gabor_size(period), _gabor_sigma(period), np.pi/2 - orientation, period, gamma = 1, psi = 0)\n f /= f.sum()\n f -= f.mean()\n return f\n\n\n# Utility functions for minutiae\ndef angle_abs_difference(a, b):\n return math.pi - abs(abs(a - b) - math.pi)\n\ndef angle_mean(a, b):\n return math.atan2((math.sin(a)+math.sin(b))/2, ((math.cos(a)+math.cos(b))/2))\n\n# Utility functions for MCC\ndef draw_minutiae_and_cylinder(fingerprint, origin_cell_coords, minutiae, values, i, show_cylinder = True):\n\n def _compute_actual_cylinder_coordinates(x, y, t, d):\n c, s = math.cos(d), math.sin(d)\n rot = np.array([[c, s],[-s, c]]) \n return (rot@origin_cell_coords.T + np.array([x,y])[:,np.newaxis]).T\n \n res = draw_minutiae(fingerprint, minutiae) \n if show_cylinder:\n for v, (cx, cy) in zip(values[i], _compute_actual_cylinder_coordinates(*minutiae[i])):\n cv.circle(res, (int(round(cx)), int(round(cy))), 3, (0,int(round(v*255)),0), 1, cv.LINE_AA)\n return res\n\ndef draw_match_pairs(f1, m1, v1, f2, m2, v2, cells_coords, pairs, i, show_cylinders = True):\n #nd = _current_parameters.ND\n h1, w1 = f1.shape\n h2, w2 = f2.shape\n p1, p2 = pairs\n res = np.full((max(h1,h2), w1+w2, 3), 255, np.uint8)\n res[:h1,:w1] = draw_minutiae_and_cylinder(f1, cells_coords, m1, v1, p1[i], show_cylinders)\n res[:h2,w1:w1+w2] = draw_minutiae_and_cylinder(f2, cells_coords, m2, v2, p2[i], show_cylinders)\n for k, (i1, i2) in enumerate(zip(p1, p2)):\n (x1, y1, *_), (x2, y2, *_) = m1[i1], m2[i2]\n cv.line(res, (int(x1), int(y1)), (w1+int(x2), int(y2)), (0,0,255) if k!=i else (0,255,255), 1, cv.LINE_AA)\n return res" ]
[ [ "numpy.array", "numpy.ones_like", "numpy.sin", "numpy.cos" ] ]
gsoxley/OpenMDAO
[ "709401e535cf6933215abd942d4b4d49dbf61b2b" ]
[ "openmdao/core/tests/test_expl_comp.py" ]
[ "\"\"\"Simple example demonstrating how to implement an explicit component.\"\"\"\nfrom __future__ import division\n\nfrom six import assertRaisesRegex\n\nfrom six.moves import cStringIO\nimport unittest\n\nimport numpy as np\n\nfrom openmdao.api import Problem, ExplicitComponent, NewtonSolver, ScipyKrylov, Group, \\\n IndepVarComp, LinearBlockGS, AnalysisError\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.test_suite.components.double_sellar import SubSellar\nfrom openmdao.test_suite.components.expl_comp_simple import TestExplCompSimple, \\\n TestExplCompSimpleDense\nfrom openmdao.utils.general_utils import printoptions\n\n\n# Note: The following class definitions are used in feature docs\n\nclass RectangleComp(ExplicitComponent):\n \"\"\"\n A simple Explicit Component that computes the area of a rectangle.\n \"\"\"\n\n def setup(self):\n self.add_input('length', val=1.)\n self.add_input('width', val=1.)\n self.add_output('area', val=1.)\n\n self.declare_partials('*', '*')\n\n def compute(self, inputs, outputs):\n outputs['area'] = inputs['length'] * inputs['width']\n\n\nclass RectanglePartial(RectangleComp):\n\n def compute_partials(self, inputs, partials):\n partials['area', 'length'] = inputs['width']\n partials['area', 'width'] = inputs['length']\n\n\nclass RectangleJacVec(RectangleComp):\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n if mode == 'fwd':\n if 'area' in d_outputs:\n if 'length' in d_inputs:\n d_outputs['area'] += inputs['width'] * d_inputs['length']\n if 'width' in d_inputs:\n d_outputs['area'] += inputs['length'] * d_inputs['width']\n elif mode == 'rev':\n if 'area' in d_outputs:\n if 'length' in d_inputs:\n d_inputs['length'] += inputs['width'] * d_outputs['area']\n if 'width' in d_inputs:\n d_inputs['width'] += inputs['length'] * d_outputs['area']\n\n\nclass RectangleGroup(Group):\n\n def setup(self):\n comp1 = self.add_subsystem('comp1', IndepVarComp())\n comp1.add_output('length', 1.0)\n comp1.add_output('width', 1.0)\n\n self.add_subsystem('comp2', RectanglePartial())\n self.add_subsystem('comp3', RectangleJacVec())\n\n self.connect('comp1.length', 'comp2.length')\n self.connect('comp1.length', 'comp3.length')\n self.connect('comp1.width', 'comp2.width')\n self.connect('comp1.width', 'comp3.width')\n\n\nclass ExplCompTestCase(unittest.TestCase):\n\n def test_simple(self):\n prob = Problem(RectangleComp())\n prob.setup(check=False)\n prob.run_model()\n\n def test_feature_simple(self):\n from openmdao.api import Problem\n from openmdao.core.tests.test_expl_comp import RectangleComp\n\n prob = Problem(RectangleComp())\n prob.setup(check=False)\n prob.run_model()\n\n def test_compute_and_list(self):\n prob = Problem(RectangleGroup())\n prob.setup(check=False)\n\n msg = \"Unable to list inputs until model has been run.\"\n try:\n prob.model.list_inputs()\n except Exception as err:\n self.assertTrue(msg == str(err))\n else:\n self.fail(\"Exception expected\")\n\n msg = \"Unable to list outputs until model has been run.\"\n try:\n prob.model.list_outputs()\n except Exception as err:\n self.assertTrue(msg == str(err))\n else:\n self.fail(\"Exception expected\")\n\n prob['comp1.length'] = 3.\n prob['comp1.width'] = 2.\n prob.run_model()\n assert_rel_error(self, prob['comp2.area'], 6.)\n assert_rel_error(self, prob['comp3.area'], 6.)\n\n # total derivs\n total_derivs = prob.compute_totals(\n wrt=['comp1.length', 'comp1.width'],\n of=['comp2.area', 'comp3.area']\n )\n assert_rel_error(self, total_derivs['comp2.area', 
'comp1.length'], [[2.]])\n assert_rel_error(self, total_derivs['comp3.area', 'comp1.length'], [[2.]])\n assert_rel_error(self, total_derivs['comp2.area', 'comp1.width'], [[3.]])\n assert_rel_error(self, total_derivs['comp3.area', 'comp1.width'], [[3.]])\n\n # list inputs\n inputs = prob.model.list_inputs(out_stream=None)\n self.assertEqual(sorted(inputs), [\n ('comp2.length', {'value': [3.]}),\n ('comp2.width', {'value': [2.]}),\n ('comp3.length', {'value': [3.]}),\n ('comp3.width', {'value': [2.]}),\n ])\n\n # list explicit outputs\n outputs = prob.model.list_outputs(implicit=False, out_stream=None)\n self.assertEqual(sorted(outputs), [\n ('comp1.length', {'value': [3.]}),\n ('comp1.width', {'value': [2.]}),\n ('comp2.area', {'value': [6.]}),\n ('comp3.area', {'value': [6.]}),\n ])\n\n # list states\n states = prob.model.list_outputs(explicit=False, out_stream=None)\n self.assertEqual(states, [])\n\n # list excluding both explicit and implicit components raises error\n msg = \"You have excluded both Explicit and Implicit components.\"\n\n with assertRaisesRegex(self, RuntimeError, msg):\n prob.model.list_outputs(explicit=False, implicit=False)\n\n def test_simple_list_vars_options(self):\n\n from openmdao.api import IndepVarComp, Group, Problem, ExecComp\n\n prob = Problem()\n prob.model = model = Group()\n\n model.add_subsystem('p1', IndepVarComp('x', 12.0,\n lower=1.0, upper=100.0,\n ref=1.1, ref0=2.1,\n units='inch'))\n model.add_subsystem('p2', IndepVarComp('y', 1.0,\n lower=2.0, upper=200.0,\n ref=1.2, res_ref=2.2,\n units='ft'))\n model.add_subsystem('comp', ExecComp('z=x+y',\n x={'value': 0.0, 'units': 'inch'},\n y={'value': 0.0, 'units': 'inch'},\n z={'value': 0.0, 'units': 'inch'}))\n model.connect('p1.x', 'comp.x')\n model.connect('p2.y', 'comp.y')\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n # list_inputs tests\n # Can't do exact equality here because units cause comp.y to be slightly different than 12.0\n stream = cStringIO()\n inputs = prob.model.list_inputs(units=True, out_stream=stream)\n tol = 1e-7\n for actual, expected in zip(sorted(inputs), [\n ('comp.x', {'value': [12.], 'units': 'inch'}),\n ('comp.y', {'value': [12.], 'units': 'inch'})\n ]):\n self.assertEqual(expected[0], actual[0])\n self.assertEqual(expected[1]['units'], actual[1]['units'])\n assert_rel_error(self, expected[1]['value'], actual[1]['value'], tol)\n\n text = stream.getvalue()\n self.assertEqual(1, text.count(\"Input(s) in 'model'\"))\n self.assertEqual(1, text.count('varname'))\n self.assertEqual(1, text.count('value'))\n self.assertEqual(1, text.count('top'))\n self.assertEqual(1, text.count(' comp'))\n self.assertEqual(1, text.count(' x'))\n self.assertEqual(1, text.count(' y'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(8, num_non_empty_lines)\n\n # list_outputs tests\n\n # list outputs for implicit comps - should get none\n outputs = prob.model.list_outputs(implicit=True, explicit=False, out_stream=None)\n self.assertEqual(outputs, [])\n\n # list outputs with out_stream - just check to see if it was logged to\n stream = cStringIO()\n outputs = prob.model.list_outputs(out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(1, text.count('Explicit Output'))\n self.assertEqual(1, text.count('Implicit Output'))\n\n # list outputs with out_stream and all the optional display values True\n stream = cStringIO()\n outputs = prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n 
residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=False,\n out_stream=stream)\n\n self.assertEqual([\n ('comp.z', {'value': [24.], 'resids': [0.], 'units': 'inch', 'shape': (1,),\n 'lower': None, 'upper': None, 'ref': 1.0, 'ref0': 0.0, 'res_ref': 1.0}),\n ('p1.x', {'value': [12.], 'resids': [0.], 'units': 'inch', 'shape': (1,),\n 'lower': [1.], 'upper': [100.], 'ref': 1.1, 'ref0': 2.1, 'res_ref': 1.1}),\n ('p2.y', {'value': [1.], 'resids': [0.], 'units': 'ft', 'shape': (1,),\n 'lower': [2.], 'upper': [200.], 'ref': 1.2, 'ref0': 0.0, 'res_ref': 2.2}),\n ], sorted(outputs))\n\n text = stream.getvalue()\n self.assertEqual(1, text.count('varname'))\n self.assertEqual(1, text.count('value'))\n self.assertEqual(1, text.count('resids'))\n self.assertEqual(1, text.count('units'))\n self.assertEqual(1, text.count('shape'))\n self.assertEqual(1, text.count('lower'))\n self.assertEqual(1, text.count('upper'))\n self.assertEqual(3, text.count('ref'))\n self.assertEqual(1, text.count('ref0'))\n self.assertEqual(1, text.count('res_ref'))\n self.assertEqual(1, text.count('p1.x'))\n self.assertEqual(1, text.count('p2.y'))\n self.assertEqual(1, text.count('comp.z'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(9, num_non_empty_lines)\n\n def test_for_feature_docs_list_vars_options(self):\n\n from openmdao.api import IndepVarComp, Group, Problem, ExecComp\n\n prob = Problem()\n prob.model = model = Group()\n\n model.add_subsystem('p1', IndepVarComp('x', 12.0,\n lower=1.0, upper=100.0,\n ref=1.1, ref0=2.1,\n units='inch',\n ))\n model.add_subsystem('p2', IndepVarComp('y', 1.0,\n lower=2.0, upper=200.0,\n ref=1.2, res_ref=2.2,\n units='ft',\n ))\n model.add_subsystem('comp', ExecComp('z=x+y',\n x={'value': 0.0, 'units': 'inch'},\n y={'value': 0.0, 'units': 'inch'},\n z={'value': 0.0, 'units': 'inch'}))\n model.connect('p1.x', 'comp.x')\n model.connect('p2.y', 'comp.y')\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n inputs = prob.model.list_inputs(units=True)\n print(inputs)\n\n outputs = prob.model.list_outputs(implicit=False,\n values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=False)\n\n self.assertEqual(sorted(outputs), [\n ('comp.z', {'value': [24.], 'resids': [0.], 'units': 'inch', 'shape': (1,),\n 'lower': None, 'upper': None, 'ref': 1.0, 'ref0': 0.0, 'res_ref': 1.0}),\n ('p1.x', {'value': [12.], 'resids': [0.], 'units': 'inch', 'shape': (1,),\n 'lower': [1.], 'upper': [100.], 'ref': 1.1, 'ref0': 2.1, 'res_ref': 1.1}),\n ('p2.y', {'value': [1.], 'resids': [0.], 'units': 'ft', 'shape': (1,),\n 'lower': [2.], 'upper': [200.], 'ref': 1.2, 'ref0': 0.0, 'res_ref': 2.2}),\n ])\n\n outputs = prob.model.list_outputs(implicit=False,\n values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=True,\n print_arrays=False)\n\n def test_hierarchy_list_vars_options(self):\n\n prob = Problem()\n model = prob.model\n\n model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])))\n\n sub1 = model.add_subsystem('sub1', Group())\n sub2 = sub1.add_subsystem('sub2', Group())\n g1 = sub2.add_subsystem('g1', SubSellar())\n g2 = model.add_subsystem('g2', SubSellar())\n\n model.connect('pz.z', 'sub1.sub2.g1.z')\n model.connect('sub1.sub2.g1.y2', 'g2.x')\n model.connect('g2.y2', 'sub1.sub2.g1.x')\n\n model.nonlinear_solver = NewtonSolver()\n model.linear_solver = ScipyKrylov()\n 
model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['max_sub_solves'] = 0\n\n g1.nonlinear_solver = NewtonSolver()\n g1.linear_solver = LinearBlockGS()\n\n g2.nonlinear_solver = NewtonSolver()\n g2.linear_solver = ScipyKrylov()\n g2.linear_solver.precon = LinearBlockGS()\n g2.linear_solver.precon.options['maxiter'] = 2\n\n prob.setup(check=False)\n prob.run_driver()\n\n # logging inputs\n # out_stream - not hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_inputs(values=True,\n units=True,\n hierarchical=False,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(1, text.count(\"10 Input(s) in 'model'\"))\n # make sure they are in the correct order\n self.assertTrue(text.find(\"sub1.sub2.g1.d1.z\") <\n text.find('sub1.sub2.g1.d1.x') <\n text.find('sub1.sub2.g1.d1.y2') <\n text.find('sub1.sub2.g1.d2.z') <\n text.find('sub1.sub2.g1.d2.y1') <\n text.find('g2.d1.z') <\n text.find('g2.d1.x') <\n text.find('g2.d1.y2') <\n text.find('g2.d2.z') <\n text.find('g2.d2.y1'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(14, num_non_empty_lines)\n\n # out_stream - hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_inputs(values=True,\n units=True,\n hierarchical=True,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(1, text.count(\"10 Input(s) in 'model'\"))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(23, num_non_empty_lines)\n self.assertEqual(1, text.count('top'))\n self.assertEqual(1, text.count(' sub1'))\n self.assertEqual(1, text.count(' sub2'))\n self.assertEqual(1, text.count(' g1'))\n self.assertEqual(1, text.count(' d1'))\n self.assertEqual(2, text.count(' z'))\n\n # logging outputs\n # out_stream - not hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('5 Explicit Output'), 1)\n # make sure they are in the correct order\n self.assertTrue(text.find(\"pz.z\") < text.find('sub1.sub2.g1.d1.y1') <\n text.find('sub1.sub2.g1.d2.y2') <\n text.find('g2.d1.y1') < text.find('g2.d2.y2'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(11, num_non_empty_lines)\n\n # Hierarchical\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=True,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('top'), 1)\n self.assertEqual(text.count(' y1'), 1)\n self.assertEqual(text.count(' g2'), 1)\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(num_non_empty_lines, 21)\n\n def test_array_list_vars_options(self):\n\n class ArrayAdder(ExplicitComponent):\n \"\"\"\n Just a simple component that has array inputs and outputs\n \"\"\"\n\n def __init__(self, size):\n super(ArrayAdder, self).__init__()\n self.size = size\n\n def setup(self):\n self.add_input('x', val=np.zeros(self.size), units='inch')\n self.add_output('y', val=np.zeros(self.size), units='ft')\n\n def compute(self, inputs, outputs):\n outputs['y'] = inputs['x'] + 10.0\n\n size = 100 # how many items in the array\n\n prob = 
Problem()\n prob.model = Group()\n\n prob.model.add_subsystem('des_vars', IndepVarComp('x', np.ones(size), units='inch'),\n promotes=['x'])\n prob.model.add_subsystem('mult', ArrayAdder(size), promotes=['x', 'y'])\n\n prob.setup(check=False)\n\n prob['x'] = np.ones(size)\n\n prob.run_driver()\n\n # logging inputs\n # out_stream - not hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_inputs(values=True,\n units=True,\n hierarchical=False,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(1, text.count(\"1 Input(s) in 'model'\"))\n self.assertEqual(1, text.count('mult.x'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(5, num_non_empty_lines)\n\n # out_stream - hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_inputs(values=True,\n units=True,\n hierarchical=True,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(1, text.count(\"1 Input(s) in 'model'\"))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(7, num_non_empty_lines)\n self.assertEqual(1, text.count('top'))\n self.assertEqual(1, text.count(' mult'))\n self.assertEqual(1, text.count(' x'))\n\n # logging outputs\n # out_stream - not hierarchical - extras - no print_arrays\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('2 Explicit Output'), 1)\n # make sure they are in the correct order\n self.assertTrue(text.find(\"des_vars.x\") < text.find('mult.y'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(8, num_non_empty_lines)\n\n # Promoted names - no print arrays\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n prom_name=True,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count(' x |10.0| x'), 1)\n self.assertEqual(text.count(' y |110.0| y'), 1)\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(num_non_empty_lines, 11)\n\n # Hierarchical - no print arrays\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=True,\n print_arrays=False,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('top'), 1)\n self.assertEqual(text.count(' des_vars'), 1)\n self.assertEqual(text.count(' x'), 1)\n self.assertEqual(text.count(' mult'), 1)\n self.assertEqual(text.count(' y'), 1)\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(num_non_empty_lines, 11)\n\n # Need to explicitly set this to make sure all ways of running this test\n # result in the same format of the output. 
When running this test from the\n # top level via testflo, the format comes out different than if the test is\n # run individually\n opts = {\n 'edgeitems': 3,\n 'infstr': 'inf',\n 'linewidth': 75,\n 'nanstr': 'nan',\n 'precision': 8,\n 'suppress': False,\n 'threshold': 1000,\n }\n\n from distutils.version import LooseVersion\n if LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n opts['legacy'] = '1.13'\n\n with printoptions(**opts):\n # logging outputs\n # out_stream - not hierarchical - extras - print_arrays\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=True,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('2 Explicit Output'), 1)\n self.assertEqual(text.count('value:'), 2)\n self.assertEqual(text.count('resids:'), 2)\n self.assertEqual(text.count('['), 4)\n # make sure they are in the correct order\n self.assertTrue(text.find(\"des_vars.x\") < text.find('mult.y'))\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(37, num_non_empty_lines)\n\n # Hierarchical\n stream = cStringIO()\n prob.model.list_outputs(values=True,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=True,\n print_arrays=True,\n out_stream=stream)\n text = stream.getvalue()\n self.assertEqual(text.count('2 Explicit Output'), 1)\n self.assertEqual(text.count('value:'), 2)\n self.assertEqual(text.count('resids:'), 2)\n self.assertEqual(text.count('['), 4)\n self.assertEqual(text.count('top'), 1)\n self.assertEqual(text.count(' des_vars'), 1)\n self.assertEqual(text.count(' x'), 1)\n self.assertEqual(text.count(' mult'), 1)\n self.assertEqual(text.count(' y'), 1)\n num_non_empty_lines = sum([1 for s in text.splitlines() if s.strip()])\n self.assertEqual(num_non_empty_lines, 40)\n\n def test_for_docs_array_list_vars_options(self):\n\n import numpy as np\n from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent\n\n class ArrayAdder(ExplicitComponent):\n \"\"\"\n Just a simple component that has array inputs and outputs\n \"\"\"\n\n def __init__(self, size):\n super(ArrayAdder, self).__init__()\n self.size = size\n\n def setup(self):\n self.add_input('x', val=np.zeros(self.size), units='inch')\n self.add_output('y', val=np.zeros(self.size), units='ft')\n\n def compute(self, inputs, outputs):\n outputs['y'] = inputs['x'] + 10.0\n\n size = 30\n\n prob = Problem()\n prob.model = Group()\n prob.model.add_subsystem('des_vars', IndepVarComp('x', np.ones(size), units='inch'),\n promotes=['x'])\n prob.model.add_subsystem('mult', ArrayAdder(size), promotes=['x', 'y'])\n\n prob.setup(check=False)\n prob['x'] = np.arange(size)\n prob.run_driver()\n\n prob.model.list_inputs(values=True,\n units=True,\n hierarchical=True,\n print_arrays=True)\n\n with printoptions(edgeitems=3, infstr='inf',\n linewidth=75, nanstr='nan', precision=8,\n suppress=False, threshold=1000, formatter=None):\n\n prob.model.list_outputs(values=True,\n implicit=False,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=False,\n print_arrays=True)\n\n prob.model.list_outputs(values=True,\n implicit=False,\n units=True,\n shape=True,\n bounds=True,\n residuals=True,\n scaling=True,\n hierarchical=True,\n print_arrays=True)\n\n def test_compute_inputs_read_only(self):\n class BadComp(TestExplCompSimple):\n def compute(self, inputs, outputs):\n super(BadComp, 
self).compute(inputs, outputs)\n inputs['length'] = 0. # should not be allowed\n\n prob = Problem(BadComp())\n prob.setup()\n\n with self.assertRaises(ValueError) as cm:\n prob.run_model()\n\n self.assertEqual(str(cm.exception),\n \"Attempt to set value of 'length' in input vector \"\n \"when it is read only.\")\n\n def test_compute_inputs_read_only_reset(self):\n class BadComp(TestExplCompSimple):\n def compute(self, inputs, outputs):\n super(BadComp, self).compute(inputs, outputs)\n raise AnalysisError(\"It's just a scratch.\")\n\n prob = Problem(BadComp())\n prob.setup()\n with self.assertRaises(AnalysisError):\n prob.run_model()\n\n # verify read_only status is reset after AnalysisError\n prob['length'] = 111.\n\n def test_compute_partials_inputs_read_only(self):\n class BadComp(TestExplCompSimpleDense):\n def compute_partials(self, inputs, partials):\n super(BadComp, self).compute_partials(inputs, partials)\n inputs['length'] = 0. # should not be allowed\n\n prob = Problem(BadComp())\n prob.setup()\n prob.run_model()\n\n with self.assertRaises(ValueError) as cm:\n prob.check_partials()\n\n self.assertEqual(str(cm.exception),\n \"Attempt to set value of 'length' in input vector \"\n \"when it is read only.\")\n\n def test_compute_partials_inputs_read_only_reset(self):\n class BadComp(TestExplCompSimpleDense):\n def compute_partials(self, inputs, partials):\n super(BadComp, self).compute_partials(inputs, partials)\n raise AnalysisError(\"It's just a scratch.\")\n\n prob = Problem(BadComp())\n prob.setup()\n prob.run_model()\n\n with self.assertRaises(AnalysisError):\n prob.check_partials()\n\n # verify read_only status is reset after AnalysisError\n prob['length'] = 111.\n\n def test_compute_jacvec_product_inputs_read_only(self):\n class BadComp(RectangleJacVec):\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n super(BadComp, self).compute_jacvec_product(inputs, d_inputs, d_outputs, mode)\n inputs['length'] = 0. # should not be allowed\n\n prob = Problem(BadComp())\n prob.setup()\n prob.run_model()\n\n with self.assertRaises(ValueError) as cm:\n prob.check_partials()\n\n self.assertEqual(str(cm.exception),\n \"Attempt to set value of 'length' in input vector \"\n \"when it is read only.\")\n\n def test_compute_jacvec_product_inputs_read_only_reset(self):\n class BadComp(RectangleJacVec):\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n super(BadComp, self).compute_jacvec_product(inputs, d_inputs, d_outputs, mode)\n raise AnalysisError(\"It's just a scratch.\")\n\n prob = Problem(BadComp())\n prob.setup()\n prob.run_model()\n\n with self.assertRaises(AnalysisError):\n prob.check_partials()\n\n # verify read_only status is reset after AnalysisError\n prob['length'] = 111.\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.arange", "numpy.zeros" ] ]
jamesaphoenix/VEX-MMM
[ "bb052c0a6b7a42f2cdc8568068f94db596ac481b" ]
[ "mmm/engineer.py" ]
[ "import statsmodels.tsa as tsa\nimport pandas as pd\nfrom typing import List, Tuple\n\nfrom .select import get_cols_containing\n\ndef add_constant(df:pd.DataFrame) -> None:\n df['constant'] = 1\n\ndef add_adstocks(df:pd.DataFrame, column_label:str, adstock_rates:List[float]=None) -> List:\n if adstock_rates is None:\n adstock_rates = [round(i*0.1,1) for i in range(1,10)]\n\n added_columns = list()\n for ar in adstock_rates:\n ar_column = f\"{column_label} AR={ar}\"\n df[ar_column] = tsa.filters.filtertools.recursive_filter(df[column_label], ar)\n added_columns.append(ar_column)\n return added_columns\n\ndef add_diminishing_returns(df:pd.DataFrame, column_label:str, saturation_levels:List[float]=None) -> List:\n if saturation_levels is None:\n saturation_levels = [round(i*0.1,1) for i in range(1,10)]\n\n added_columns = list()\n for dr in saturation_levels:\n dr_column = f\"{column_label} DR={dr}\"\n df[dr_column] = df[column_label]**dr\n added_columns.append(dr_column)\n return added_columns\n\ndef add_lags(df:pd.DataFrame, column_label:str, lags:List[float]=None) -> List:\n if lags is None:\n lags = [1, 2, 3, 7, 14, 30, 60, 90, 180, 365]\n\n added_columns = list()\n for l in lags:\n l_column = f\"{column_label} Lag={l}\"\n df[l_column] = df[column_label].shift(l)\n df[l_column] = df[l_column].fillna(0)\n added_columns.append(l_column)\n return added_columns\n\ndef add_interaction_effect(df:pd.DataFrame, column_label_a:str, column_label_b:str) -> str:\n interaction_name = f'{column_label_a} x {column_label_b}'\n\n df[interaction_name] = df[column_label_a] * df[column_label_b]\n return interaction_name\n\ndef add_day_of_week_dummies(df:pd.DataFrame, date_label:str=None) -> Tuple[List[str], pd.DataFrame]:\n if date_label is None:\n dates_index = pd.to_datetime(df.index)\n date_label = '_date'\n df[date_label] = dates_index\n \n else:\n dates_index = pd.to_datetime(df[date_label])\n\n df['day_of_week'] = dates_index.day_name()\n df['day_of_week'] = df['day_of_week'].str.lower()\n dummies = pd.get_dummies(df['day_of_week'])\n \n dummies[date_label] = dates_index\n \n df = pd.merge(df, dummies, left_on=date_label, right_on=date_label, how='left')\n \n df.drop(['day_of_week'], axis=1, inplace=True)\n \n df.drop(['_date'], axis=1, inplace=True) # in case we added it\n dummies.drop([date_label], axis=1, inplace=True)\n \n return list(dummies.columns), df\n\ndef add_month_of_year_dummies(df:pd.DataFrame, date_label:str=None) -> Tuple[List[str], pd.DataFrame]:\n if date_label is None:\n dates_index = pd.to_datetime(df.index)\n date_label = '_date'\n df[date_label] = dates_index\n \n else:\n dates_index = pd.to_datetime(df[date_label])\n\n df['month_of_year'] = dates_index.month_name()\n df['month_of_year'] = df['month_of_year'].str.lower()\n \n dummies = pd.get_dummies(df['month_of_year'])\n \n dummies[date_label] = df[date_label]\n \n df = pd.merge(df, dummies, left_on=date_label, right_on=date_label, how='left')\n \n df.drop(['month_of_year'], axis=1, inplace=True)\n\n df.drop(['_date'], axis=1, inplace=True) # in case we added it\n dummies.drop([date_label], axis=1, inplace=True)\n \n return list(dummies.columns), df\n\ndef add_payday_dummies(df:pd.DataFrame, date_label:str) -> Tuple[str, pd.DataFrame]:\n payday_column = 'payday'\n df[payday_column] = df[date_label].apply(lambda x:1 if x.strftime('%d') in ('14','15','16','30','31','1','2') else 0)\n\n return payday_column, df\n\ndef categorize_campaigns(df:pd.DataFrame, containing:str) -> Tuple[str, pd.DataFrame]: \n containing_cols = 
get_cols_containing(df, containing)\n\n agg_label = df[f'\"{containing}\" Agg']\n df[agg_label] = df[containing_cols].sum()\n\n return agg_label, df\n" ]
[ [ "pandas.to_datetime", "pandas.merge", "pandas.get_dummies" ] ]
bryevdv/cunumeric
[ "7965ceb96d3252371c22cf32d38ac91c4db77a38" ]
[ "tests/universal_functions_tests/floor_divide_tests/operator_broadcast.py" ]
[ "# Copyright 2021-2022 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n anp = np.random.randn(4, 5)\n b = random.randint(1, 13)\n a = num.array(anp)\n\n assert np.array_equal(a // b, anp // b)\n assert np.array_equal(b // a, b // anp)\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n" ]
[ [ "numpy.random.randn", "numpy.array_equal" ] ]
aleung12/manga
[ "5d2b4076592205b411ecc48881b00a512b468b86" ]
[ "simdisk.py" ]
[ "### last changed: 08/28/2018\n\nfrom astropy.io import fits\nimport numpy as np\nimport os, time, gc, sys, types\nfrom dirs import *\n\n\ndef mkdisk(pos_angle_deg,inclination_deg,ext,dim,V_sys=0.,V_max=220.,h_rot=10.,sigma_cen=250.):\n\n pos_angle = pos_angle_deg *np.pi/180\n inclination = inclination_deg *np.pi/180\n\n r_ip = np.zeros((dim,dim))\n R_gp = np.zeros((dim,dim))\n phi_ip = np.zeros((dim,dim))\n theta_gp = np.zeros((dim,dim))\n image = np.zeros((dim,dim))\n cen_x = np.shape(image)[1]//2\n cen_y = np.shape(image)[0]//2\n\n a = 0.5 *0.8 *dim\n b = a * np.cos(inclination)\n \n if 0 <= pos_angle < 1.5*np.pi: alpha = pos_angle + 0.5*np.pi\n else: alpha = pos_angle % (0.5*np.pi)\n\n ### for each image pixel, calculate radius r and azimuthal angle phi in image plane\n for y in range(np.shape(image)[0]):\n for x in range(np.shape(image)[1]):\n\n r = np.sqrt( (x-cen_x)**2 +(y-cen_y)**2 )\n\n ### azimuthal angle in image plane\n if (x == cen_x) and (y == cen_y): \n phi = pos_angle +0.5*np.pi\n else:\n phi = np.arctan2(y-cen_y,x-cen_x)\n if (x <= cen_x) and (y >= cen_y): phi -= 0.5*np.pi\n else: phi += 1.5*np.pi\n\n ### azimuthal angle in galaxy disk plane\n theta = np.arctan( np.tan(phi-pos_angle+0.5*np.pi) *np.cos(inclination) )\n if phi-pos_angle == 0:\n theta -= 0.5*np.pi\n elif 0 < pos_angle <= np.pi:\n if 0 < phi-pos_angle <= np.pi: theta += 0.5*np.pi\n else: theta += 1.5*np.pi\n elif np.pi < pos_angle < 2*np.pi:\n if pos_angle <= phi <= 2*np.pi: theta += 0.5*np.pi\n elif 0 <= phi < pos_angle-np.pi: theta += 0.5*np.pi\n else: theta += 1.5*np.pi\n\n r_ip[y,x] = r\n phi_ip[y,x] = phi\n theta_gp[y,x] = theta\n\n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n X = x-cen_x\n Y = y-cen_y\n\n ### (square of) radial coordinate in galaxy plane (ellipse de-projected) normalized to disk radius R\n p = (X*cos_alpha +Y*sin_alpha)**2 /a**2 + (X*sin_alpha -Y*cos_alpha)**2 /b**2\n\n ### radius in galaxy plane\n R = a * p**0.5\n R_gp[y,x] = R\n\n if True: #p <= 1: ### truncate after convolution (02/27/17)\n if ext == 'vel':\n image[y,x] = V_sys + V_max *np.sin(inclination) *np.tanh(R/h_rot) *np.cos(theta)\n \n elif ext == 'disp':\n image[y,x] = sigma_cen * np.exp(-p)\n\n writedir = modeldir\n \n #print writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+str(ext)+'disk.fits'\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+str(ext)+'disk.fits',image,overwrite=True)\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'tanh.fits',np.tanh(R_gp/h_rot),overwrite=True)\n if not ext == 'disp':\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'R_gp.fits',R_gp,overwrite=True)\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'r_im.fits',r_ip,overwrite=True)\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'theta_gp.fits',theta_gp,overwrite=True)\n fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'phi_im.fits',phi_ip,overwrite=True)\n\n\nif __name__ == '__main__':\n\n #PA_deg = [45, 135, 225, 315] #[0, 5, 15, 30, 45, 60, 75, 90, 120, 150, 175, 180]\n #PA_deg = [-45, -135]\n PA_deg = 10*(np.array(range(35))+1)\n inc_deg = [60] #s[30, 45, 60, 75] #1, 2, 3, 4, 5, 15, 30, 45, 60, 75, 85, 95, 105, 120, 135, 150, 165, 175, 180]\n exts = ['vel'] #,'disp']\n\n for PA in PA_deg:\n for inc in inc_deg:\n for ext in exts:\n mkdisk(PA,inc,ext,dim=72)\n print(' ### PA (degrees) = '+str(PA))\n print(' ### inclination 
(degrees) = '+str(inc))\n print(' ### time now: '+time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime()))\n print('')\n" ]
[ [ "numpy.sin", "numpy.zeros", "numpy.tan", "numpy.exp", "numpy.shape", "numpy.tanh", "numpy.arctan2", "numpy.sqrt", "numpy.cos" ] ]
SaashaJoshi/quantum-computing
[ "53ce0f9a3ca1ecdf3184c97f356a8ee3925498aa" ]
[ "pennylane-xanadu/pytorch-ml.py" ]
[ "import torch\nfrom torch.autograd import Variable\n# import matplotlib.pyplot as plt\n\ntensor_data = torch.tensor([(0, 0), (0.1, 0.1), (0.2, 0.2)])\t# (data, label)\n\ndef function(phi, input_data = None):\n\treturn phi*input_data\t# Some Linear model/function.\n\ndef loss_function(output, label):\n\treturn torch.abs(output-label)**2\t# Squared loss\n\ndef gradient(phi):\t\t# Minimize the average loss i.e. Gradient Descent\n\tc = 0\n\tfor data, label in data:\n\t\tc += loss_function(function(phi, x = data), label)\n\treturn c\n\nif __name__ == '__main__':\n\tphi = Variable(torch.tensor(0.1), requires_grad = True)\n\topt = torch.optim.Adam([phi], lr = 0.01)\t# Adam Optimizer with learning rate of 0.01\n\n\tfor i in range(5):\n\t\tloss = gradient(phi)\n\t\tloss.backward()\t\t# Gradients i.e. move backwards\n\t\topt.step() \t\t\t# Update parameters" ]
[ [ "torch.abs", "torch.optim.Adam", "torch.tensor" ] ]
khakhalin/mtg
[ "00a6707fb3d21aa29e6277050288d9ae36e130a0" ]
[ "bots/archived/danbot.py" ]
[ "from bot import Bot\nfrom csv import reader\nimport re\nfrom numpy import argsort\n\n\n\n# Because this bot works off of the draftsim ratings,\n# you need to pass it the filename for a set's rating csv\n\nclass DanBot(Bot):\n\n '''Class Constants'''\n PACK_SIZE = 15\n RATING_THRESHOLD = 2.0\n COLOR_COMMIT_THRESHOLD = 3.5\n TIME_TO_COMMIT = 1*PACK_SIZE+3\n MAX_BONUS_SPEC = 0.9\n ON_COLOR_BONUS = 2.0\n OFF_COLOR_PENALTY = 1.0\n SINGLE_COLOR_BIAS_FACTOR = 2.0\n SECOND_COLOR_FRACTION = 0.8\n MULTICOLOR_PENALTY = 0.6\n COLORS = ['W', 'U', 'B', 'R', 'G']\n HYBRID = {'A' : 'WB', 'D' : 'UB', 'M' : 'UG', 'L' : 'RG', 'I' : 'BG',\n 'V' : 'WB', 'S' : 'WR', 'Z' : 'UR', 'Y' : 'WG', 'K' : 'BR'}\n\n def __init__(self, card_csv=\"Draftsim Ratings - GRN.csv\", other_csv='Draftsim Ratings - GRN_land.csv'):\n Bot.__init__(self)\n\n self.card_set = self.get_card_dict(card_csv, other_csv)\n\n self.color_commitment = [0,0,0,0,0]\n self.eval_pack = []\n\n def get_choice(self, draft_frame):\n collection = draft_frame[0]\n pack = draft_frame[1]\n pick = draft_frame[2]\n\n evaluated_pack = [self.get_color_bias(card, self.get_color_commitment(collection)) + float(self.card_set[card]['rating']) for card in pack]\n self.eval_pack = evaluated_pack\n return argsort(evaluated_pack)\n\n # Input: card string name, array of color commitment to each color\n # output: float color bias\n # Note: must be run after calculating color commitment from collection\n # In the original code there was a lot of self.COLOR_COMMIT_THRESH/denom but I think thats just self.MAX_SPEC_BONUS\n def get_color_bias(self, card, commitment):\n card_colors = self.get_card_colors(card)\n num_card_colors = sum([1 if symbol != 0 else 0 for symbol in card_colors])\n num_commit_colors = sum([1 if symbol >= self.COLOR_COMMIT_THRESHOLD else 0 for symbol in commitment])\n\n denom = self.COLOR_COMMIT_THRESHOLD / self.MAX_BONUS_SPEC\n\n card_cost = self.get_card_cost(card)\n\n # 4-5 color cards get no bonus\n if num_card_colors > 3:\n return 0\n\n # 0 color cards\n elif num_card_colors == 0:\n # no bonus when the player has cards of only one color\n if num_commit_colors <= 1:\n return 0\n # min of max bonus or largest commitment/denom\n # originally the first option was the threshold over the denom but that should just be the max bonus\n else:\n return min(self.MAX_BONUS_SPEC, max(commitment) / denom)\n\n # 2-3 color cards\n elif num_card_colors in [2, 3]:\n bias = 0\n # loop through colors\n for i in range(len(commitment)):\n if self.COLORS[i] in card_cost:\n bias += commitment[i]\n else:\n bias -= commitment[i]\n return bias - self.MULTICOLOR_PENALTY\n\n # 1 color cards\n elif num_card_colors == 1:\n\n hybrid = self.is_hybrid(card) # boolean does this card have hybrid mana symbols?\n\n # Note that the commit_argsorted is ASCENDING\n commit_argsorted = argsort(commitment) # an list of the players color commitments\n\n # The hybrid case\n # Maybe this should actually be first?\n\n # TODO: Hybrid Mana Case\n\n # this one is kind of complicated. 
Might need to rework some of the variables from the other cases.\n # This needs to be reworked and moved to a different case.\n # The problem here is the way that we are tracking the number of colors in a card.\n # A hybrid card will get two colors in num_card_colors\n # drafting.js has only three cases for the hybrid cards: the player has only cards of one color\n # or the the player is committed to one color and neither of the card's colors are that color\n # the general case is just the highest bias among all colors that the card is in\n\n if hybrid:\n # this seems wrong. Can't tell what hybrid_color_index is\n bias = 0\n\n # set bias to highest bias among biases of colors in card\n for i in range(len(card_colors)):\n if card_colors[i] != 0:\n bias = max(bias, min(self.MAX_BONUS_SPEC, commitment[i] / denom))\n\n # the player is committed to only one color.\n if num_commit_colors == 1:\n bias /= self.SINGLE_COLOR_BIAS_FACTOR\n # If the the player is committed to one color, and neither of the hybrid colors are that color\n if num_commit_colors == 1:\n for i in range(len(card_colors)):\n if card_colors[i] != 0 and i == commit_argsorted[-1]:\n bias = max(self.SECOND_COLOR_FRACTION * self.MAX_BONUS_SPEC, bias)\n\n return bias\n\n # get the index of which color the card is\n # should be the max of the card colors if its the only non-zero entry\n color_index = card_colors.index(max(card_colors)) # index of the card's color\n\n # base bias\n bias = min(self.MAX_BONUS_SPEC, commitment[color_index] / denom)\n\n # the player is committed to no colors Case 2a in original code\n if num_commit_colors == 0:\n return bias\n\n # if player only has cards of one color\n if sum([1 if count != 0 else 0 for count in commitment]) == 1:\n bias /= self.SINGLE_COLOR_BIAS_FACTOR\n\n # if player is committed to only one color\n # and this card is of the second highest color in commitment\n # give it a slight bonus\n\n if num_commit_colors == 1 and color_index == commit_argsorted[-2]:\n bias = max(self.SECOND_COLOR_FRACTION * self.COLOR_COMMIT_THRESHOLD / denom, bias)\n\n return bias\n\n # Input: collection from dataset. 
Should be a list of card names\n # Output: array of number of mana symbols of each type shape 5,1\n def get_color_commitment(self, collection):\n temp_color_commitment = [0,0,0,0,0]\n\n for card in collection:\n new_card = self.get_card_colors(card)\n # pythonic vector addition\n temp_color_commitment = list(map(sum, zip(new_card, temp_color_commitment)))\n return temp_color_commitment\n\n def on_color(self, card):\n # TODO: write helper function\n # take in card string name\n # return boolean for on color or not\n pass\n\n # INPUT: Card name string\n # OUTPUT: Array with number of mana symbols of each type\n def get_card_colors(self, card):\n temp_colors = [0,0,0,0,0]\n cost = self.get_card_cost(card)\n\n # loop through characters in the manacost\n for char in cost:\n if char in self.COLORS:\n temp_colors[self.COLORS.index(char)] += 1\n\n return temp_colors\n\n # Helper function to get card costs\n # Input: card name string\n # Output: string of all costs with hybrid costs converted\n def get_card_cost(self, card):\n cost = self.card_set[card]['cost']\n if self.card_set[card]['cost2'] != 'none':\n cost += self.card_set[card]['cost2']\n\n for i in range(len(cost)):\n if cost[i] in self.HYBRID:\n cost += self.HYBRID[cost[i]]\n cost = cost[:i] + cost[i+1:]\n return cost\n\n # Input: card string name\n # Output: Boolean\n def is_hybrid(self, card):\n for char in self.card_set[card]['cost']:\n if char in self.HYBRID:\n return True\n else:\n continue\n if self.card_set[card]['cost2'] != 'none':\n for char in self.card_set[card]['cost2']:\n if char in self.HYBRID:\n return True\n else:\n continue\n return False\n\n @staticmethod\n def get_card_dict(file_name, other_file = 'none'):\n card_dict = {}\n with open(file_name, 'r') as f:\n r = reader(f)\n for row in r:\n card_dict[re.sub(',_', '_', row[0])] = {\n 'cost' : row[1],\n 'cost2' : row[2],\n 'rarity' : row[4],\n 'rating' : row[5]\n }\n if other_file != 'none':\n with open(other_file, 'r') as f:\n r = reader(f)\n for row in r:\n card_dict[re.sub(',_', '_', row[0])] = {\n 'cost': row[1],\n 'cost2': row[2],\n 'rarity': row[4],\n 'rating': row[5]\n }\n\n return card_dict\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.argsort" ] ]
SDomarecki/Specusticc
[ "c2c2c487cce1e55f7ce6c149bc798a47a967c97c" ]
[ "specusticc/model_creating/models/transformer_classes/multi_head_attention.py" ]
[ "import tensorflow as tf\n\n\n# source: https://www.tensorflow.org/tutorials/text/transformer#multi-head_attention\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n\n self.depth = d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(d_model)\n self.wk = tf.keras.layers.Dense(d_model)\n self.wv = tf.keras.layers.Dense(d_model)\n\n self.dense = tf.keras.layers.Dense(d_model)\n\n def split_heads(self, x, batch_size):\n \"\"\"Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n \"\"\"\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs, **kwargs): # pragma: no cover\n [v, k, q] = inputs\n batch_size = tf.shape(q)[0]\n\n q = self.wq(q) # (batch_size, seq_len, d_model)\n k = self.wk(k) # (batch_size, seq_len, d_model)\n v = self.wv(v) # (batch_size, seq_len, d_model)\n\n q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)\n k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)\n v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)\n\n # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)\n\n scaled_attention = tf.transpose(\n scaled_attention, perm=[0, 2, 1, 3]\n ) # (batch_size, seq_len_q, num_heads, depth)\n\n concat_attention = tf.reshape(\n scaled_attention, (batch_size, -1, self.d_model)\n ) # (batch_size, seq_len_q, d_model)\n\n output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)\n\n return output, attention_weights\n\n\ndef scaled_dot_product_attention(q, k, v):\n \"\"\"Calculate the attention weights.\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n q: query shape == (..., seq_len_q, depth)\n k: key shape == (..., seq_len_k, depth)\n v: value shape == (..., seq_len_v, depth_v)\n\n Returns:\n output, attention_weights\n \"\"\"\n\n matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)\n\n # scale matmul_qk\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n # softmax is normalized on the last axis (seq_len_k) so that the scores\n # add up to 1.\n attention_weights = tf.nn.softmax(\n scaled_attention_logits, axis=-1\n ) # (..., seq_len_q, seq_len_k)\n\n output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)\n\n return output, attention_weights\n" ]
[ [ "tensorflow.shape", "tensorflow.matmul", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.keras.layers.Dense", "tensorflow.math.sqrt", "tensorflow.nn.softmax" ] ]
jscarlson/EasyOCR
[ "69010985a582c7eebf8dfd8db812371e77a26755" ]
[ "trainer/tk1957_split.py" ]
[ "from numpy import save\nimport pandas as pd\nimport os\nfrom glob import glob\nimport numpy as np\nfrom shutil import copy\nimport json\n\n\nif __name__ == \"__main__\":\n\n save_dir = \"./easyocr_data\"\n os.makedirs(save_dir, exist_ok=True)\n train_dir = os.path.join(save_dir, \"tk1957_train\")\n val_dir = os.path.join(save_dir, \"tk1957_val\")\n os.makedirs(train_dir, exist_ok=True)\n os.makedirs(val_dir, exist_ok=True)\n\n root_dir = \"/mnt/122a7683-fa4b-45dd-9f13-b18cc4f4a187/ocr_datasets/teikoku/1957\"\n chars_dir = os.path.join(root_dir, \"char_crops\")\n seg_dir_unlab = os.path.join(root_dir, \"seg_unlabeled\")\n seg_dir_lab = os.path.join(root_dir, \"seg_labeled\")\n coco_train_path = os.path.join(root_dir, \"tk1957_ann_file_train70.json\")\n coco_val_path = os.path.join(root_dir, \"tk1957_ann_file_test30.json\")\n seg_paths = glob(os.path.join(seg_dir_unlab, \"*.png\"))\n seg_basenames = [os.path.basename(x) for x in seg_paths]\n\n with open(coco_train_path) as f: coco_train = json.load(f)\n with open(coco_val_path) as f: coco_val = json.load(f)\n \n train_seg_basenames = [x['file_name'] for x in coco_train[\"images\"]]\n val_seg_basenames = [x['file_name'] for x in coco_val[\"images\"]]\n print(f\"Len val segs {len(val_seg_basenames)}; len train segs {len(train_seg_basenames)}\")\n\n train_labels = []\n for sbname in train_seg_basenames:\n sbname_w_labels = [x for x in os.listdir(seg_dir_lab) if \"_\".join(x.split(\"_\")[:-1]) == os.path.splitext(sbname)[0]][0]\n seq_str = os.path.splitext(sbname_w_labels)[0].split(\"_\")[-1]\n if \"ใ‚ฑใ‚™\" in seq_str:\n seq_str = seq_str.replace(\"ใ‚ฑใ‚™\", \"ใ‚ฒ\") \n train_labels.append((sbname, seq_str))\n copy(os.path.join(seg_dir_unlab, sbname), train_dir)\n train_labels_df = pd.DataFrame(train_labels, columns=[\"filename\", \"words\"])\n train_labels_df.to_csv(os.path.join(train_dir, \"labels.csv\"))\n\n val_labels = []\n for sbname in val_seg_basenames:\n sbname_w_labels = [x for x in os.listdir(seg_dir_lab) if \"_\".join(x.split(\"_\")[:-1]) == os.path.splitext(sbname)[0]][0]\n seq_str = os.path.splitext(sbname_w_labels)[0].split(\"_\")[-1]\n if \"ใ‚ฑใ‚™\" in seq_str:\n seq_str = seq_str.replace(\"ใ‚ฑใ‚™\", \"ใ‚ฒ\") \n val_labels.append((sbname, seq_str))\n copy(os.path.join(seg_dir_unlab, sbname), val_dir)\n val_labels_df = pd.DataFrame(val_labels, columns=[\"filename\", \"words\"])\n val_labels_df.to_csv(os.path.join(val_dir, \"labels.csv\"))" ]
[ [ "pandas.DataFrame" ] ]
trianglecity/theano-develop-docker
[ "ece376fd7819ffe840871cc6ab8ee7c06c4984fb" ]
[ "theano/gpuarray/type.py" ]
[ "from __future__ import absolute_import, print_function, division\nimport numpy\nimport six.moves.copyreg as copyreg\nfrom six import iteritems\nimport warnings\n\nimport theano\nfrom theano.tensor.type import TensorType\nfrom theano.tensor.var import _tensor_py_operators\nfrom theano import Type, Variable, Constant, tensor, config, scalar\nfrom theano.compile import SharedVariable\n\n# Make sure this is importable even if pygpu is absent\n# (it will not work though)\ntry:\n import pygpu\n from pygpu import gpuarray\n from pygpu.elemwise import compare, elemwise2\nexcept ImportError:\n pygpu = None\n\n_context_reg = {}\n\n\ndef move_to_gpu(data):\n \"\"\"\n Do we want to move this computation to the GPU?\n\n Currently, we don't move complex and scalar int.\n\n Parameters\n ----------\n data : numpy.ndarray or TensorVariable\n (it must have dtype and ndim parameter)\n \"\"\"\n # We don't support complex on the GPU\n if str(data.dtype) in tensor.basic.complex_dtypes:\n return False\n # We don't want scalar int on the GPU.\n if data.ndim == 0 and str(data.dtype) in tensor.basic.discrete_dtypes:\n return False\n return True\n\n\nclass ContextNotDefined(ValueError):\n pass\n\n\ndef reg_context(name, ctx):\n \"\"\"\n Register a context by mapping it to a name.\n\n The context must be of type `GpuContext` and the name can be\n anything hashable (but is usually a string). Only one context can\n be registered per name and the second registration for a given\n name will raise an error.\n\n Parameters\n ----------\n name : hashable object\n Name to associate the context with (usually a string)\n ctx : GpuContext\n Context instance\n\n \"\"\"\n if name in _context_reg:\n raise ValueError(\"context name %s is already defined\" % (name,))\n if not isinstance(ctx, gpuarray.GpuContext):\n raise TypeError(\"context is not GpuContext\")\n _context_reg[name] = ctx\n\n\ndef get_context(name):\n \"\"\"\n Retrive the context associated with a name.\n\n Return the context object mapped to `ref` that was previously\n register through :func:`reg_context`. 
Trying to get the context\n for an unregistered `ref` will raise a exception.\n\n Parameters\n ----------\n name : hashable object\n Name associated with the context we want (usually a string)\n\n \"\"\"\n if name not in _context_reg:\n raise ContextNotDefined(\"context name %s not defined\" % (name,))\n return _context_reg[name]\n\n\ndef list_contexts():\n \"\"\"\n Return an iterable of all the registered context names.\n\n \"\"\"\n return _context_reg.keys()\n\n\n# Private method\ndef _name_for_ctx(ctx):\n for k, v in iteritems(_context_reg):\n if v == ctx:\n return k\n raise ContextNotDefined('context is not registered')\n\n\n# This is a private method for use by the tests only\ndef _unreg_context(name):\n del _context_reg[name]\n\n\nclass GpuArrayType(Type):\n \"\"\"\n The type that represents an array on a gpu.\n\n The `dtype` indicates what scalar data type the elements of\n variables of this type will be.\n\n `broadcastable` indicates whether each dimension is broadcastable\n or not (to be broadcastable a dimension must always be of length\n 1).\n\n The `context_name` is the name of the context on will values of\n variables of this type will be stored.\n\n Parameters\n ----------\n dtype : str\n The name of a numpy dtype\n broadcastable : tuple of bools\n A tuple that indicates both the number of dimensions (by its\n length) and whether those dimensions are broadcastable or not\n (by the boolean values).\n context_name : str\n The name of the context the that this type is attached to\n (default: None, which is the context specified by\n config.device).\n name : string, optional\n A name for the type that will be used in printouts.\n\n Attributes\n ----------\n dtype : str\n Data type used for scalar elements of variables.\n broadcastable : tuple of bools\n Indicates whether the dimensions are broadcastable or not.\n ndim : int\n The number of dimensions\n context_name : str\n The name of a gpu context on which variables will have their values.\n name : str\n A string used to print the type if given.\n typecode : int\n The gpuarray typecode for `dtype`\n\n See Also\n --------\n theano.gof.type.PureType\n\n \"\"\"\n def __init__(self, dtype, broadcastable, context_name=None, name=None):\n # In case this was not provided and no global value is available\n self.dtype = str(dtype)\n self.broadcastable = tuple(bool(b) for b in broadcastable)\n self.ndim = len(self.broadcastable)\n self.name = name\n self.context_name = context_name\n # This will check that the passed context name is valid and registered.\n get_context(self.context_name)\n try:\n self.typecode = gpuarray.dtype_to_typecode(self.dtype)\n except gpuarray.GpuArrayException:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def clone(self, dtype=None, broadcastable=None):\n if dtype is None:\n dtype = self.dtype\n if broadcastable is None:\n broadcastable = self.broadcastable\n return self.__class__(dtype=dtype, broadcastable=broadcastable,\n context_name=self.context_name, name=self.name)\n\n # This is a property to keep the type pickleable\n @property\n def context(self):\n \"\"\"\n The context object mapped to the type's :attr:`context_name`.\n This is a property.\n\n \"\"\"\n return get_context(self.context_name)\n\n def __repr__(self):\n return \"GpuArrayType<%s>(%s, %s)\" % (self.context_name, self.dtype,\n self.broadcastable)\n\n def filter(self, data, strict=False, allow_downcast=None):\n if (isinstance(data, gpuarray.GpuArray) and\n data.typecode == self.typecode):\n # 
This is just to make this condition not enter the\n # following branches\n pass\n elif strict:\n if not isinstance(data, gpuarray.GpuArray):\n raise TypeError(\"%s expected a GpuArray object.\" % self,\n data, type(data))\n if self.typecode != data.typecode:\n raise TypeError(\"%s expected typecode %d (dtype %s), \"\n \"got %d (dtype %s).\" %\n (self, self.typecode, self.dtype,\n data.typecode, str(data.dtype)))\n if self.context != data.context:\n raise TypeError(\"data context does not match type context\")\n # fallthrough to ndim check\n elif (allow_downcast or\n (allow_downcast is None and\n type(data) == float and\n self.dtype == config.floatX)):\n data = gpuarray.array(data, dtype=self.typecode, copy=False,\n ndmin=len(self.broadcastable),\n context=self.context)\n else:\n if not hasattr(data, 'dtype'):\n converted_data = theano._asarray(data, self.dtype)\n # We use the `values_eq` static function from TensorType\n # to handle NaN values.\n if TensorType.values_eq(numpy.asarray(data),\n converted_data,\n force_same_dtype=False):\n data = converted_data\n data = gpuarray.array(data, context=self.context)\n\n up_dtype = scalar.upcast(self.dtype, data.dtype)\n if up_dtype == self.dtype:\n data = gpuarray.array(data, dtype=self.dtype, copy=False,\n context=self.context)\n else:\n raise TypeError(\"%s cannot store a value of dtype %s \"\n \"without risking loss of precision.\" %\n (self, data.dtype))\n\n if self.ndim != data.ndim:\n raise TypeError(\"Wrong number of dimensions: expected %s, \"\n \"got %s with shape %s.\" % (self.ndim, data.ndim,\n data.shape), data)\n shp = data.shape\n for i, b in enumerate(self.broadcastable):\n if b and shp[i] != 1:\n raise TypeError(\"Non-unit value on shape on a broadcastable\"\n \" dimension.\", shp, self.broadcastable)\n return data\n\n def filter_variable(self, other, allow_convert=True):\n from theano.gpuarray.basic_ops import gpu_from_host\n\n if hasattr(other, '_as_GpuArrayVariable'):\n other = other._as_GpuArrayVariable(self.context_name)\n\n if not isinstance(other, Variable):\n other = self.Constant(type=self, data=other)\n\n if other.type == self:\n return other\n\n if not isinstance(other.type, tensor.TensorType):\n raise TypeError('Incompatible type', (self, other.type))\n if (other.type.dtype != self.dtype):\n raise TypeError('Incompatible dtype', (self.dtype,\n other.type.dtype))\n if other.type.ndim != self.ndim:\n raise TypeError('Incompatible number of dimensions.'\n ' Expected %d, got %d.' % (self.ndim, other.ndim))\n if other.type.broadcastable != self.broadcastable:\n if allow_convert:\n type2 = other.type.clone(broadcastable=self.broadcastable)\n other2 = type2.convert_variable(other)\n else:\n other2 = None\n if other2 is None:\n raise TypeError('Incompatible broadcastable dimensions.'\n ' Expected %s, got %s.' 
%\n (str(other.type.broadcastable),\n str(self.broadcastable)))\n other = other2\n\n return gpu_from_host(self.context_name)(other)\n\n @staticmethod\n def values_eq(a, b, force_same_dtype=True):\n if a.shape != b.shape:\n return False\n if force_same_dtype and a.typecode != b.typecode:\n return False\n a_eq_b = numpy.asarray(compare(a, '==', b))\n if a_eq_b.all():\n return True\n\n # maybe the trouble is that there are NaNs\n a = numpy.asarray(a)\n b = numpy.asarray(b)\n\n a_missing = numpy.isnan(a)\n if a_missing.any():\n b_missing = numpy.isnan(b)\n return numpy.all(a_eq_b + (a_missing == b_missing))\n else:\n return False\n\n @staticmethod\n def values_eq_approx(a, b,\n allow_remove_inf=False, allow_remove_nan=False,\n rtol=None, atol=None):\n if a.shape != b.shape or a.dtype != b.dtype:\n return False\n if str(a.dtype) in theano.tensor.discrete_dtypes:\n return GpuArrayType.values_eq(a, b)\n else:\n if allow_remove_inf or allow_remove_nan:\n raise NotImplementedError(\n \"GpuArrayType.values_eq_approx() don't implemented the\"\n \" allow_remove_inf and allow_remove_nan parameter\")\n atol_, rtol_ = theano.tensor.basic._get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n res = elemwise2(a, '', b, a, odtype=numpy.dtype('bool'),\n op_tmpl=\"res = (fabs(a - b) <\"\n \"(%(atol_)s + %(rtol_)s * fabs(b)))\" %\n locals())\n ret = numpy.asarray(res).all()\n if ret:\n return True\n # maybe the trouble is that there are NaNs\n an = numpy.asarray(a)\n bn = numpy.asarray(b)\n return tensor.TensorType.values_eq_approx(\n an, bn, allow_remove_inf=allow_remove_inf,\n allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)\n\n @staticmethod\n def may_share_memory(a, b):\n if (not isinstance(a, gpuarray.GpuArray) or\n not isinstance(b, gpuarray.GpuArray)):\n return False\n return pygpu.gpuarray.may_share_memory(a, b)\n\n def value_zeros(self, shape):\n return pygpu.gpuarray.zeros(shape, dtype=self.typecode,\n context=self.context)\n\n def make_variable(self, name=None):\n return self.Variable(self, name=name)\n\n def __eq__(self, other):\n return (type(self) == type(other) and\n self.typecode == other.typecode and\n self.broadcastable == other.broadcastable and\n self.context_name == other.context_name)\n\n def convert_variable(self, var):\n vt = var.type\n if (type(self) == type(vt) and\n self.typecode == vt.typecode and\n self.ndim == vt.ndim and\n self.context_name == vt.context_name and\n all(sb == ob or ob for sb, ob in zip(self.broadcastable,\n vt.broadcastable))):\n return theano.tensor.patternbroadcast(var, self.broadcastable)\n\n def __hash__(self):\n return hash((type(self), self.typecode, self.broadcastable,\n self.context_name))\n\n def dtype_specs(self):\n \"\"\"\n Return a tuple (python type, c type, numpy typenum) that corresponds\n to self.dtype.\n\n This function is used internally as part of C code generation.\n\n \"\"\"\n try:\n return {\n 'float16': (float, 'npy_float16', 'NPY_FLOAT16'),\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\n 'bool': (int, 'npy_bool', 'NPY_BOOL'),\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\n # 'complex128': (complex, 'theano_complex128', 
'NPY_COMPLEX128'),\n # 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\n }[self.dtype]\n except KeyError:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def get_shape_info(self, obj):\n return obj.shape\n\n def get_size(self, shape_info):\n if shape_info:\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\n else:\n return numpy.dtype(self.dtype).itemsize\n\n def c_declare(self, name, sub, check_input=True):\n return \"\"\"\n PyGpuArrayObject *%(name)s;\n \"\"\" % locals()\n\n def c_init(self, name, sub):\n return \"%s = NULL;\" % (name,)\n\n def c_extract(self, name, sub, check_input=True):\n # TODO I don't check broadcast stuff for now.\n return \"\"\"\n %(name)s = NULL;\n if (py_%(name)s == Py_None) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray, not None\");\n %(fail)s\n }\n /* First check if we are the base type exactly (the most common case),\n then do the full subclass check if needed. */\n if (py_%(name)s->ob_type != &PyGpuArrayType &&\n !PyObject_TypeCheck(py_%(name)s, &PyGpuArrayType)) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray\");\n %(fail)s\n }\n %(name)s = (PyGpuArrayObject *)py_%(name)s;\n Py_INCREF(%(name)s);\n \"\"\" % {'name': name, 'fail': sub['fail']}\n\n def c_cleanup(self, name, sub):\n return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % {'name': name}\n\n def c_sync(self, name, sub):\n return \"\"\"\n if (!%(name)s) {\n Py_XDECREF(py_%(name)s);\n Py_INCREF(Py_None);\n py_%(name)s = Py_None;\n } else if ((void *)py_%(name)s != (void *)%(name)s) {\n Py_XDECREF(py_%(name)s);\n py_%(name)s = (PyObject *)%(name)s;\n Py_INCREF(py_%(name)s);\n }\n \"\"\" % {'name': name}\n\n def c_init_code(self):\n # We don't actually need the numpy API except in\n # HostFromGpu and GpuFromHost and those case will be covered\n # by the TensorType parameter\n return ['import_pygpu__gpuarray();']\n\n def c_headers(self):\n # We need arrayobject for the PyArrayDescr struct def\n # (even if we just use a pointer to it in a function def)\n return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',\n '<gpuarray/error.h>', '<gpuarray/buffer.h>',\n '<gpuarray/buffer_blas.h>', '<numpy/arrayobject.h>',\n '<gpuarray_api.h>']\n\n def c_header_dirs(self):\n return [pygpu.get_include(), numpy.get_include()]\n\n def c_libraries(self):\n return ['gpuarray']\n\n def c_code_cache_version(self):\n ver = pygpu.gpuarray.abi_version()\n # we only use the major version since the minor revision are compatible.\n return (2, ver[0])\n\n\nclass _operators(_tensor_py_operators):\n def _as_TensorVariable(self):\n from .basic_ops import host_from_gpu\n return host_from_gpu(self)\n\n def _as_GpuArrayVariable(self, context_name):\n if self.type.context_name == context_name:\n return self\n else:\n from .basic_ops import GpuToGpu\n return GpuToGpu(context_name)(self)\n\n\nclass GpuArrayVariable(_operators, Variable):\n \"\"\"\n A variable representing a computation on a certain GPU.\n\n This supports all the operations that :class:`TensorType`\n supports.\n\n See Also\n --------\n Variable\n\n \"\"\"\n\n # override the default\n def __repr_test_value__(self):\n return repr(numpy.array(theano.gof.op.get_test_value(self)))\n\n\nGpuArrayType.Variable = GpuArrayVariable\n\n\nclass GpuArraySignature(tensor.TensorConstantSignature):\n # might do something better if we can run the sum on the GPU, but\n # for now this will suffice.\n pass\n\n\nclass GpuArrayConstant(_operators, Constant):\n \"\"\"\n A constant representing a value 
on a certain GPU.\n\n This supports all the operations that :class:`TensorType`\n supports.\n\n See Also\n --------\n Constant\n\n \"\"\"\n def signature(self):\n return GpuArraySignature((self.type, numpy.asarray(self.data)))\n\n def __str__(self):\n if self.name is not None:\n return self.name\n try:\n np_data = numpy.asarray(self.data)\n except gpuarray.GpuArrayException:\n np_data = self.data\n return \"GpuArrayConstant{%s}\" % np_data\n\n\nGpuArrayType.Constant = GpuArrayConstant\n\n\nclass GpuArraySharedVariable(_operators, SharedVariable):\n \"\"\"\n A variable representing a shared value on a certain GPU.\n\n This supports all the operations that :class:`TensorType`\n supports.\n\n See Also\n --------\n SharedVariable\n\n \"\"\"\n def get_value(self, borrow=False, return_internal_type=False):\n if return_internal_type:\n if borrow:\n return self.container.value\n else:\n return self.container.value.copy()\n else:\n return numpy.asarray(self.container.value)\n\n def set_value(self, value, borrow=False):\n if isinstance(value, pygpu.gpuarray.GpuArray):\n value = pygpu.gpuarray.array(value, copy=(not borrow),\n context=self.type.context)\n self.container.value = value\n\n def __getitem__(self, *args):\n return _operators.__getitem__(self, *args)\n\n\nGpuArrayType.SharedVariable = GpuArraySharedVariable\nnotset = object()\n\n\ndef gpuarray_shared_constructor(value, name=None, strict=False,\n allow_downcast=None, borrow=False,\n broadcastable=None, target=notset):\n \"\"\"\n SharedVariable constructor for GpuArrayType.\n\n See :func:`theano.shared`.\n\n :target: default None\n The device target. As None is a valid value and we need to\n differentiate from the parameter notset and None, we use a\n notset object.\n\n \"\"\"\n if target == 'gpu' or target == 'cpu':\n raise TypeError('not for me')\n\n if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):\n raise TypeError('ndarray or GpuArray required')\n\n if target is notset:\n target = None\n if not move_to_gpu(value):\n raise TypeError('We do not move that data by default to the GPU')\n try:\n get_context(target)\n except ContextNotDefined:\n # Don't make this a hard error if we attempt to make a shared\n # variable while there is no default context.\n if target is None:\n raise TypeError('No default context and no context specified')\n raise\n\n if broadcastable is None:\n broadcastable = (False,) * value.ndim\n type = GpuArrayType(value.dtype, broadcastable, context_name=target)\n deviceval = pygpu.gpuarray.array(value, copy=(not borrow),\n context=type.context)\n return GpuArraySharedVariable(type=type, value=deviceval, name=name,\n strict=strict)\n\ntheano.compile.register_view_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n\"\"\", version=(0,))\n\n# Register GpuArrayType C code for Shape Op.\ntheano.compile.register_shape_c_code(\n GpuArrayType,\n \"\"\"\n npy_intp shape[] = {%(iname)s->ga.nd};\n if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))\n {\n Py_XDECREF(%(oname)s);\n %(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);\n }\n for(int i=0;i<shape[0];i++)\n {\n ((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = %(iname)s->ga.dimensions[i];\n }\n \"\"\",\n version=1)\n\ntheano.compile.register_shape_i_c_code(\n GpuArrayType,\n \"\"\"\n if(!%(oname)s)\n %(oname)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);\n ((npy_int64*)PyArray_DATA(%(oname)s))[0] =\n %(iname)s->ga.dimensions[%(i)s];\n \"\"\",\n \"\"\"\n if 
(%(i)s>=%(iname)s->ga.nd){\n PyErr_SetString(PyExc_TypeError,\n \"Number of dimensions lower than expected\");\n %(fail)s\n }\n \"\"\",\n version=(1,))\n\ntheano.compile.register_deep_copy_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = pygpu_copy(%(iname)s, GA_ANY_ORDER);\n if (!%(oname)s) { %(fail)s }\n\"\"\", version=(5,))\n\ntheano.compile.register_rebroadcast_c_code(\n GpuArrayType,\n \"\"\"\n if(%(iname)s->ga.dimensions[%(axis)s] != 1){\n PyErr_Format(PyExc_ValueError,\n \"Dimension %(axis)s in Rebroadcast's input was\"\n \" supposed to be 1 (got %%d instead)\",\n %(iname)s->ga.dimensions[%(axis)s]);\n %(fail)s\n }\n \"\"\",\n version=1)\n\ntheano.compile.register_specify_shape_c_code(\n GpuArrayType,\n \"\"\"\n if (PyGpuArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {\n PyErr_Format(PyExc_AssertionError,\n \"SpecifyShape: vector of shape has %%d elements,\"\n \" but the input has %%d dimensions.\",\n PyGpuArray_NDIM(%(iname)s),\n PyArray_DIMS(%(shape)s)[0]);\n %(fail)s;\n }\n for(int i = 0; i < PyGpuArray_NDIM(%(iname)s); i++){\n dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,\n i))[0];\n if (PyGpuArray_DIMS(%(iname)s)[i] != shp) {\n PyErr_Format(PyExc_AssertionError,\n \"SpecifyShape: dim %%d of input has shape %%d,\"\n \" expected %%d.\",\n i, PyGpuArray_DIMS(%(iname)s)[i],\n shp);\n %(fail)s;\n }\n }\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n \"\"\",\n version=1,\n c_support_code_apply='#include <numpy_compat.h>')\n\n\nclass GpuContextType(Type):\n \"\"\"\n Minimal type used for passing contexts to nodes.\n\n This Type is not a complete type and should never be used for\n regular graph operations.\n\n \"\"\"\n def filter(self, data, strict=False, allow_downcast=None):\n if not isinstance(data, gpuarray.GpuContext):\n raise TypeError('context is not a GpuContext')\n return data\n\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __hash__(self):\n return hash(type(self))\n\n @staticmethod\n def values_eq(a, b):\n return a == b\n\n def c_declare(self, name, sub, check_input=True):\n return \"PyGpuContextObject *%s;\" % (name,)\n\n def c_init(self, name, sub):\n return \"%s = NULL;\" % (name,)\n\n def c_extract(self, name, sub, check_input=True):\n if check_input:\n res = \"\"\"\nif (!PyObject_TypeCheck(py_%(name)s, &PyGpuContextType)) {\n PyErr_SetString(PyExc_TypeError, \"expected a GpuContext\");\n %(fail)s\n}\n\"\"\" % dict(name=name, fail=sub['fail'])\n else:\n res = \"\"\n return res + \"\"\"\n%(name)s = (PyGpuContextObject *)py_%(name)s;\nPy_INCREF(%(name)s);\n\"\"\" % dict(name=name)\n\n def c_cleanup(self, name, sub):\n return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % dict(name=name)\n\n # c_sync is intentionally not declared to prevent normal usage\n\n def c_init_code(self):\n return ['import_pygpu__gpuarray();']\n\n def c_headers(self):\n return ['<gpuarray_api.h>']\n\n def c_header_dirs(self):\n return [pygpu.get_include()]\n\n def c_code_cache_version(self):\n ver = pygpu.gpuarray.api_version()\n return (0, ver[0])\n\n # Variable, Contstant, ... not declared\n\n\"\"\"\nInstance of :class:`GpuContextType` to use for the context_type\ndeclaration of an operation.\n\"\"\"\ngpu_context_type = GpuContextType()\n\n\n# THIS WORKS But GpuArray instances don't compare equal to one\n# another, and what about __hash__ ? So the unpickled version doesn't\n# equal the pickled version, and the cmodule cache is not happy with\n# the situation. 
The old back-end have this same comment and use the\n# same mechanism.\ndef GpuArray_unpickler(npa, ctx_name):\n if config.experimental.unpickle_gpu_on_cpu:\n # directly return numpy array\n warnings.warn(\n \"config.experimental.unpickle_gpu_on_cpu is set to True. \"\n \"Unpickling GpuArray as numpy.ndarray\")\n return npa\n elif pygpu:\n ctx = get_context(ctx_name)\n return pygpu.gpuarray.array(npa, copy=True, context=ctx)\n else:\n raise ImportError(\"pygpu not found. Cannot unpickle GpuArray\")\n\ncopyreg.constructor(GpuArray_unpickler)\n\n\ndef GpuArray_pickler(cnda):\n ctx_name = _name_for_ctx(cnda.context)\n return (GpuArray_unpickler, (numpy.asarray(cnda), ctx_name))\n\n# In case pygpu is not imported.\nif pygpu is not None:\n copyreg.pickle(pygpu.gpuarray.GpuArray,\n GpuArray_pickler,\n GpuArray_unpickler)\n" ]
[ [ "numpy.isnan", "numpy.asarray", "numpy.prod", "numpy.all", "numpy.get_include", "numpy.dtype" ] ]
SunYH66/adn-master
[ "d69a73e2f9cf2a4472c1d97f7347677b1947543a" ]
[ "adn/utils/log.py" ]
[ "import os\nimport os.path as path\nimport csv\nimport numpy as np\nimport SimpleITK as sitk\nimport yaml\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom collections import defaultdict, OrderedDict\n\n\nclass Logger(object):\n def __init__(self, log_dir, epoch=0, name=\"log\"):\n self.log_dir = log_dir\n self.epoch = epoch\n self.name = name if name != \"\" else \"log\"\n self.iter_visual_freq = float('inf')\n self.loss_freq = float('inf')\n self.save_freq = float('inf')\n self.format_float = \\\n lambda x: np.format_float_scientific(x, exp_digits=1, precision=2)\n\n def _to_dict(self, d):\n # TODO: two dicts pointing to each other triggers an infinite recursion\n if type(d) is defaultdict:\n d = dict(d)\n for k, v in d.items():\n if type(v) is dict or type(v) is defaultdict:\n d[k] = self._to_dict(v)\n return d\n\n def reset(self):\n if hasattr(self, 'loss'): self.loss = defaultdict(list)\n if hasattr(self, 'metrics'): self.metrics = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n def add_loss_log(self, loss_fcn, loss_freq, window_size=100):\n self.loss = defaultdict(list)\n self.loss_fcn = loss_fcn\n self.loss_freq = loss_freq\n self.window_size = window_size\n\n def add_save_log(self, save_fcn, save_freq):\n self.save_fcn = save_fcn\n self.save_freq = save_freq\n \n if hasattr(self.save_fcn, \"__self__\"):\n model = self.save_fcn.__self__\n with open(path.join(self.log_dir, \"graph.txt\"), \"w\") as f:\n f.write(self.get_graph(model))\n\n def add_eval_log(self, eval_fcn, eval_freq):\n self.eval_fcn = eval_fcn\n self.eval_freq = eval_freq\n\n def add_metric_log(self, pair_fcn, metrics_fcns, metrics_freq=1):\n self.pair_fcn = pair_fcn\n self.metrics_cnt = 0\n self.metrics = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n self.metrics_fcns = metrics_fcns\n self.metrics_freq = metrics_freq\n\n def add_iter_visual_log(self, iter_visual_fcn, iter_visual_freq, name=\"\"):\n self.iter_visual_fcn = iter_visual_fcn\n self.iter_visual_freq = iter_visual_freq\n self.iter_visual_name = name\n\n def add_epoch_visual_log(self, epoch_visual_fcn, epoch_visual_freq, name=\"\"):\n self.epoch_visual_fcn = epoch_visual_fcn\n self.epoch_visual_freq = epoch_visual_freq\n self.epoch_visual_name = name\n\n def set_progress(self, progress):\n desc = '[{}][epoch{}]'.format(self.name, self.epoch)\n if hasattr(self, 'loss'):\n if len(self.loss) < 5:\n loss_str = \" \".join([\"{} {:.2e}({:.2e})\".format(\n k, v[-1], np.mean(v)) for k, v in self.loss.items()])\n else:\n loss_str = \" \".join([\"{} {}\".format(\n k, self.format_float(np.mean(v)))\n for k, v in self.loss.items()])\n\n desc += loss_str\n if hasattr(self, 'metrics'):\n res_str = \" \"\n for k, res in self.metrics['mean'].items():\n res_str += \"{}-> \".format(k)\n for j, m in res.items():\n res_str += \"{}: {:.2e} \".format(j, m)\n res_str += \" \"\n desc += res_str\n\n progress.set_description(desc=desc)\n\n def get_graph(self, model):\n model_str = \"\"\n if hasattr(model, 'parameters'):\n model_str += model.__repr__() + \"\\n\"\n else:\n for k in model.__dir__():\n if not k.startswith(\"_\"):\n v = getattr(model, k)\n if hasattr(v, 'parameters'):\n model_str += k + \":\\n\"\n model_str += self.get_graph(v)\n return model_str\n\n def __call__(self, iterable):\n progress = tqdm(iterable, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}')\n for it, obj in enumerate(progress):\n yield obj\n\n if hasattr(self, 'loss_fcn') and it % self.loss_freq == 0:\n loss = self.loss_fcn()\n for k, v in loss.items():\n if 
len(self.loss[k]) > self.window_size:\n self.loss[k].pop(0)\n self.loss[k].append(v)\n\n log_file = path.join(self.log_dir, 'loss.csv')\n with open(log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([self.epoch, it] + list(loss.values()))\n\n if hasattr(self, 'iter_visual_fcn') and it % self.iter_visual_freq == 0:\n for k, v in self.iter_visual_fcn().items():\n v = v.detach().cpu().numpy()\n iter_visual_dir = path.join(self.log_dir, self.iter_visual_name)\n if not path.isdir(iter_visual_dir): os.makedirs(iter_visual_dir)\n visual_file = path.join(iter_visual_dir,\n \"epoch{}_iter{:0>3d}_{}.nii.gz\".format(self.epoch, it, k))\n sitk.WriteImage(sitk.GetImageFromArray(v), visual_file)\n # Image.fromarray(v).convert('RGB').save(visual_file)\n\n if hasattr(self, 'pair_fcn') and it % self.metrics_freq == self.metrics_freq - 1:\n pairs, name = self.pair_fcn()\n for i in range(len(pairs[0][1][0])):\n n = len(self.metrics) - ('mean' in self.metrics)\n for j, pair in pairs:\n for k, metrics_fcn in self.metrics_fcns:\n m = metrics_fcn(pair[0][i], pair[1][i]).tolist()\n self.metrics[name[i] if name else n][j][k] = m\n self.metrics['mean'][j][k] = (self.metrics['mean'][j][k] * n + m) / (n + 1)\n\n metric_file = path.join(self.log_dir, \"metrics_{}.yaml\".format(self.epoch))\n metrics_str = yaml.dump(self._to_dict(self.metrics), default_flow_style=False)\n with open(metric_file, 'w') as f: f.write(metrics_str)\n\n self.set_progress(progress)\n\n if hasattr(self, 'save_fcn') and \\\n self.epoch % self.save_freq == self.save_freq - 1: \n save_file = path.join(self.log_dir, \"net_{}.pt\".format(self.epoch))\n print(\"[Epoch {}] Saving {}\".format(self.epoch, save_file))\n self.save_fcn(save_file)\n\n if hasattr(self, 'eval_fcn') and \\\n self.epoch % self.eval_freq == self.eval_freq - 1: \n self.eval_fcn()\n\n if hasattr(self, 'epoch_visual_fcn') and \\\n self.epoch % self.epoch_visual_freq == self.epoch_visual_freq - 1:\n epoch_visual_dir = path.join(self.log_dir, self.epoch_visual_name)\n visual_dir = path.join(epoch_visual_dir, \"epoch{}\".format(self.epoch))\n if not path.isdir(visual_dir): os.makedirs(visual_dir)\n\n print(\"[Epoch {}] Evaluating...\".format(self.epoch))\n for i, visuals in enumerate(self.epoch_visual_fcn()):\n for k, v in visuals.items():\n visual_file = path.join(visual_dir,\n \"epoch{}_{}_{}.png\".format(self.epoch, k, i))\n Image.fromarray(v).convert('RGB').save(visual_file)\n self.epoch += 1" ]
[ [ "numpy.format_float_scientific", "numpy.mean" ] ]
ToughBishop/censor-fix
[ "fada22628470eb3bbde65009cedf777df929b6e8" ]
[ "censorfix/test_censor.py" ]
[ "import numpy as np\nimport pandas as pd\nimport joblib\nfrom censorfix import censorfix\n\n\ndef create_data():\n \"\"\"\n Returns two dataframes a copy of each other\n \"\"\"\n c = 0.5\n n = 3\n cov = c + np.identity(n) * (1 - c)\n size = 100\n full_data = np.random.multivariate_normal(\n [0 for i in range(n)], cov, size=size)\n df = pd.DataFrame(full_data)\n df2 = df.copy()\n return df, df2\n\n\ndef single_dim_test():\n \"\"\"\n Test censorfix in one dimension \n with a gaussian distribution of data\n \"\"\"\n df, df2 = create_data() \n censor_high = 1.5\n censor_low =- 0.5\n df.loc[df[0] > censor_high, 0] = censor_high\n df.loc[df[0] < censor_low, 0] = censor_low\n imp = censorfix.censorImputer(\n debug=False, no_columns=2, sample_posterior=True)\n df = df.sort_values(by=0, ascending=True)\n imp.impute_once(df[0], df[[1, 2]], censor_high, censor_low)\n fig, ax = plt.subplots(1, 1)\n df2.plot(kind='scatter', x=0, y=2, ax=ax, color='pink',label='imputed')\n df.plot(kind='scatter', x=0, y=2, ax=ax,label='true')\n plt.title('single imputation of censored values')\n plt.show()\n return df,df2\n\n\ndef multi_imp_test(plot=True):\n \"\"\"\n Tests the creation of multiple imputations\n plots results or returns dataframe and the imputed data\n with gaussian distribution\n \"\"\"\n df, df2 = create_data()\n # censor the first dataframe\n censor_high_1=0.8\n censor_high_2=1\n censor_low_1=-0.6\n censor_low_2=-2\n df.loc[df[0] > censor_high_1, 0] = censor_high_1\n df.loc[df[0] < censor_low_1, 0] = censor_low_1\n df.loc[df[1] > censor_high_2, 1] = censor_high_2\n df.loc[df[1] < censor_low_2, 1] = censor_low_2\n\n imp = censorfix.censorImputer(\n debug=False, sample_posterior=True,number_imputations=3)\n U = [censor_high_1, censor_high_2, 'NA'] # the upper censor values\n L = [censor_low_1, censor_low_2, 'NA'] # the lower censor values\n\n data_mi = imp.impute(df, U, L, iter_val=2)\n\n if plot:\n fig, ax = plt.subplots(1, 1)\n colours=['red','yellow','green']\n for i,data in enumerate(data_mi):\n data.plot(kind='scatter',x=0,y=1,color=colours[i],label='imputation {}'.format(i),ax=ax)\n df2.plot(kind='scatter',x=0,y=1,color='blue',label='original',ax=ax)\n plt.title('Multiple imputations comparison')\n plt.legend()\n plt.show()\n return df2, data_mi\n\n\ndef multi_dim_test():\n \"\"\"\n Test censorfix for doing multiple imputation of multivariate\n gaussian distribution\n \"\"\"\n df, df2 = create_data() \n\n # censor the first dataframe\n censor_high_1=0.8\n censor_high_2=0.5\n censor_low_1=-0.3\n censor_low_2=-0.7\n df.loc[df[0] > censor_high_1, 0] = censor_high_1\n df.loc[df[0] < censor_low_1, 0] = censor_low_1\n df.loc[df[1] > censor_high_2, 1] = censor_high_2\n df.loc[df[1] < censor_low_2, 1] = censor_low_2\n\n imp = censorfix.censorImputer(\n debug=False, sample_posterior=True)\n U = [censor_high_1, censor_high_2, 'NA'] # the upper censor values\n L = [censor_low_1, censor_low_2, 'NA'] # the lower censor values\n \n fig, ax = plt.subplots(1, 1)\n df.plot(kind='scatter', x=0, y=1, ax=ax, color='yellow', label='censored')\n df = imp.impute(df, U, L, iter_val=2)\n df2.plot(\n kind='scatter',\n x=0,\n y=1,\n ax=ax,\n color='pink',\n label='imputed_values')\n df.plot(kind='scatter', x=0, y=1, ax=ax, label='actual')\n plt.legend()\n plt.title('Multivariate Censor Imputation')\n plt.show()\n return df,df2\n\n" ]
[ [ "numpy.identity", "pandas.DataFrame" ] ]
andyfaff/lmfit-py
[ "088f81d9f22de026b685382903a9f5ac45cbc1df" ]
[ "examples/doc_nistgauss.py" ]
[ "#!/usr/bin/env python\n#<examples/doc_nistgauss.py>\nimport numpy as np\nfrom lmfit.models import GaussianModel, ExponentialModel\nimport sys\nimport matplotlib.pyplot as plt\n\ndat = np.loadtxt('NIST_Gauss2.dat')\nx = dat[:, 1]\ny = dat[:, 0]\n\nexp_mod = ExponentialModel(prefix='exp_')\npars = exp_mod.guess(y, x=x)\n\ngauss1 = GaussianModel(prefix='g1_')\npars.update( gauss1.make_params())\n\npars['g1_center'].set(105, min=75, max=125)\npars['g1_sigma'].set(15, min=3)\npars['g1_amplitude'].set(2000, min=10)\n\ngauss2 = GaussianModel(prefix='g2_')\n\npars.update(gauss2.make_params())\n\npars['g2_center'].set(155, min=125, max=175)\npars['g2_sigma'].set(15, min=3)\npars['g2_amplitude'].set(2000, min=10)\n\nmod = gauss1 + gauss2 + exp_mod\n\n\ninit = mod.eval(pars, x=x)\nplt.plot(x, y)\nplt.plot(x, init, 'k--')\n\nout = mod.fit(y, pars, x=x)\n\ncomps = out.eval_components(x=x)\n\nprint(out.fit_report(min_correl=0.5))\n\nplt.plot(x, out.best_fit, 'r-')\nplt.plot(x, comps['g1_'], 'b--')\nplt.plot(x, comps['g2_'], 'b--')\nplt.plot(x, comps['exp_'], 'k--')\n\nplt.show()\n#<end examples/doc_nistgauss.py>\n" ]
[ [ "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.plot" ] ]
DuranAdrian/SpendingVisualization
[ "a1a1111e0fb19726f583b07772a57758dde4aacb" ]
[ "pie_chart.py" ]
[ "from math import pi\r\nimport pandas\r\nimport json\r\nfrom bokeh.io import output_file, show\r\nfrom bokeh.palettes import Category20c\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.transform import cumsum\r\n\r\n# TODO: Use Tinker as GUI to view current category json data and generate pie chart\r\n\r\n# If new categories/stores are added to json file, call this method to uppercase all keys and values\r\ndef upperCaseAllValues():\r\n # Open Category JSON file\r\n with open(\"categories.json\") as file:\r\n categories = json.load(file)\r\n file.close()\r\n\r\n for category in categories:\r\n new_value = category.upper()\r\n categories[new_value] = categories.pop(category)\r\n\r\n for index in range(0, len(categories[category]), 1):\r\n categories[category][index] = categories[category][index].upper() \r\n\r\n\r\n with open(\"categories.json\", \"w\") as file:\r\n json.dump(categories, file)\r\n file.close()\r\n\r\ndef findCategory(transaction):\r\n # Read in Categories to data variable \r\n for cat in categories:\r\n for index in range(0, len(categories[cat]), 1):\r\n try:\r\n transaction.upper().index(categories[cat][index])\r\n except ValueError:\r\n continue\r\n else:\r\n return cat\r\n return \"NO RESULTS FOUND\"\r\n\r\n# Set pie chart output file\r\noutput_file(\"pie.html\")\r\n\r\n# Open Category JSON file\r\nwith open(\"categories.json\") as file:\r\n categories = json.load(file)\r\n file.close()\r\n\r\n# TODO: Determine if csv file exist, if not, close program - alert user later\r\n# Show list of available csv files\r\n\r\n# Parse CSV file\r\naccountData = pandas.read_csv(\"BankData.csv\")\r\n\r\nresults = {}\r\nincome = 0.0\r\n\r\n# Assume successful file\r\nfor descrip, amount in zip(accountData[\"Description\"], accountData[\"Amount\"]):\r\n found_cat = findCategory(descrip)\r\n \r\n if found_cat != \"NO RESULTS FOUND\":\r\n if found_cat == \"INCOME\":\r\n income = income + amount\r\n elif found_cat in results:\r\n results[found_cat] = float(\"{:.2f}\".format(results[found_cat] + amount))\r\n else:\r\n results[found_cat] = amount\r\n else:\r\n # TODO: Let user know an manually insert to correct section\r\n # Temporary fix for missed income/deposits\r\n if \"MONEY TRANSFER\" in descrip and \"FROM\" in descrip:\r\n income = income + amount \r\n\r\n# Create Pie Chart from Data\r\ndata = pandas.Series(results).reset_index(name='value').rename(columns={'index':'category'})\r\ndata['angle'] = data['value']/data['value'].sum() * 2*pi\r\ndata['color'] = Category20c[len(results)]\r\n\r\nfig = figure(plot_height=350, sizing_mode='scale_both',title=\"Income %s\" % income, toolbar_location=None, tools=\"hover\", tooltips=\"@category: @value\", x_range=(-0.5, 1.0))\r\nfig.wedge(x=0, y=1, radius=0.35, start_angle=cumsum(\"angle\", include_zero=True), end_angle=cumsum(\"angle\"), line_color=\"white\", fill_color=\"color\", legend_field=\"category\", source=data)\r\n\r\nfig.axis.axis_label=None\r\nfig.axis.visible=False\r\nfig.grid.grid_line_color = None\r\n\r\nshow(fig)\r\n" ]
[ [ "pandas.read_csv", "pandas.Series" ] ]
alpha358/keras-retinanet
[ "a6ba70cf07c2b5e84338b39833bab75cde526dc6" ]
[ "keras_retinanet/bin/train.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport keras\nimport keras.preprocessing.image\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..preprocessing.kitti import KittiGenerator\nfrom ..preprocessing.open_images import OpenImagesGenerator\nfrom ..preprocessing.pascal_voc import PascalVocGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.keras_version import check_keras_version\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.transform import random_transform_generator\nfrom ..utils.anchors import AnchorParameters\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef get_session():\n \"\"\" Construct a modified tf session.\n \"\"\"\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,\n freeze_backbone=False, lr=1e-5,\n anchor_params = None,\n config=None, alpha=0.25, gamma=2.0, focal_weight = 1,\n clip_FL = False\n ):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n\n if anchor_params == None:\n if config and 'anchor_parameters' in config:\n anchor_params = parse_anchor_parameters(config)\n else:\n anchor_params = AnchorParameters.default\n\n num_anchors = anchor_params.num_anchors()\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n with tf.device('/cpu:0'):\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n\n # compile model\n training_model.compile(\n loss={\n 'regression' : losses.smooth_l1(),\n 'classification': losses.focal(\n alpha=alpha, gamma=gamma,\n clip_FL=clip_FL,\n const_multiplier=focal_weight)\n },\n optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(model, training_model, prediction_model, validation_generator, args):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir = args.tensorboard_dir,\n histogram_freq = 0,\n batch_size = args.batch_size,\n write_graph = True,\n write_grads = False,\n write_images = False,\n embeddings_freq = 0,\n embeddings_layer_names = None,\n embeddings_metadata = None\n )\n callbacks.append(tensorboard_callback)\n\n if args.evaluation and validation_generator:\n if args.dataset_type == 'coco':\n from ..callbacks.coco import CocoEval\n\n # use prediction model for evaluation\n evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)\n else:\n evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(keras.callbacks.ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n 
patience = 2,\n verbose = 1,\n mode = 'auto',\n min_delta = 0.0001,\n cooldown = 0,\n min_lr = 0\n ))\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n 'batch_size' : args.batch_size,\n 'config' : args.config,\n 'image_min_side' : args.image_min_side,\n 'image_max_side' : args.image_max_side,\n 'preprocess_image' : preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'trainval',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'test',\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(parsed_args.batch_size,\n parsed_args.multi_gpu))\n\n if 
parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(parsed_args.multi_gpu,\n parsed_args.snapshot))\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\")\n\n if 'resnet' not in parsed_args.backbone:\n warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')\n\n kitti_parser = subparsers.add_parser('kitti')\n kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')\n\n def csv_list(string):\n return string.split(',')\n\n oid_parser = subparsers.add_parser('oid')\n oid_parser.add_argument('main_dir', help='Path to dataset directory.')\n oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')\n oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)\n oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')\n oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--snapshot', help='Resume training from a snapshot.')\n group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. 
This is the default behaviour.', action='store_const', const=True, default=True)\n group.add_argument('--weights', help='Initialize the model with weights from a file.')\n group.add_argument('--no-weights', help='Don\\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)\n\n parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)\n parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')\n parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)\n parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')\n parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)\n parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)\n parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)\n parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \\'./snapshots\\')', default='./snapshots')\n parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')\n parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')\n parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')\n parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')\n parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n parser.add_argument('--config', help='Path to a configuration parameters .ini file.')\n parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')\n\n # Fit generator arguments\n parser.add_argument('--workers', help='Number of multiprocessing workers. 
To disable multiprocessing, set workers to 0', type=int, default=1)\n parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit generator.', type=int, default=10)\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure keras is the minimum required version\n check_keras_version()\n\n # optionally choose specific GPU\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n keras.backend.tensorflow_backend.set_session(get_session())\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = create_generators(args, backbone.preprocess_image)\n\n # create the model\n if args.snapshot is not None:\n print('Loading model, this may take a second...')\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n if args.config and 'anchor_parameters' in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print('Creating model, this may take a second...')\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n config=args.config\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if 'vgg' in args.backbone or 'densenet' in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n validation_generator,\n args,\n )\n\n # Use multiprocessing if workers > 0\n if args.workers > 0:\n use_multiprocessing = True\n else:\n use_multiprocessing = False\n\n # start training\n training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=use_multiprocessing,\n max_queue_size=args.max_queue_size\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.device" ] ]
ChrisKeefe/q2-diversity
[ "97e9cc466244edf94425de0e9f3d93c00d4dcac2" ]
[ "q2_diversity/tests/test_beta.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2020, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\nimport io\nimport os\nimport tempfile\nimport glob\nimport collections\n\nimport skbio\nimport numpy as np\nimport numpy.testing as npt\nfrom biom.table import Table\nimport pandas as pd\nimport qiime2\nfrom qiime2.plugin.testing import TestPluginBase\n\n\nfrom qiime2 import Artifact\nfrom q2_diversity import (bioenv, beta_group_significance, mantel)\nfrom q2_diversity._beta._visualizer import _get_distance_boxplot_data\n\n\nclass BetaDiversityTests(TestPluginBase):\n # Note that some of these tests replicate the cases in biocore/unifrac\n package = 'q2_diversity.tests'\n\n def setUp(self):\n super().setUp()\n self.beta = self.plugin.pipelines['beta']\n self.beta_phylogenetic = self.plugin.pipelines['beta_phylogenetic']\n\n two_feature_table = self.get_data_path('two_feature_table.biom')\n self.two_feature_table = Artifact.import_data(\n 'FeatureTable[Frequency]',\n two_feature_table)\n\n three_feature_tree = self.get_data_path('three_feature.tree')\n self.three_feature_tree = Artifact.import_data('Phylogeny[Rooted]',\n three_feature_tree)\n\n crawford_table = self.get_data_path('crawford.biom')\n self.crawford_table = Artifact.import_data('FeatureTable[Frequency]',\n crawford_table)\n crawford_tree = self.get_data_path('crawford.nwk')\n self.crawford_tree = Artifact.import_data('Phylogeny[Rooted]',\n crawford_tree)\n\n t = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n self.t = Artifact.import_data('FeatureTable[Frequency]', t)\n tree = skbio.TreeNode.read(io.StringIO(\n '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))\n self.tree = Artifact.import_data('Phylogeny[Rooted]', tree)\n\n def test_beta(self):\n actual = self.beta(table=self.t, metric='braycurtis')\n actual = actual[0].view(skbio.DistanceMatrix)\n # expected computed with scipy.spatial.distance.braycurtis\n expected = skbio.DistanceMatrix([[0.0000000, 0.3333333, 0.6666667],\n [0.3333333, 0.0000000, 0.4285714],\n [0.6666667, 0.4285714, 0.0000000]],\n ids=['S1', 'S2', 'S3'])\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_parallel_beta(self):\n parallel = self.beta(table=self.t, metric='braycurtis', n_jobs='auto')\n parallel = parallel[0].view(skbio.DistanceMatrix)\n single_thread = self.beta(table=self.t, metric='braycurtis', n_jobs=1)\n single_thread = single_thread[0].view(skbio.DistanceMatrix)\n # expected computed with scipy.spatial.distance.braycurtis\n expected = skbio.DistanceMatrix([[0.0000000, 0.3333333, 0.6666667],\n [0.3333333, 0.0000000, 0.4285714],\n [0.6666667, 0.4285714, 0.0000000]],\n ids=['S1', 'S2', 'S3'])\n\n self.assertEqual(parallel.ids, expected.ids)\n self.assertEqual(single_thread.ids, expected.ids)\n for id1 in parallel.ids:\n for id2 in parallel.ids:\n npt.assert_almost_equal(parallel[id1, id2], expected[id1, id2])\n for id1 in single_thread.ids:\n for id2 in single_thread.ids:\n npt.assert_almost_equal(single_thread[id1, id2],\n expected[id1, id2])\n\n def test_beta_phylo_metric(self):\n with self.assertRaisesRegex(TypeError,\n 'received \\'unweighted_unifrac\\''):\n 
self.beta(table=self.t, metric='unweighted_unifrac')\n\n def test_beta_unknown_metric(self):\n with self.assertRaisesRegex(TypeError,\n 'received \\'not-a-metric\\''):\n self.beta(table=self.t, metric='not-a-metric')\n\n def test_beta_empty_table(self):\n t = Table(np.array([]), [], [])\n t = Artifact.import_data('FeatureTable[Frequency]', t)\n with self.assertRaisesRegex(ValueError, 'empty'):\n self.beta(table=t, metric='braycurtis')\n\n def test_beta_phylogenetic(self):\n t = self.two_feature_table\n tree = self.three_feature_tree\n actual = self.beta_phylogenetic(\n table=t, phylogeny=tree, metric='unweighted_unifrac')\n\n self.assertEqual(len(actual), 1)\n\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n\n # expected computed with skbio.diversity.beta_diversity\n expected = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['S1', 'S2', 'S3'])\n\n actual = actual[0].view(skbio.DistanceMatrix)\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_beta_phylogenetic_non_phylo_metric(self):\n t = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n t = Artifact.import_data('FeatureTable[Frequency]', t)\n tree = skbio.TreeNode.read(io.StringIO(\n '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))\n tree = Artifact.import_data('Phylogeny[Rooted]', tree)\n with self.assertRaisesRegex(TypeError, 'received \\'braycurtis'):\n self.beta_phylogenetic(table=t, phylogeny=tree,\n metric='braycurtis')\n\n def test_beta_phylogenetic_unknown_metric(self):\n t = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n t = Artifact.import_data('FeatureTable[Frequency]', t)\n tree = skbio.TreeNode.read(io.StringIO(\n '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))\n tree = Artifact.import_data('Phylogeny[Rooted]', tree)\n with self.assertRaisesRegex(TypeError, 'received \\'not-a-metric\\''):\n self.beta_phylogenetic(table=t, phylogeny=tree,\n metric='not-a-metric')\n\n def test_beta_phylogenetic_empty_table(self):\n t = self.get_data_path('empty.biom')\n t = Artifact.import_data('FeatureTable[Frequency]', t)\n tree = self.get_data_path('three_feature.tree')\n tree = Artifact.import_data('Phylogeny[Rooted]', tree)\n\n with self.assertRaisesRegex(ValueError, 'empty'):\n self.beta_phylogenetic(table=t, phylogeny=tree,\n metric='unweighted_unifrac')\n\n def test_beta_unweighted(self):\n actual = self.beta_phylogenetic(table=self.crawford_table,\n phylogeny=self.crawford_tree,\n metric='unweighted_unifrac')\n\n # computed with beta-phylogenetic\n data = np.array([0.71836067, 0.71317361, 0.69746044, 0.62587207,\n 0.72826674, 0.72065895, 0.72640581, 0.73606053,\n 0.70302967, 0.73407301, 0.6548042, 0.71547381,\n 0.78397813, 0.72318399, 0.76138933, 0.61041275,\n 0.62331299, 0.71848305, 0.70416337, 0.75258475,\n 0.79249029, 0.64392779, 0.70052733, 0.69832716,\n 0.77818938, 0.72959894, 0.75782689, 0.71005144,\n 0.75065046, 0.78944369, 0.63593642, 0.71283615,\n 0.58314638, 0.69200762, 0.68972056, 0.71514083])\n ids = ('10084.PC.481', '10084.PC.593', '10084.PC.356', '10084.PC.355',\n '10084.PC.354', '10084.PC.636', '10084.PC.635', '10084.PC.607',\n '10084.PC.634')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, 
expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_beta_unweighted_parallel(self):\n bt_fp = self.get_data_path('crawford.biom')\n bt = Artifact.import_data('FeatureTable[Frequency]', bt_fp)\n tree_fp = self.get_data_path('crawford.nwk')\n tree = Artifact.import_data('Phylogeny[Rooted]', tree_fp)\n\n actual = self.beta_phylogenetic(table=bt,\n phylogeny=tree,\n metric='unweighted_unifrac',\n threads=2)\n\n # computed with beta-phylogenetic\n data = np.array([0.71836067, 0.71317361, 0.69746044, 0.62587207,\n 0.72826674, 0.72065895, 0.72640581, 0.73606053,\n 0.70302967, 0.73407301, 0.6548042, 0.71547381,\n 0.78397813, 0.72318399, 0.76138933, 0.61041275,\n 0.62331299, 0.71848305, 0.70416337, 0.75258475,\n 0.79249029, 0.64392779, 0.70052733, 0.69832716,\n 0.77818938, 0.72959894, 0.75782689, 0.71005144,\n 0.75065046, 0.78944369, 0.63593642, 0.71283615,\n 0.58314638, 0.69200762, 0.68972056, 0.71514083])\n ids = ('10084.PC.481', '10084.PC.593', '10084.PC.356', '10084.PC.355',\n '10084.PC.354', '10084.PC.636', '10084.PC.635', '10084.PC.607',\n '10084.PC.634')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_beta_weighted(self):\n actual = self.beta_phylogenetic(table=self.crawford_table,\n phylogeny=self.crawford_tree,\n metric='weighted_unifrac')\n\n # computed with beta-phylogenetic (weighted_unifrac)\n data = np.array([0.44656238, 0.23771096, 0.30489123, 0.23446002,\n 0.65723575, 0.44911772, 0.381904, 0.69144829,\n 0.39611776, 0.36568012, 0.53377975, 0.48908025,\n 0.35155196, 0.28318669, 0.57376916, 0.23395746,\n 0.24658122, 0.60271637, 0.39802552, 0.36567394,\n 0.68062701, 0.36862049, 0.48350632, 0.33024631,\n 0.33266697, 0.53464744, 0.74605075, 0.53951035,\n 0.49680733, 0.79178838, 0.37109012, 0.52629343,\n 0.22118218, 0.32400805, 0.43189708, 0.59705893])\n ids = ('10084.PC.481', '10084.PC.593', '10084.PC.356', '10084.PC.355',\n '10084.PC.354', '10084.PC.636', '10084.PC.635', '10084.PC.607',\n '10084.PC.634')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_variance_adjusted_normalized(self):\n bt_fp = self.get_data_path('vaw.biom')\n bt = Artifact.import_data('FeatureTable[Frequency]', bt_fp)\n tree_fp = self.get_data_path('vaw.nwk')\n tree = Artifact.import_data('Phylogeny[Rooted]', tree_fp)\n\n actual = self.beta_phylogenetic(table=bt,\n phylogeny=tree,\n metric='weighted_normalized_unifrac',\n variance_adjusted=True)\n\n data = np.array([[0.0000000, 0.4086040, 0.6240185, 0.4639481,\n 0.2857143, 0.2766318],\n [0.4086040, 0.0000000, 0.3798594, 0.6884992,\n 0.6807616, 0.4735781],\n [0.6240185, 0.3798594, 0.0000000, 0.7713254,\n 0.8812897, 0.5047114],\n [0.4639481, 0.6884992, 0.7713254, 0.0000000,\n 0.6666667, 0.2709298],\n [0.2857143, 0.6807616, 0.8812897, 0.6666667,\n 0.0000000, 0.4735991],\n [0.2766318, 0.4735781, 0.5047114, 0.2709298,\n 0.4735991, 
0.0000000]])\n ids = ('Sample1', 'Sample2', 'Sample3', 'Sample4', 'Sample5',\n 'Sample6')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_generalized_unifrac(self):\n bt_fp = self.get_data_path('vaw.biom')\n bt = Artifact.import_data('FeatureTable[Frequency]', bt_fp)\n tree_fp = self.get_data_path('vaw.nwk')\n tree = Artifact.import_data('Phylogeny[Rooted]', tree_fp)\n\n actual = self.beta_phylogenetic(table=bt,\n phylogeny=tree,\n metric='generalized_unifrac',\n alpha=0.5)\n\n data = np.array([[0.0000000, 0.4040518, 0.6285560, 0.5869439,\n 0.4082483, 0.2995673],\n [0.4040518, 0.0000000, 0.4160597, 0.7071068,\n 0.7302479, 0.4860856],\n [0.6285560, 0.4160597, 0.0000000, 0.8005220,\n 0.9073159, 0.5218198],\n [0.5869439, 0.7071068, 0.8005220, 0.0000000,\n 0.4117216, 0.3485667],\n [0.4082483, 0.7302479, 0.9073159, 0.4117216,\n 0.0000000, 0.6188282],\n [0.2995673, 0.4860856, 0.5218198, 0.3485667,\n 0.6188282, 0.0000000]])\n ids = ('Sample1', 'Sample2', 'Sample3', 'Sample4', 'Sample5',\n 'Sample6')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_generalized_unifrac_no_alpha(self):\n actual = self.beta_phylogenetic(table=self.crawford_table,\n phylogeny=self.crawford_tree,\n metric='generalized_unifrac',\n alpha=None)\n\n # alpha=1 should be equal to weighted normalized UniFrac\n data = np.array([0.2821874, 0.16148405, 0.20186143, 0.1634832,\n 0.40351108, 0.29135056, 0.24790944, 0.41967404,\n 0.24642185, 0.22218489, 0.34007547, 0.27722011,\n 0.20963881, 0.16897221, 0.3217958, 0.15237816,\n 0.16899207, 0.36445044, 0.25408941, 0.23358681,\n 0.4069374, 0.24615927, 0.28573888, 0.20578184,\n 0.20742006, 0.31249151, 0.46169893, 0.35294595,\n 0.32522355, 0.48437103, 0.21534558, 0.30558908,\n 0.12091004, 0.19817777, 0.24792853, 0.34293674])\n ids = ('10084.PC.481', '10084.PC.593', '10084.PC.356', '10084.PC.355',\n '10084.PC.354', '10084.PC.636', '10084.PC.635', '10084.PC.607',\n '10084.PC.634')\n expected = skbio.DistanceMatrix(data, ids=ids)\n\n self.assertEqual(len(actual), 1)\n self.assertEqual(repr(actual.distance_matrix.type), 'DistanceMatrix')\n actual = actual[0].view(skbio.DistanceMatrix)\n\n self.assertEqual(actual.ids, expected.ids)\n for id1 in actual.ids:\n for id2 in actual.ids:\n npt.assert_almost_equal(actual[id1, id2], expected[id1, id2])\n\n def test_not_generalized_passed_alpha(self):\n with self.assertRaisesRegex(ValueError,\n \"alpha.*only allowed.*when.*generalized\"):\n self.beta_phylogenetic(table=self.crawford_table,\n phylogeny=self.crawford_tree,\n metric='unweighted_unifrac',\n alpha=0.5)\n\n def test_beta_phylogenetic_too_many_jobs(self):\n with self.assertRaises(ValueError):\n # cannot guarantee that this will always be true, but it would be\n # odd to see a machine with these many CPUs\n self.beta_phylogenetic(table=self.crawford_table,\n phylogeny=self.crawford_tree,\n metric='unweighted_unifrac', threads=11117)\n\n\nclass 
BioenvTests(TestPluginBase):\n package = 'q2_diversity.tests'\n\n def test_bioenv(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.Metadata(\n pd.DataFrame(\n [[1.0, 'a'], [2.0, 'b'], [3.0, 'c']],\n index=pd.Index(['sample1', 'sample2', 'sample3'], name='id'),\n columns=['metadata1', 'metadata2']))\n with tempfile.TemporaryDirectory() as output_dir:\n bioenv(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n self.assertTrue('metadata1' in open(index_fp).read())\n\n self.assertTrue('not numeric:' in open(index_fp).read())\n self.assertTrue('<strong>metadata2' in open(index_fp).read())\n\n self.assertFalse('Warning' in open(index_fp).read())\n\n def test_bioenv_exclude_missing_data(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.Metadata(\n pd.DataFrame(\n [[1.0, 2.0], [2.0, np.nan], [3.0, 42.0]],\n index=pd.Index(['sample1', 'sample2', 'sample3'], name='id'),\n columns=['metadata1', 'metadata2']))\n with tempfile.TemporaryDirectory() as output_dir:\n bioenv(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n self.assertTrue('metadata1' in open(index_fp).read())\n self.assertTrue('metadata2' in open(index_fp).read())\n self.assertTrue('Warning' in open(index_fp).read())\n self.assertTrue('contained 3 samples' in open(index_fp).read())\n self.assertTrue('2 samples' in open(index_fp).read())\n\n def test_bioenv_extra_metadata(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.Metadata(\n pd.DataFrame(\n [[1.0, 'a'], [2.0, 'b'], [3.0, 'c'], [4.0, 'd']],\n index=pd.Index(['sample1', 'sample2', 'sample3', 'sample4'],\n name='id'),\n columns=['metadata1', 'metadata2']))\n with tempfile.TemporaryDirectory() as output_dir:\n bioenv(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n self.assertTrue('metadata1' in open(index_fp).read())\n\n self.assertTrue('not numeric:' in open(index_fp).read())\n self.assertTrue('<strong>metadata2' in open(index_fp).read())\n\n self.assertFalse('Warning' in open(index_fp).read())\n\n def test_bioenv_zero_variance_column(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.Metadata(\n pd.DataFrame(\n [[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n index=pd.Index(['sample1', 'sample2', 'sample3'], name='id'),\n columns=['metadata1', 'metadata2']))\n with tempfile.TemporaryDirectory() as output_dir:\n bioenv(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue('metadata1' in open(index_fp).read())\n\n self.assertTrue('no variance' in open(index_fp).read())\n self.assertTrue('<strong>metadata2' in open(index_fp).read())\n\n self.assertFalse('Warning' in open(index_fp).read())\n\n\nclass BetaGroupSignificanceTests(unittest.TestCase):\n\n def test_permanova(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n 
name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n # all expected boxplots are generated\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.png')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.png')))\n # no extra boxplots are generated\n self.assertEqual(len(glob.glob('%s/*-boxplots.pdf' % output_dir)),\n 2)\n self.assertEqual(len(glob.glob('%s/*-boxplots.png' % output_dir)),\n 2)\n self.assertTrue('PERMANOVA results' in open(index_fp).read())\n self.assertFalse('Warning' in open(index_fp).read())\n self.assertFalse('Pairwise permanova' in open(index_fp).read())\n\n def test_anosim(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, method='anosim',\n permutations=42)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n # all expected boxplots are generated\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.png')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.png')))\n # no extra boxplots are generated\n self.assertEqual(len(glob.glob('%s/*-boxplots.pdf' % output_dir)),\n 2)\n self.assertEqual(len(glob.glob('%s/*-boxplots.png' % output_dir)),\n 2)\n self.assertTrue('ANOSIM results' in open(index_fp).read())\n self.assertTrue('<td>42</td>' in open(index_fp).read())\n self.assertFalse('Warning' in open(index_fp).read())\n self.assertFalse('Pairwise anosim' in open(index_fp).read())\n\n def test_permanova_pairwise(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, pairwise=True)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n # all expected boxplots are generated\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.png')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.png')))\n # no extra boxplots are generated\n self.assertEqual(len(glob.glob('%s/*-boxplots.pdf' % output_dir)),\n 2)\n self.assertEqual(len(glob.glob('%s/*-boxplots.png' % output_dir)),\n 2)\n self.assertTrue('PERMANOVA results' in open(index_fp).read())\n self.assertTrue('Pairwise permanova' in open(index_fp).read())\n self.assertFalse('Warning' in open(index_fp).read())\n\n def 
test_anosim_pairwise(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, method='anosim',\n permutations=42, pairwise=True)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n # all expected boxplots are generated\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'a-boxplots.png')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.pdf')))\n self.assertTrue(os.path.exists(\n os.path.join(output_dir, 'b-boxplots.png')))\n # no extra boxplots are generated\n self.assertEqual(len(glob.glob('%s/*-boxplots.pdf' % output_dir)),\n 2)\n self.assertEqual(len(glob.glob('%s/*-boxplots.png' % output_dir)),\n 2)\n self.assertTrue('ANOSIM results' in open(index_fp).read())\n self.assertTrue('<td>42</td>' in open(index_fp).read())\n self.assertFalse('Warning' in open(index_fp).read())\n self.assertTrue('Pairwise anosim' in open(index_fp).read())\n\n def test_alt_permutations(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, permutations=42)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue('<td>42</td>' in open(index_fp).read())\n\n def test_invalid_method(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3'],\n name='id')))\n\n with self.assertRaises(ValueError):\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, method='bad!')\n\n def test_filtered_samples_numeric_metadata(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25, 0.66],\n [0.25, 0.00, 0.00, 0.66],\n [0.25, 0.00, 0.00, 0.66],\n [0.66, 0.66, 0.66, 0.00]],\n ids=['sample1', 'sample2', 'sample3',\n 'sample4'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['1.0', '2.0', '2.0', np.nan], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3',\n 'sample4'], name='id')))\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue('Warning' in open(index_fp).read())\n\n def test_filtered_samples_str_metadata(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25, 0.66],\n [0.25, 0.00, 0.00, 0.66],\n [0.25, 0.00, 0.00, 0.66],\n [0.66, 0.66, 0.66, 0.00]],\n ids=['sample1', 'sample2', 'sample3',\n 'sample4'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b', np.nan], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3',\n 'sample4'], name='id')))\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md)\n index_fp = os.path.join(output_dir, 
'index.html')\n self.assertTrue('Warning' in open(index_fp).read())\n\n def test_extra_metadata(self):\n dm = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n md = qiime2.CategoricalMetadataColumn(\n pd.Series(['a', 'b', 'b', 'c'], name='a or b',\n index=pd.Index(['sample1', 'sample2', 'sample3',\n 'sample4'], name='id')))\n\n with tempfile.TemporaryDirectory() as output_dir:\n beta_group_significance(output_dir, dm, md, permutations=42)\n index_fp = os.path.join(output_dir, 'index.html')\n self.assertTrue('<td>2</td>' in open(index_fp).read())\n\n def test_get_distance_boxplot_data_two_groups(self):\n dm = skbio.DistanceMatrix([[0.00, 0.12, 0.13, 0.14, 0.15],\n [0.12, 0.00, 0.22, 0.23, 0.24],\n [0.13, 0.22, 0.00, 0.31, 0.32],\n [0.14, 0.23, 0.31, 0.00, 0.44],\n [0.15, 0.24, 0.32, 0.44, 0.00]],\n ids=['s1', 's2', 's3', 's4', 's5'])\n\n groupings = collections.OrderedDict(\n [('g1', ['s1', 's2']), ('g2', ['s3', 's4', 's5'])])\n obs = _get_distance_boxplot_data(dm, 'g1', groupings)\n exp_data = [[0.12], [0.13, 0.14, 0.15, 0.22, 0.23, 0.24]]\n exp_labels = ['g1 (n=1)', 'g2 (n=6)']\n self.assertEqual(obs[0], exp_data)\n self.assertEqual(obs[1], exp_labels)\n\n def test_get_distance_boxplot_data_within_always_first(self):\n dm = skbio.DistanceMatrix([[0.00, 0.12, 0.13, 0.14, 0.15],\n [0.12, 0.00, 0.22, 0.23, 0.24],\n [0.13, 0.22, 0.00, 0.31, 0.32],\n [0.14, 0.23, 0.31, 0.00, 0.44],\n [0.15, 0.24, 0.32, 0.44, 0.00]],\n ids=['s1', 's2', 's3', 's4', 's5'])\n\n groupings = collections.OrderedDict(\n [('g2', ['s3', 's4', 's5']), ('g1', ['s1', 's2'])])\n obs = _get_distance_boxplot_data(dm, 'g1', groupings)\n exp_data = [[0.12], [0.13, 0.14, 0.15, 0.22, 0.23, 0.24]]\n exp_labels = ['g1 (n=1)', 'g2 (n=6)']\n exp_summary = [('s2', 's1', 'g1', 'g1', 0.12),\n ('s1', 's3', 'g1', 'g2', 0.13),\n ('s1', 's4', 'g1', 'g2', 0.14000000000000001),\n ('s1', 's5', 'g1', 'g2', 0.14999999999999999),\n ('s2', 's3', 'g1', 'g2', 0.22),\n ('s2', 's4', 'g1', 'g2', 0.23000000000000001),\n ('s2', 's5', 'g1', 'g2', 0.23999999999999999)]\n self.assertEqual(obs[0], exp_data)\n self.assertEqual(obs[1], exp_labels)\n self.assertEqual(obs[2], exp_summary)\n\n def test_get_distance_boxplot_data_three_groups(self):\n dm = skbio.DistanceMatrix([[0.00, 0.12, 0.13, 0.14, 0.15],\n [0.12, 0.00, 0.22, 0.23, 0.24],\n [0.13, 0.22, 0.00, 0.31, 0.32],\n [0.14, 0.23, 0.31, 0.00, 0.44],\n [0.15, 0.24, 0.32, 0.44, 0.00]],\n ids=['s1', 's2', 's3', 's4', 's5'])\n\n groupings = collections.OrderedDict(\n [('g1', ['s1', 's2']), ('g2', ['s3', 's5']), ('g3', ['s4'])])\n obs = _get_distance_boxplot_data(dm, 'g1', groupings)\n exp_data = [[0.12], [0.13, 0.15, 0.22, 0.24], [0.14, 0.23]]\n exp_labels = ['g1 (n=1)', 'g2 (n=4)', 'g3 (n=2)']\n self.assertEqual(obs[0], exp_data)\n self.assertEqual(obs[1], exp_labels)\n\n def test_get_distance_boxplot_data_between_order_retained(self):\n dm = skbio.DistanceMatrix([[0.00, 0.12, 0.13, 0.14, 0.15],\n [0.12, 0.00, 0.22, 0.23, 0.24],\n [0.13, 0.22, 0.00, 0.31, 0.32],\n [0.14, 0.23, 0.31, 0.00, 0.44],\n [0.15, 0.24, 0.32, 0.44, 0.00]],\n ids=['s1', 's2', 's3', 's4', 's5'])\n\n groupings = collections.OrderedDict(\n [('g1', ['s1', 's2']), ('g3', ['s4']), ('g2', ['s3', 's5'])])\n obs = _get_distance_boxplot_data(dm, 'g1', groupings)\n exp_data = [[0.12], [0.14, 0.23], [0.13, 0.15, 0.22, 0.24]]\n exp_labels = ['g1 (n=1)', 'g3 (n=2)', 'g2 (n=4)']\n self.assertEqual(obs[0], exp_data)\n self.assertEqual(obs[1], exp_labels)\n\n\nclass 
TestMantel(unittest.TestCase):\n def setUp(self):\n self.dm1 = skbio.DistanceMatrix([[0.00, 0.25, 0.25],\n [0.25, 0.00, 0.00],\n [0.25, 0.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n\n # Positive correlation with `dm1`\n self.dm2 = skbio.DistanceMatrix([[0.00, 1.00, 2.00],\n [1.00, 0.00, 1.00],\n [2.00, 1.00, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n\n # Perfect negative correlation with `dm1`\n self.dm3 = skbio.DistanceMatrix([[0.00, 0.00, 0.00],\n [0.00, 0.00, 0.25],\n [0.00, 0.25, 0.00]],\n ids=['sample1', 'sample2', 'sample3'])\n\n self.dm2_reordered = skbio.DistanceMatrix(\n [[0.00, 2.00, 1.00],\n [2.00, 0.00, 1.00],\n [1.00, 1.00, 0.00]],\n ids=['sample3', 'sample1', 'sample2'])\n\n self.mismatched_dm = skbio.DistanceMatrix(\n [[0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 2.0, 3.0],\n [0.0, 1.0, 0.0, 1.0, 2.0],\n [0.0, 2.0, 1.0, 0.0, 1.0],\n [0.0, 3.0, 2.0, 1.0, 0.0]],\n ids=['foo', 'sample1', 'sample2', 'sample3', 'x'])\n\n self.output_dir_obj = tempfile.TemporaryDirectory(\n prefix='q2-diversity-test-temp-')\n self.output_dir = self.output_dir_obj.name\n\n def tearDown(self):\n self.output_dir_obj.cleanup()\n\n def assertBasicVizValidity(self, viz_dir, sample_size, method='spearman',\n permutations=999, label1='Distance Matrix 1',\n label2='Distance Matrix 2',\n mismatched_ids=None, exp_test_stat=None,\n exp_p_value=None):\n index_fp = os.path.join(viz_dir, 'index.html')\n self.assertTrue(os.path.exists(index_fp))\n\n with open(index_fp, 'r') as fh:\n index_contents = fh.read()\n\n self.assertIn('Mantel test results', index_contents)\n self.assertIn('<td>%d</td>' % sample_size, index_contents)\n self.assertIn('<td>%d</td>' % permutations, index_contents)\n\n method_labels = {'spearman': \"Spearman rho\", 'pearson': \"Pearson r\"}\n self.assertIn(method_labels[method], index_contents)\n\n if mismatched_ids is None:\n self.assertNotIn('Warning:', index_contents)\n else:\n self.assertIn('Warning', index_contents)\n self.assertIn('%d ID(s)' % len(mismatched_ids), index_contents)\n self.assertIn('remaining <strong>%d IDs</strong>' % sample_size,\n index_contents)\n self.assertIn(', '.join(sorted(mismatched_ids)), index_contents)\n\n if exp_test_stat is not None:\n self.assertIn('<td>%r</td>' % exp_test_stat, index_contents)\n\n if exp_p_value is not None:\n self.assertIn('<td>%s</td>' % exp_p_value, index_contents)\n\n svg_fp = os.path.join(viz_dir, 'mantel-scatter.svg')\n self.assertTrue(os.path.exists(svg_fp))\n\n with open(svg_fp, 'r') as fh:\n svg_contents = fh.read()\n\n self.assertIn('Pairwise Distance (%s)' % label1, svg_contents)\n self.assertIn('Pairwise Distance (%s)' % label2, svg_contents)\n\n def test_defaults_positive_correlation(self):\n mantel(self.output_dir, self.dm1, self.dm2)\n\n self.assertBasicVizValidity(self.output_dir, 3, exp_test_stat=0.5,\n exp_p_value=1)\n\n def test_defaults_negative_correlation(self):\n mantel(self.output_dir, self.dm1, self.dm3)\n\n # p-value will be stochastic with this dataset so not asserting its\n # value.\n self.assertBasicVizValidity(self.output_dir, 3, exp_test_stat=-1)\n\n def test_defaults_reverse_comparison(self):\n # Comparing X to Y should be the same as comparing Y to X.\n mantel(self.output_dir, self.dm2, self.dm1)\n\n self.assertBasicVizValidity(self.output_dir, 3, exp_test_stat=0.5,\n exp_p_value=1)\n\n def test_defaults_reordered(self):\n # Order of IDs in distance matrices shouldn't change the results.\n mantel(self.output_dir, self.dm1, self.dm2_reordered)\n\n 
self.assertBasicVizValidity(self.output_dir, 3, exp_test_stat=0.5,\n exp_p_value=1)\n\n def test_pearson(self):\n mantel(self.output_dir, self.dm1, self.dm2, method='pearson')\n\n self.assertBasicVizValidity(self.output_dir, 3, method='pearson',\n exp_test_stat=0.5, exp_p_value=1)\n\n def test_alt_permutations(self):\n mantel(self.output_dir, self.dm1, self.dm2, permutations=42)\n\n self.assertBasicVizValidity(self.output_dir, 3, permutations=42,\n exp_test_stat=0.5, exp_p_value=1)\n\n def test_zero_permutations(self):\n mantel(self.output_dir, self.dm1, self.dm2, permutations=0)\n\n self.assertBasicVizValidity(self.output_dir, 3, permutations=0,\n exp_test_stat=0.5, exp_p_value='NaN')\n\n def test_alt_labels(self):\n mantel(self.output_dir, self.dm1, self.dm2, label1='Peanut',\n label2='Milo')\n\n self.assertBasicVizValidity(self.output_dir, 3, label1='Peanut',\n label2='Milo', exp_test_stat=0.5,\n exp_p_value=1)\n\n def test_error_on_sample_mismatch(self):\n with self.assertRaisesRegex(ValueError,\n 'intersect_ids.*mismatches.*\\n\\nfoo, x'):\n mantel(self.output_dir, self.dm1, self.mismatched_dm)\n\n def test_warn_on_sample_mismatch(self):\n mantel(self.output_dir, self.dm1, self.mismatched_dm,\n intersect_ids=True)\n\n self.assertBasicVizValidity(self.output_dir, 3,\n mismatched_ids={'foo', 'x'},\n exp_test_stat=0.5, exp_p_value=1)\n\n def test_warn_on_sample_mismatch_reverse_comparison(self):\n # Comparing X to Y should be the same as comparing Y to X.\n mantel(self.output_dir, self.mismatched_dm, self.dm1,\n intersect_ids=True)\n\n self.assertBasicVizValidity(self.output_dir, 3,\n mismatched_ids={'foo', 'x'},\n exp_test_stat=0.5, exp_p_value=1)\n\n def test_same_matrices(self):\n mantel(self.output_dir, self.dm1, self.dm1)\n\n self.assertBasicVizValidity(self.output_dir, 3, exp_test_stat=1,\n exp_p_value=1)\n\n def test_same_filtered_matrices(self):\n # These two matrices filter down to the same data.\n mantel(self.output_dir, self.dm2, self.mismatched_dm,\n intersect_ids=True)\n\n self.assertBasicVizValidity(self.output_dir, 3,\n mismatched_ids={'foo', 'x'},\n exp_test_stat=1, exp_p_value=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.array", "pandas.Index" ] ]
eugtsa/led_wabbit
[ "04c56b438518d65a385788fa9a118adbe74a1771" ]
[ "examples/example_simple_sklearn_logreg.py" ]
[ "from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss\nfrom sklearn.utils import shuffle\nfrom itertools import chain\nimport numpy as np\nimport random\n\nfrom led_wabbit.models import LogisticRegressionBinary\n\nif __name__ == '__main__':\n X1 = [[0, 1, random.random()*3] for i in range(40)]\n X2 = [[0, 2, random.random()*3-1] for i in range(40)]\n X3 = [[1, 0, random.random()*3+1] for i in range(40)]\n X4 = [[0, 2, random.random()*3-2] for i in range(3)]\n\n X = np.array([x for x in chain(X1, X2, X3, X4)])\n\n Y1 = [0 for i in range(40)]\n Y2 = [1 for i in range(40)]\n Y3 = [0 for i in range(40)]\n Y4 = [1 for i in range(3)]\n\n Y = np.array([y for y in chain(Y1, Y2, Y3, Y4)])\n\n X, Y = shuffle(X, Y)\n\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)\n\n header_dict = {0: ('n', 'X', 'x0'), 1: ('n', 'Y', 'y0'), 2: ('n', 'Z', 'z0')}\n\n clf = LogisticRegressionBinary(passes=50,\n learning_rate=0.5,\n header_dict=header_dict,\n quadratic='XY YZ')\n\n clf.fit(X_train, y_train)\n\n preds = clf.predict_proba(X_test)\n\n print(log_loss(y_test,preds))" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.metrics.log_loss", "sklearn.utils.shuffle" ] ]
wkvanderveen/object_segmenter
[ "b63f61123919ba36632cf99bd78949387e6338fc" ]
[ "hparam_optimizer.py" ]
[ "\"\"\"Helper file for automatic hyperparameter grid search.\n This file should not be modified -- for changing variables, go to\n parameters.py.\n Copyright 2018 Werner van der Veen\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied. See the License for the specific language governing\n permissions and limitations under the License.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport object_segmenter\nimport parameters as par\nimport utils\nimport pandas as pd\n\n# Set filename of csv file containing the search results\nFILENAME = 'results.csv'\n\nVAR1 = par.hyperparameter1_search\nVAR2 = par.hyperparameter2_search\n\n# Overwrite the results file if it already exists\nif os.path.isfile(f\"./{FILENAME}\"):\n os.remove(f\"./{FILENAME}\")\n\n# Empty the folder where the prediction plots will be stored\nutils.prepare_dir(par.pred_dir, empty=True)\n\n\ndef write_to_csv(row):\n \"\"\"Write a text row to a csv file.\"\"\"\n with open(FILENAME, 'a', newline='') as file:\n writer = csv.writer(file,\n delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n writer.writerow(row)\n\n\n# Write the column titles to the results file.\nwrite_to_csv([VAR1['Name'], VAR2['Name'], 'Accuracy'])\n\n# Set a counter for terminal progress printing\nRUN_COUNT = 0\n\nVAR1_RANGE = range(VAR1[\"min_val\"], VAR1[\"max_val\"]+VAR1[\"step\"], VAR1[\"step\"])\nVAR2_RANGE = range(VAR2[\"min_val\"], VAR2[\"max_val\"]+VAR2[\"step\"], VAR2[\"step\"])\n\nfor val1 in VAR1_RANGE:\n for val2 in VAR2_RANGE:\n RUN_COUNT += 1\n\n # Print a box containing progress information\n utils.print_big_title([\"Hyperoptimization:\",\n f\"Progress: {RUN_COUNT}/\" +\n str(len(VAR1_RANGE)*len(VAR2_RANGE)),\n f\"Now testing with:\",\n f\" {VAR1['Name']} = {val1},\",\n f\" {VAR2['Name']} = {val2}\"])\n\n # Empty the model directory before training the network\n utils.prepare_dir(par.model_dir, empty=True)\n config = ['object_segmenter.py',\n [VAR1[\"Name\"], val1],\n [VAR2[\"Name\"], val2]]\n\n # Run the network and write the results to the results file.\n accuracy = object_segmenter.main(config)\n write_to_csv([str(config[1][1]),\n str(config[2][1]),\n accuracy])\n print(f\"The accuracy of this configuration is {accuracy:.4f}\",\n f\"and has been appended to the {FILENAME} file.\")\n\nutils.print_big_title([\"Results of hyperparameter optimization\"])\n\n# Construct a pretty-print data frame and sort by 'Accuracy'.\nDATAFRAME = pd.DataFrame(pd.read_csv(FILENAME))\nprint(DATAFRAME.sort_values(by=['Accuracy'], ascending=False))\n" ]
[ [ "pandas.read_csv" ] ]
VaeterchenFrost/ComputationalPhysics2016
[ "3a673ee0eca720a3dd9ab5acdd5e1126cb45b9c3" ]
[ "6_1_martin_roebke.py" ]
[ "\"\"\"Computational Physics Aufgabe 6.1, Autor: Martin Roebke 05.06.16\n Quantenmechanik von 1D-Potentialen II - Zeitentwicklung\n\nBerechnet und zeichnet die Zeitentwicklung eines Gauss'schen Wellen-\npaketes in einer Doppelmulde mit Hilfe des Moduls quantenmechanik.py .\nPer Mausklick wird ein spezifiziertes Wellenpaket von der x-Position der Maus\nin der Basis der Eigenfunktionen dargestellt, und fuer\ndiskrete Zeiten per Zeitoperator entwickelt dargestellt.\n\nEnergetisch niedrige EF sind numerisch gut (2. Ableitung klein).\n => Hoehere EF: Beitrag~Null erwuenscht fuer geringe Fehler in Entwicklung.\nBei neuen Starts waehrend der Zeitentwickelten Darstellung wird lediglich das\njeweils neueste Paket berechnet und verfolgt.\nAlle vorherigen Wellenpakete werden verworfen.\n\"\"\"\n\nfrom __future__ import division, print_function # problemlose Ganzzahl-Division\nimport numpy as np # Arrays, Mathe etc\nimport matplotlib.pyplot as plt # Plotten\nimport quantenmechanik as qm # Eigenwerte und -funktionen 1D\nfrom functools import partial # Voreinstellung *args **kwargs\n\n\ndef doppelmulde(x=None, A=0.05, string=False):\n \"\"\"doppelmulde(x=None, A=0.05, string=False)\n Rueckgabe : x**4-x*x-A*x\n Parameter:\n x : array_like\n Argument der Funktion.\n A : reelle Zahl\n Parameter der Funktion.\n string : boolean, optional\n Wenn True wird eine Funktionsbeschreibung als String zurueckgegeben.\n \"\"\"\n if string:\n if A == 0:\n return r\"$x^4-x^2$\"\n if A > 0:\n return r\"$x^4-x^2-{}*x$\".format(A)\n if A < 0:\n return r\"$x^4-x^2+{}*x$\".format(-A)\n return x**4 - x*x - A*x\n\n\ndef gaussian_wave_paket(xa, sigma=1., x0=0., heff=1., p0=0.):\n \"\"\"gaussian_wave_paket(xa, sigma=1., x0=0., heff=1., p0=0.)\n Berechnung Gauss'sches Wellenpaket mit\n mittlerem Ort `x0`, Standardabweichung `sigma`,\n mittlerem Impuls `p0`, effektives hquer `heff` an den Orten `xa`.\n (2*np.pi*sigma**2)**-0.25 * np.exp(-((x-x0)/(2*sigma))**2 + 1j/heff*p0*x)\n\n Rueckgabe:\n phi : array-like Wellenpaket.\n \"\"\"\n # Umwandeln des Ausgangsarrays `x` zu komplex:\n x = np.array(xa, dtype=complex)\n fak = (2*np.pi*sigma*sigma)**-0.25 # Vorfaktor\n arg = -((x - x0)/(2*sigma))**2 # Erstes Argument von exp\n if p0 == 0.: # p0 gleich 0.\n phi = fak*np.exp(arg)\n else: # Wenn p0!=0.\n phi = fak*np.exp(arg)*np.exp(1j/heff*p0*x)\n return phi\n\n\nclass QMZeitentwicklung(object):\n \"\"\"Berechnung und Zeichnung der Zeitentwicklung eines Gauss'schen Wellen-\n paketes in einem 1D-Potential mit Hilfe des Modules `quantenmechanik`.\n Per Mausklick an Position x wird von dieser Position ein spezifiziertes\n Wellenpaket in der Basis der Eigenfunktionen dargestellt, und fuer\n gewaehlte Zeiten per Zeitoperator entwickelt.\n \"\"\"\n\n def __init__(self, axis, potential, emax, p0, heff, sigma, xr_s, xr_e, nr,\n tmin=0, tmax=10, num_t=200, title=None, fak=.01,\n phi_color='k', update_ef=True, wait_dt=0.01):\n \"\"\"Initialisierung der Parameter.\n Pruefung auf `mindestens eine gegebene Zeit`, da Zeitentwicklung sonst\n hinfaellig.\n\n Parameter:\n axis: AxesSubplot Zeichenbereich.\n potential: Funktion des Potentials ueber Ort.\n emax: reelle Zahl; maximale Energie des Plots.\n p0: reelle Zahl; Startimpuls des Wellenpaketes.\n heff: reelle Zahl; Zugehoeriges hquer effektiv.\n sigma: reelle Zahl; Start-Breite Gauss-Wellenpaket.\n xr_s, xr_e: reelle Zahl; Start- und Endpunkt der Potentialauswertung.\n nr: integer; Anzahl Stuetzstellen in x, Matrixgroesse N.\n tmin, tmax: reelle Zahl; Start- und Endzeit der Darstellung.\n num_t: 
integer; Anzahl Zeitschritte der Darstellung.\n title: string; Titel der Zeichnung.\n `None` nutzt Default der qm.plot_eigenfunktionen.\n fak: reelle Zahl; Faktor Eigenfunktions-Skalierung.\n phi_color: Farbspezifikation fuer `plt.plot`. Phi-Farbe.\n update_ef: boolean; Passe Erscheinung der Eigenfunktionen an c_n an.\n wait_dt: Zahl >=0; Plot-Pause zwischen Zeitschritten.\n \"\"\"\n print(\"QMZeitentwicklung Initialisierung mit \\np0 = {}, heff = {}, \"\n \"sigma = {}, N = {}.\".format(p0, heff, sigma, nr))\n self.axis = axis\n self.potential = potential\n self.emax = emax\n self.p0 = p0\n self.heff = heff\n self.sigma = sigma\n self.xr_s = xr_s\n self.xr_e = xr_e\n self.nr = nr\n self.t = np.linspace(tmin, tmax, num_t)\n assert len(self.t) > 0\n self.title = title\n self.fak = fak\n self.phi_color = phi_color\n self.update_ef = update_ef\n self.wait_dt = wait_dt\n self.startnum = 0\n self.berechnet = False # Schalter: Berechnung ok.\n\n def berechnung_esys(self):\n \"\"\"Diagonalisierung der Hamilton-Matrix.\n Diskretisierung `self.x` des Ortes zu Matrixgroesse `self.nr`.\n Berechnung von `self.ew`, `self.ef` mittels `qm.diagonalisierung`.\n \"\"\"\n # Berechnung\n self.x, self.dx = qm.diskretisierung(self.xr_s, self.xr_e, self.nr,\n retstep=True)\n self.pot_x = self.potential(self.x)\n self.ew, self.ef = qm.diagonalisierung(self.heff, self.x, self.pot_x)\n self.berechnet = True\n\n def plot(self):\n \"\"\"Initialisieren und Beschriften des Plotbereiches entsprechend der\n Methode `qm.plot_eigenfunktionen` und der Betragsquadrate def EF.\n Dabei wird erstellt:\n `self.eflines` als Array der geordneten Eigenfunkions-Linien im Plot.\n `self.zeitentw`: Vorbereitung der Linie fuer die Wellenpaket-Darst.\n Verbindet 'button_press_event' mit `self.mausklick`.\n \"\"\"\n if self.berechnet is False:\n self.berechnung_esys()\n # Plottet: Potential - Eigenwerte Basislinie, Eigenfunktionen\n qm.plot_eigenfunktionen(self.axis, self.ew, self.ef, self.x, self.pot_x,\n Emax=self.emax, fak=self.fak,\n betragsquadrat=True, title=self.title)\n if self.update_ef: # Abspeichern der EF-lines.\n self.num_ef = (len(self.axis.lines) - 1) / 2\n self.ef_iter = range(int(self.num_ef+1), len(self.axis.lines))\n self.eflines = np.array(self.axis.lines)[self.ef_iter]\n plt.setp(self.axis.title, fontsize=20) # Passe Titelgroesse an.\n\n # Bereitstellen der Plotlinie fuer `zeitentw`\n self.zeitentw, = self.axis.plot([], [], self.phi_color, linewidth=1.2)\n # Verknuepfung des button_press_event mit Funktion\n figc = self.axis.get_figure()\n figc.canvas.mpl_connect('button_press_event', self._mausklick)\n\n def _mausklick(self, event):\n \"\"\"Bei Klick mit linker Maustaste in `self.axis`:\n Erstelle Wellenpaket und zeitentwicklung, und stelle diese dar.\n Fuehrt eine Fortschrittsbeschreibung auf der Konsole.\n Startpunkt des Wellenpaketes `self.phi`durch x-Koordinate\n des Klicks bestimmt.\n Fortlaufende Nummerierung der Wellenpakete mit\n `self.startnum`: Abbrechen aelterer Wellenpakete als das aktuelle.\n Berechne Entwicklungskoeffizienten `self.c`.\n Berechne rekonstruiertes Wellenaket `self.phi_rec`.\n Wenn `self.update_ef`: Visualisiere Beitrag der Eigenfunktionen\n an `self.phi_rec` als linewidth.\n \"\"\"\n mode = plt.get_current_fig_manager().toolbar.mode\n # Test ob Klick mit linker Maustaste und im Koordinatensystem\n # erfolgt ist, sowie ob Funktionen des Plotfensters deaktiviert sind:\n if not (event.button == 1 and event.inaxes == self.axis and mode == ''):\n return\n x0 = event.xdata\n self.phi = 
gaussian_wave_paket(self.x, self.sigma, x0,\n self.heff, self.p0)\n # Wellenpaket in Eigenfunktionen entwickeln\n self.c = self.dx*np.dot(self.ef.conj().T, self.phi)\n # Rekonstruktion aus Entwicklung nach EF\n self.phi_rec = np.dot(self.ef, self.c)\n normdiff = np.linalg.norm(self.phi - self.phi_rec) * np.sqrt(self.dx)\n\n # Energieerwartungswert unter Verwendung von c_n:\n self.phi_ew = np.dot(abs(self.c)**2, self.ew)\n\n if self.update_ef:\n # Visualisiere EF-Width\n for i, line in enumerate(self.eflines):\n line.set_linewidth(abs(self.c[i])*2.5)\n\n # Zeitentwicklung berechnen\n self.startnum += 1\n startnum = self.startnum\n print(\"Paket [{}]: Start an x0 = {:.3f}\".format(startnum, x0))\n print(\" Differenz in Rekonstruktion: {:.3e}\".format(normdiff))\n print(\" Zeit-Darstellung von t={} bis {} in {} Schritten.\"\n \"\".format(self.t[0], self.t[-1], len(self.t)))\n\n self.zeitentw.set_xdata(self.x)\n for t in self.t: # Zeitschritte darstellen.\n # Nicht-Nachholen bei Unterbrechung.\n if startnum < self.startnum:\n return\n # Zeitentwicklung auf Phi-Koeffizienten:\n self.phi_rec = np.dot(self.ef,\n self.c * np.exp(-1j * self.ew * t / self.heff))\n self.plot_phi = self.fak*np.abs(self.phi_rec)**2 + self.phi_ew\n self.zeitentw.set_ydata(self.plot_phi)\n plt.pause(self.wait_dt)\n print(\"Paket [{}]: Zeit-Darstellung beendet.\".format(startnum))\n\n def show(self):\n \"\"\"Zeige alle erstellten Figuren. Warte auf Benutzerinteraktion.\"\"\"\n plt.show()\n\n\ndef main():\n \"\"\"Mainfunktion Quantenmechanik von 1D-Potentialen II - Zeitentwicklung.\n Eingabe der Parameter und Starten gewuenschter Realisierungen.\n \"\"\"\n # Potential\n A = 0.00\n potential = partial(doppelmulde, A=A)\n emax = 0.3\n p0 = 0.0 # Startimpuls\n # Zugehoeriges hquer effektiv ~ Masse.\n heff = 0.07\n # Start-Breite Gauss-Wellenpaket.\n sigma = 0.1\n # Start- Endbereich Rechnen, V(x<=xr_s)->unendlich, V(x>=xr_e)->unendlich.\n xr_s = -1.7\n xr_e = 1.7\n # Stuetzstellen, Matrixgroesse N\n N = 200\n # Zeitdarstellung von `tmin` bis `tmax` in `num_t` Schritten\n tmin = 0\n tmax = 100000\n num_t = 300\n # Allgemeiner Faktor EF-Skalierung.\n fak = 0.01\n title = \"$Potential:\\quad$\" + potential(string=True)\n update_ef = True # Anpassung EF an c_n\n # Plot Figure\n fig, axis = plt.subplots(figsize=(12, 14))\n\n # Anfangstext Konsole\n print(__doc__)\n\n # Instanziierung\n rea = QMZeitentwicklung(axis, potential, emax, p0, heff, sigma, xr_s, xr_e,\n N, tmin, tmax, num_t, title, fak=fak, update_ef=update_ef)\n rea.plot() # Starte Darstellung\n rea.show() # Benutzerinteraktion\n\n\n# -------------Main Programm----------------\nif __name__ == \"__main__\":\n main() # Rufe Mainroutine\n\n\"\"\"Kommentar:\nDas qm. Tunneln ist kaum klassisch zu erklaeren - wohl aber durch den\nTunneleffekt eines quantenmechanischen Wellenpaketes und die Unschaerfe auf\nvergleichsweise kleinen Skalen.\n\na) Das Paket zerfliesst ohne Startimpuls vom Maximum in die beiden Mulden.\n Es hat anschliessend wenig Aufenthaltswahrscheinlichkeit (AWK)\n an dem Maximum. Ein erwartetes Eindringen in klassisch verbotenen\n Bereich bei starker Abnahme der AWK, sowie Reflexion an\n den Potentialwaenden, sind zu beobachten.\n Start in Potential-Minimum: Die urspruengliche Form wird hauptsaechlich\n beibehalten, schwingt jedoch in ihrer Startmulde nach links und rechts.\n Wieder ist vorherige Beobachtung moeglich, zusaetzlich besteht die\n Moeglichkeit des Tunnelns durch den Potentialhuegel.\n Nach der etwa exp. 
Daempfung ist in der anderen Mulde fuer eine gewisse\n Zeit nur eine sehr geringe Aufenthaltswahrscheinlichkeit zu erwarten.\n\nb) Das Paket bewegt sich bei p0 = 0.3 zu Beginn vorzugsweise nach \"rechts\".\n Nach den oben beschriebenen Reflexionen zeigt sich ueber laengeren Zeitraum\n eine aehnliche AWK.\n Insgesamt starten Wellenpakete mit einer groesseren Energieerwartung,\n da der kinetische Term E_kin>0 wird.\n Damit koennen Pakete bei gleicher Startposition staerker\n tunneln als ohne Startimpuls.\n\nc) A=0, p0=0 fuer grosse Zeiten bei Start in einer Mulde:\n Hier ist ein periodisches Wechseln der AWK zwischen beiden Mulden\n zu beobachten. Da das Potential hier symmetrisch verlaeuft,\n bleibt die Gesamtenergie auch dabei erhalten.\n Das Wellenpaket 'tunnelt' periodisch hin und her und wechselt\n von hoher AWK zu niedrigerer AWK beider Mulden.\n\"\"\"\n" ]
[ [ "numpy.array", "numpy.dot", "matplotlib.pyplot.setp", "numpy.linalg.norm", "matplotlib.pyplot.get_current_fig_manager", "numpy.exp", "matplotlib.pyplot.subplots", "matplotlib.pyplot.pause", "numpy.sqrt", "numpy.abs", "matplotlib.pyplot.show", "numpy.linspace" ] ]
Jarvan-Wang/k2
[ "7f164ecb804d15006fd30e8564d80e0fa212f011" ]
[ "k2/python/tests/dense_fsa_vec_test.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)\n#\n# See ../../../LICENSE for clarification regarding multiple authors\n\n# To run this single test, use\n#\n# ctest --verbose -R dense_fsa_vec_test_py\n\nimport unittest\n\nimport k2\nimport torch\n\n\nclass TestDenseFsaVec(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.devices = [torch.device('cpu')]\n if torch.cuda.is_available() and k2.with_cuda:\n cls.devices.append(torch.device('cuda', 0))\n if torch.cuda.device_count() > 1:\n torch.cuda.set_device(1)\n cls.devices.append(torch.device('cuda', 1))\n\n def test_dense_fsa_vec(self):\n for device in self.devices:\n log_prob = torch.arange(20, dtype=torch.float32,\n device=device).reshape(\n 2, 5, 2).log_softmax(dim=-1)\n supervision_segments = torch.tensor(\n [\n # seq_index, start_time, duration\n [0, 0, 3],\n [0, 1, 4],\n [1, 0, 2],\n [0, 2, 3],\n [1, 3, 2],\n ],\n dtype=torch.int32)\n\n dense_fsa_vec = k2.DenseFsaVec(log_prob, supervision_segments)\n assert dense_fsa_vec.dim0() == 5, 'It should contain 5 segments'\n assert dense_fsa_vec.device == device\n assert dense_fsa_vec.duration.device == torch.device('cpu')\n assert torch.all(\n torch.eq(dense_fsa_vec.duration, supervision_segments[:, 2]))\n\n del dense_fsa_vec._duration\n assert torch.all(\n torch.eq(dense_fsa_vec.duration, supervision_segments[:, 2]))\n\n assert torch.allclose(dense_fsa_vec.scores[:3, 1:],\n log_prob[0][0:3])\n\n offset = 3 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 4, 1:],\n log_prob[0][1:5])\n\n offset += 4 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],\n log_prob[1][0:2])\n\n offset += 2 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 3, 1:],\n log_prob[0][2:5])\n\n offset += 3 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],\n log_prob[1][3:5])\n\n dense_fsa_vec.to('cpu')\n\n def test_duration(self):\n for device in self.devices:\n log_prob = torch.arange(20, dtype=torch.float32,\n device=device).reshape(\n 2, 5, 2).log_softmax(dim=-1)\n\n supervision_segments = torch.tensor(\n [\n # seq_index, start_time, duration\n [0, 0, 3],\n [0, 4, 2], # exceed 1\n [0, 3, 4], # exceed 2\n [1, 1, 7], # exceed 3\n ],\n dtype=torch.int32)\n\n dense_fsa_vec = k2.DenseFsaVec(log_prob,\n supervision_segments,\n allow_truncate=3)\n assert torch.all(\n torch.eq(dense_fsa_vec.duration, torch.tensor([3, 1, 2, 4])))\n\n assert torch.allclose(dense_fsa_vec.scores[:3, 1:],\n log_prob[0][0:3])\n\n offset = 3 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 1, 1:],\n log_prob[0][4:5])\n\n offset += 1 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],\n log_prob[0][3:5])\n\n offset += 2 + 1\n assert torch.allclose(dense_fsa_vec.scores[offset:offset + 4, 1:],\n log_prob[1][1:5])\n\n dense_fsa_vec.to('cpu')\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.device", "torch.eq", "torch.arange", "torch.cuda.device_count", "torch.cuda.set_device", "torch.cuda.is_available", "torch.tensor", "torch.allclose" ] ]
JEHoctor/fairlearn
[ "9ffe6e1166db6159c181f8f0c925b1a9402856f7" ]
[ "fairlearn/_input_validation.py" ]
[ "# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils.validation import check_X_y, check_consistent_length, check_array\n\n\nlogger = logging.getLogger(__file__)\n\n_KW_SENSITIVE_FEATURES = \"sensitive_features\"\n_KW_CONTROL_FEATURES = \"control_features\"\n\n_MESSAGE_X_NONE = \"Must supply X\"\n_MESSAGE_Y_NONE = \"Must supply y\"\n_MESSAGE_SENSITIVE_FEATURES_NONE = \"Must specify {0} (for now)\".format(_KW_SENSITIVE_FEATURES)\n_MESSAGE_X_Y_ROWS = \"X and y must have same number of rows\"\n_MESSAGE_X_SENSITIVE_ROWS = \"X and the sensitive features must have same number of rows\"\n_MESSAGE_RATIO_NOT_IN_RANGE = \"ratio must lie between (0,1]\"\n_INPUT_DATA_FORMAT_ERROR_MESSAGE = \"The only allowed input data formats for {} are: {}. \" \\\n \"Your provided data was of type {}.\"\n_EMPTY_INPUT_ERROR_MESSAGE = \"At least one of sensitive_features, labels, or scores are empty.\"\n_LABELS_NOT_0_1_ERROR_MESSAGE = \"Supplied y labels are not 0 or 1\"\n_MORE_THAN_ONE_COLUMN_ERROR_MESSAGE = \"{} is a {} with more than one column\"\n_NOT_ALLOWED_TYPE_ERROR_MESSAGE = \"{} is not an ndarray, Series or DataFrame\"\n_NDARRAY_NOT_TWO_DIMENSIONAL_ERROR_MESSAGE = \"{} is an ndarray which is not 2D\"\n_NOT_ALLOWED_MATRIX_TYPE_ERROR_MESSAGE = \"{} is not an ndarray or DataFrame\"\n\n_ALLOWED_INPUT_TYPES_X = [np.ndarray, pd.DataFrame]\n_ALLOWED_INPUT_TYPES_SENSITIVE_FEATURES = [np.ndarray, pd.DataFrame, pd.Series, list]\n_ALLOWED_INPUT_TYPES_Y = [np.ndarray, pd.DataFrame, pd.Series, list]\n\n_MERGE_COLUMN_SEPARATOR = \",\"\n\n\ndef _validate_and_reformat_input(X, y=None, expect_y=True, enforce_binary_labels=False, **kwargs):\n \"\"\"Validate input data and return the data in an appropriate format.\n\n The :code:`**kwargs` can contain :code:`sensitive_features=` and :code:`control_features=`\n parameters.\n\n Parameters\n ----------\n X : numpy.ndarray, pandas.DataFrame\n The feature matrix\n y : numpy.ndarray, pandas.DataFrame, pandas.Series, or list\n The label vector\n expect_y : bool\n If True y needs to be provided, otherwise ignores the argument; default True\n enforce_binary_labels : bool\n If True raise exception if there are more than two distinct\n values in the `y` data; default False\n\n Returns\n -------\n Tuple(pandas.DataFrame, pandas.Series, pandas.Series, pandas.Series)\n The validated and reformatted X, y, sensitive_features and control_features; note\n that certain estimators rely on metadata encoded in X which may be stripped during\n the reformatting process, so mitigation methods should ideally use the input X instead\n of the returned X for training estimators and leave potential reformatting of X to the\n estimator.\n \"\"\"\n if y is not None:\n # calling check_X_y with a 2-dimensional y causes a warning, so ensure it is 1-dimensional\n if isinstance(y, np.ndarray) and len(y.shape) == 2 and y.shape[1] == 1:\n y = y.reshape(-1)\n elif isinstance(y, pd.DataFrame) and y.shape[1] == 1:\n y = y.to_numpy().reshape(-1)\n\n X, y = check_X_y(X, y)\n y = check_array(y, ensure_2d=False, dtype='numeric')\n if enforce_binary_labels and not set(np.unique(y)).issubset(set([0, 1])):\n raise ValueError(_LABELS_NOT_0_1_ERROR_MESSAGE)\n elif expect_y:\n raise ValueError(_MESSAGE_Y_NONE)\n else:\n X = check_array(X)\n\n sensitive_features = kwargs.get(_KW_SENSITIVE_FEATURES)\n if sensitive_features is None:\n raise ValueError(_MESSAGE_SENSITIVE_FEATURES_NONE)\n\n 
check_consistent_length(X, sensitive_features)\n sensitive_features = check_array(sensitive_features, ensure_2d=False, dtype=None)\n\n # compress multiple sensitive features into a single column\n if len(sensitive_features.shape) > 1 and sensitive_features.shape[1] > 1:\n sensitive_features = _merge_columns(sensitive_features)\n\n # Handle the control features\n control_features = kwargs.get(_KW_CONTROL_FEATURES)\n if control_features is not None:\n check_consistent_length(X, control_features)\n control_features = check_array(control_features, ensure_2d=False, dtype=None)\n\n # compress multiple control features into a single column\n if len(control_features.shape) > 1 and control_features.shape[1] > 1:\n control_features = _merge_columns(control_features)\n\n control_features = pd.Series(control_features.squeeze())\n\n # If we don't have a y, then need to fiddle with return type to\n # avoid a warning from pandas\n if y is not None:\n result_y = pd.Series(y)\n else:\n result_y = pd.Series(dtype=\"float64\")\n\n return pd.DataFrame(X), result_y, pd.Series(sensitive_features.squeeze()), control_features\n\n\ndef _merge_columns(feature_columns: np.ndarray) -> np.ndarray:\n \"\"\"Merge multiple columns into a single new column.\n\n Compresses multiple columns into a single column by concatenating the\n stringified values. For example, given two columns\n :code:`['A', 'A', 'B', 'B']` and :code:`[4, 5, 4, 5]` the resultant\n column will be\n :code:`['A,4', 'A,5', 'B,4', 'B,5']`.\n\n Parameters\n ----------\n feature_column : numpy.ndarray\n Multi-dimensional array of columns to be merged\n\n Returns\n -------\n numpy.ndarray\n One-dimensional array of merged columns\n \"\"\"\n if not isinstance(feature_columns, np.ndarray):\n raise ValueError(\"Received argument of type {} instead of expected numpy.ndarray\"\n .format(type(feature_columns).__name__))\n return np.apply_along_axis(\n lambda row: _MERGE_COLUMN_SEPARATOR.join(\n [str(row[i])\n .replace(\"\\\\\", \"\\\\\\\\\") # escape backslash and separator\n .replace(_MERGE_COLUMN_SEPARATOR,\n \"\\\\\" + _MERGE_COLUMN_SEPARATOR)\n for i in range(len(row))]),\n axis=1,\n arr=feature_columns)\n" ]
[ [ "sklearn.utils.validation.check_X_y", "pandas.DataFrame", "sklearn.utils.validation.check_array", "sklearn.utils.validation.check_consistent_length", "pandas.Series", "numpy.unique" ] ]
quiltdata/quilt-renovate
[ "e57a0a35d0e819499a50e07785418c505dbb9055" ]
[ "api/python/tests/test_data_transfer.py" ]
[ "\"\"\" Testing for data_transfer.py \"\"\"\n\n### Python imports\nimport pathlib\n\nfrom unittest import mock\n\n### Third-party imports\nfrom botocore.stub import ANY\nimport pandas as pd\nimport pytest\n\n### Project imports\nfrom quilt3 import data_transfer\n\nfrom .utils import QuiltTestCase\n\n### Code\n\n# parquet test moved to test_formats.py\n\nDATA_DIR = pathlib.Path(__file__).parent / 'data'\n\n\nclass DataTransferTest(QuiltTestCase):\n def test_select(self):\n # Note: The boto3 Stubber doesn't work properly with s3_client.select_object_content().\n # The return value expects a dict where an iterable is in the actual results.\n chunks = [\n b'{\"foo\": ',\n b'9, \"b',\n b'ar\": 3',\n b'}\\n{\"foo\"',\n b': 9, \"bar\": 1}\\n{\"foo\": 6, \"bar\": 9}\\n{\"foo\":',\n b' 1, \"bar\": 7}\\n{\"foo\":',\n b' 6, \"bar\": 1}\\n{\"foo\": 6, \"bar\": 6}',\n b'\\n{\"foo\": 9, \"bar\": 6}',\n b'\\n{\"foo\": 6, \"bar\": 4}\\n',\n b'{\"foo\": 2, \"bar\": 0}',\n b'\\n{\"foo\": 2, \"bar\": 0}\\n',\n ]\n records = [{'Records': {'Payload': chunk}} for chunk in chunks]\n # noinspection PyTypeChecker\n records.append({'Stats': {\n 'BytesScanned': 100,\n 'BytesProcessed': 100,\n 'BytesReturned': 210,\n }})\n records.append({'End': {}})\n\n expected_result = pd.DataFrame.from_records([\n {'foo': 9, 'bar': 3},\n {'foo': 9, 'bar': 1},\n {'foo': 6, 'bar': 9},\n {'foo': 1, 'bar': 7},\n {'foo': 6, 'bar': 1},\n {'foo': 6, 'bar': 6},\n {'foo': 9, 'bar': 6},\n {'foo': 6, 'bar': 4},\n {'foo': 2, 'bar': 0},\n {'foo': 2, 'bar': 0},\n ])\n\n # test normal use from extension\n expected_args = {\n 'Bucket': 'foo',\n 'Key': 'bar/baz.json',\n 'Expression': 'select * from S3Object',\n 'ExpressionType': 'SQL',\n 'InputSerialization': {\n 'CompressionType': 'NONE',\n 'JSON': {'Type': 'DOCUMENT'}\n },\n 'OutputSerialization': {'JSON': {}},\n }\n boto_return_val = {'Payload': iter(records)}\n with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched:\n result = data_transfer.select('s3://foo/bar/baz.json', 'select * from S3Object')\n\n patched.assert_called_once_with(**expected_args)\n assert result.equals(expected_result)\n\n with mock.patch.object(self.s3_client, 'select_object_content'):\n # No format determined.\n with pytest.raises(data_transfer.QuiltException):\n result = data_transfer.select('s3://foo/bar/baz', 'select * from S3Object')\n\n # test format-specified in metadata\n expected_args = {\n 'Bucket': 'foo',\n 'Key': 'bar/baz',\n 'Expression': 'select * from S3Object',\n 'ExpressionType': 'SQL',\n 'InputSerialization': {\n 'CompressionType': 'NONE',\n 'JSON': {'Type': 'DOCUMENT'}\n },\n 'OutputSerialization': {'JSON': {}},\n }\n\n boto_return_val = {'Payload': iter(records)}\n with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched:\n result = data_transfer.select('s3://foo/bar/baz', 'select * from S3Object', meta={'target': 'json'})\n assert result.equals(expected_result)\n patched.assert_called_once_with(**expected_args)\n\n # test compression is specified\n expected_args = {\n 'Bucket': 'foo',\n 'Key': 'bar/baz.json.gz',\n 'Expression': 'select * from S3Object',\n 'ExpressionType': 'SQL',\n 'InputSerialization': {\n 'CompressionType': 'GZIP',\n 'JSON': {'Type': 'DOCUMENT'}\n },\n 'OutputSerialization': {'JSON': {}},\n }\n boto_return_val = {'Payload': iter(records)}\n with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched:\n # result ignored -- returned data 
isn't compressed, and this has already been tested.\n data_transfer.select('s3://foo/bar/baz.json.gz', 'select * from S3Object')\n patched.assert_called_once_with(**expected_args)\n\n def test_get_size_and_version(self):\n response = {\n 'ETag': '12345',\n 'VersionId': '1.0',\n 'ContentLength': 123,\n }\n expected_params = {\n 'Bucket': 'my_bucket',\n 'Key': 'my_obj',\n }\n self.s3_stubber.add_response('head_object', response, expected_params)\n\n # Verify the verion is present\n assert data_transfer.get_size_and_version('s3://my_bucket/my_obj')[1] == '1.0'\n\n def test_list_local_url(self):\n dir_path = DATA_DIR / 'dir'\n contents = set(list(data_transfer.list_url(dir_path.as_uri())))\n assert contents == set([\n ('foo.txt', 4),\n ('x/blah.txt', 6)\n ])\n\n def test_etag(self):\n assert data_transfer._calculate_etag(DATA_DIR / 'small_file.csv') == '\"0bec5bf6f93c547bc9c6774acaf85e1a\"'\n assert data_transfer._calculate_etag(DATA_DIR / 'buggy_parquet.parquet') == '\"dfb5aca048931d396f4534395617363f\"'\n\n\n def test_simple_upload(self):\n path = DATA_DIR / 'small_file.csv'\n\n # Unversioned bucket\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'null'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': 'example',\n 'Key': 'foo.csv',\n }\n )\n\n data_transfer.copy_file(path.as_uri(), 's3://example/foo.csv')\n\n def test_multi_upload(self):\n path1 = DATA_DIR / 'small_file.csv'\n path2 = DATA_DIR / 'dir/foo.txt'\n\n # Unversioned bucket\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'null'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': 'example1',\n 'Key': 'foo.csv',\n }\n )\n\n # Versioned bucket\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v123'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': 'example2',\n 'Key': 'foo.txt',\n }\n )\n\n # stubber expects responses in order, so disable multi-threading.\n with mock.patch('quilt3.data_transfer.s3_threads', 1):\n urls = data_transfer.copy_file_list([\n (path1.as_uri(), 's3://example1/foo.csv', path1.stat().st_size),\n (path2.as_uri(), 's3://example2/foo.txt', path2.stat().st_size),\n ])\n\n assert urls[0] == 's3://example1/foo.csv'\n assert urls[1] == 's3://example2/foo.txt?versionId=v123'\n\n\n def test_upload_large_file(self):\n path = DATA_DIR / 'large_file.npy'\n\n self.s3_stubber.add_client_error(\n method='head_object',\n http_status_code=404,\n expected_params={\n 'Bucket': 'example',\n 'Key': 'large_file.npy',\n }\n )\n\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v1'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': 'example',\n 'Key': 'large_file.npy',\n }\n )\n\n urls = data_transfer.copy_file_list([\n (path.as_uri(), 's3://example/large_file.npy', path.stat().st_size),\n ])\n assert urls[0] == 's3://example/large_file.npy?versionId=v1'\n\n\n def test_upload_large_file_etag_match(self):\n path = DATA_DIR / 'large_file.npy'\n\n self.s3_stubber.add_response(\n method='head_object',\n service_response={\n 'ContentLength': path.stat().st_size,\n 'ETag': data_transfer._calculate_etag(path),\n 'VersionId': 'v1',\n },\n expected_params={\n 'Bucket': 'example',\n 'Key': 'large_file.npy',\n }\n )\n\n urls = data_transfer.copy_file_list([\n (path.as_uri(), 's3://example/large_file.npy', path.stat().st_size),\n ])\n assert urls[0] == 's3://example/large_file.npy?versionId=v1'\n\n\n def test_upload_large_file_etag_mismatch(self):\n path = DATA_DIR / 
'large_file.npy'\n\n self.s3_stubber.add_response(\n method='head_object',\n service_response={\n 'ContentLength': path.stat().st_size,\n 'ETag': '\"123\"',\n 'VersionId': 'v1',\n },\n expected_params={\n 'Bucket': 'example',\n 'Key': 'large_file.npy',\n }\n )\n\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v2'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': 'example',\n 'Key': 'large_file.npy',\n }\n )\n\n urls = data_transfer.copy_file_list([\n (path.as_uri(), 's3://example/large_file.npy', path.stat().st_size),\n ])\n assert urls[0] == 's3://example/large_file.npy?versionId=v2'\n\n\n def test_multipart_upload(self):\n name = 'very_large_file.bin'\n path = pathlib.Path(name)\n\n size = 30 * 1024 * 1024\n chunksize = 8 * 1024 * 1024\n\n chunks = -(-size // chunksize)\n\n # Create an empty 30MB file; shouldn't take up any actual space on any reasonable filesystem.\n with open(path, 'wb') as fd:\n fd.seek(size - 1)\n fd.write(b'!')\n\n self.s3_stubber.add_client_error(\n method='head_object',\n http_status_code=404,\n expected_params={\n 'Bucket': 'example',\n 'Key': name,\n }\n )\n\n self.s3_stubber.add_response(\n method='create_multipart_upload',\n service_response={\n 'UploadId': '123'\n },\n expected_params={\n 'Bucket': 'example',\n 'Key': name,\n }\n )\n\n for part_num in range(1, chunks+1):\n self.s3_stubber.add_response(\n method='upload_part',\n service_response={\n 'ETag': 'etag%d' % part_num\n },\n expected_params={\n 'Bucket': 'example',\n 'Key': name,\n 'UploadId': '123',\n 'Body': ANY,\n 'PartNumber': part_num\n }\n )\n\n self.s3_stubber.add_response(\n method='complete_multipart_upload',\n service_response={},\n expected_params={\n 'Bucket': 'example',\n 'Key': name,\n 'UploadId': '123',\n 'MultipartUpload': {\n 'Parts': [{\n 'ETag': 'etag%d' % i,\n 'PartNumber': i\n } for i in range(1, chunks+1)]\n }\n }\n )\n\n with mock.patch('quilt3.data_transfer.s3_threads', 1):\n data_transfer.copy_file_list([\n (path.resolve().as_uri(), f's3://example/{name}', path.stat().st_size),\n ])\n\n\n def test_multipart_copy(self):\n size = 100 * 1024 * 1024 * 1024\n\n # size / 8MB would give us 12501 chunks - but the maximum allowed is 10000,\n # so we should end with 16MB chunks instead.\n chunksize = 8 * 1024 * 1024\n assert size / chunksize > 10000\n chunksize *= 2\n\n chunks = -(-size // chunksize)\n assert chunks <= 10000\n\n self.s3_stubber.add_response(\n method='create_multipart_upload',\n service_response={\n 'UploadId': '123'\n },\n expected_params={\n 'Bucket': 'example2',\n 'Key': 'large_file2.npy',\n }\n )\n\n for part_num in range(1, chunks+1):\n self.s3_stubber.add_response(\n method='upload_part_copy',\n service_response={\n 'CopyPartResult': {\n 'ETag': 'etag%d' % part_num\n }\n },\n expected_params={\n 'Bucket': 'example2',\n 'Key': 'large_file2.npy',\n 'UploadId': '123',\n 'PartNumber': part_num,\n 'CopySource': {\n 'Bucket': 'example1',\n 'Key': 'large_file1.npy'\n },\n 'CopySourceRange': 'bytes=%d-%d' % (\n (part_num-1) * chunksize,\n min(part_num * chunksize, size) - 1\n )\n }\n )\n\n self.s3_stubber.add_response(\n method='complete_multipart_upload',\n service_response={},\n expected_params={\n 'Bucket': 'example2',\n 'Key': 'large_file2.npy',\n 'UploadId': '123',\n 'MultipartUpload': {\n 'Parts': [{\n 'ETag': 'etag%d' % i,\n 'PartNumber': i\n } for i in range(1, chunks+1)]\n }\n }\n )\n\n with mock.patch('quilt3.data_transfer.s3_threads', 1):\n data_transfer.copy_file_list([\n ('s3://example1/large_file1.npy', 
's3://example2/large_file2.npy', size),\n ])\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
KurtJacobson/pydm
[ "5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d" ]
[ "pydm/widgets/line_edit.py" ]
[ "import locale\nfrom functools import partial\nimport numpy as np\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom qtpy.QtWidgets import QLineEdit, QMenu, QApplication\nfrom qtpy.QtCore import Property, Q_ENUMS\nfrom .. import utilities\nfrom .base import PyDMWritableWidget, TextFormatter\nfrom .display_format import DisplayFormat, parse_value_for_display\n\n\nclass PyDMLineEdit(QLineEdit, TextFormatter, PyDMWritableWidget, DisplayFormat):\n Q_ENUMS(DisplayFormat)\n DisplayFormat = DisplayFormat\n \"\"\"\n A QLineEdit (writable text field) with support for Channels and more\n from PyDM.\n This widget offers an unit conversion menu when users Right Click\n into it.\n\n Parameters\n ----------\n parent : QWidget\n The parent widget for the Label\n init_channel : str, optional\n The channel to be used by the widget.\n \"\"\"\n\n def __init__(self, parent=None, init_channel=None):\n QLineEdit.__init__(self, parent)\n PyDMWritableWidget.__init__(self, init_channel=init_channel)\n self.app = QApplication.instance()\n self._display = None\n self._scale = 1\n\n self.returnPressed.connect(self.send_value)\n self.unitMenu = QMenu('Convert Units', self)\n self.create_unit_options()\n self._display_format_type = self.DisplayFormat.Default\n self._string_encoding = \"utf_8\"\n if utilities.is_pydm_app():\n self._string_encoding = self.app.get_string_encoding()\n\n @Property(DisplayFormat)\n def displayFormat(self):\n return self._display_format_type\n\n @displayFormat.setter\n def displayFormat(self, new_type):\n if self._display_format_type != new_type:\n self._display_format_type = new_type\n # Trigger the update of display format\n self.value_changed(self.value)\n\n def value_changed(self, new_val):\n \"\"\"\n Receive and update the PyDMLineEdit for a new channel value\n\n The actual value of the input is saved as well as the type received.\n This also resets the PyDMLineEdit display text using\n :meth:`.set_display`\n\n Parameters\n ----------\n value: str, float or int\n The new value of the channel\n \"\"\"\n super(PyDMLineEdit, self).value_changed(new_val)\n self.set_display()\n\n def send_value(self):\n \"\"\"\n Emit a :attr:`send_value_signal` to update channel value.\n\n The text is cleaned of all units, user-formatting and scale values\n before being sent back to the channel. 
This function is attached the\n ReturnPressed signal of the PyDMLineEdit\n \"\"\"\n send_value = str(self.text())\n # Clean text of unit string\n if self._show_units and self._unit and self._unit in send_value:\n send_value = send_value[:-len(self._unit)].strip()\n try:\n if self.channeltype not in [str, np.ndarray]:\n scale = self._scale\n if scale is None or scale == 0:\n scale = 1.0\n\n if self._display_format_type in [DisplayFormat.Default, DisplayFormat.String]:\n if self.channeltype == float:\n num_value = locale.atof(send_value)\n else:\n num_value = self.channeltype(send_value)\n scale = self.channeltype(scale)\n elif self._display_format_type == DisplayFormat.Hex:\n num_value = int(send_value, 16)\n elif self._display_format_type == DisplayFormat.Binary:\n num_value = int(send_value, 2)\n elif self._display_format_type in [DisplayFormat.Exponential, DisplayFormat.Decimal]:\n num_value = locale.atof(send_value)\n\n num_value = num_value / scale\n self.send_value_signal[self.channeltype].emit(num_value)\n elif self.channeltype == np.ndarray:\n # Arrays will be in the [1.2 3.4 22.214] format\n if self._display_format_type == DisplayFormat.String:\n self.send_value_signal[str].emit(send_value)\n else:\n arr_value = list(filter(None, send_value.replace(\"[\", \"\").replace(\"]\", \"\").split(\" \")))\n arr_value = np.array(arr_value, dtype=self.subtype)\n self.send_value_signal[np.ndarray].emit(arr_value)\n else:\n # Channel Type is String\n # Lets just send what we have after all\n self.send_value_signal[str].emit(send_value)\n except ValueError:\n logger.exception(\"Error trying to set data '{0}' with type '{1}' and format '{2}' at widget '{3}'.\"\n .format(self.text(), self.channeltype, self._display_format_type, self.objectName()))\n\n self.clearFocus()\n self.set_display()\n\n def write_access_changed(self, new_write_access):\n \"\"\"\n Change the PyDMLineEdit to read only if write access is denied\n \"\"\"\n super(PyDMLineEdit, self).write_access_changed(new_write_access)\n self.setReadOnly(not new_write_access)\n\n def unit_changed(self, new_unit):\n \"\"\"\n Accept a unit to display with a channel's value\n\n The unit may or may not be displayed based on the :attr:`showUnits`\n attribute. Receiving a new value for the unit causes the display to\n reset.\n \"\"\"\n super(PyDMLineEdit, self).unit_changed(new_unit)\n self._scale = 1\n self.create_unit_options()\n\n def create_unit_options(self):\n \"\"\"\n Create the menu for displaying possible unit values\n\n The menu is filled with possible unit conversions based on the\n current PyDMLineEdit. 
If either the unit is not found in the by\n the :func:`utilities.find_unit_options` function, or, the\n :attr:`.showUnits` attribute is set to False, the menu will tell\n the user that there are no available conversions\n \"\"\"\n self.unitMenu.clear()\n units = utilities.find_unit_options(self._unit)\n if units and self._show_units:\n for choice in units:\n self.unitMenu.addAction(choice,\n partial(\n self.apply_conversion,\n choice\n )\n )\n else:\n self.unitMenu.addAction('No Unit Conversions found')\n\n def apply_conversion(self, unit):\n \"\"\"\n Convert the current unit to a different one\n\n This function will attempt to find a scalar to convert the current\n unit type to the desired one and reset the display with the new\n conversion.\n\n Parameters\n ----------\n unit : str\n String name of desired units\n \"\"\"\n if not self._unit:\n logger.warning(\"Warning: Attempting to convert PyDMLineEdit unit, but no initial units supplied.\")\n return None\n\n scale = utilities.convert(str(self._unit), unit)\n if scale:\n self._scale = scale * float(self._scale)\n self._unit = unit\n self.update_format_string()\n self.clearFocus()\n self.set_display()\n else:\n logging.warning(\"Warning: Attempting to convert PyDMLineEdit unit, but '{0}' can not be converted to '{1}'.\"\n .format(self._unit, unit))\n\n def widget_ctx_menu(self):\n \"\"\"\n Fetch the Widget specific context menu which will be populated with additional tools by `assemble_tools_menu`.\n\n Returns\n -------\n QMenu or None\n If the return of this method is None a new QMenu will be created by `assemble_tools_menu`.\n \"\"\"\n menu = self.createStandardContextMenu()\n menu.addSeparator()\n menu.addMenu(self.unitMenu)\n return menu\n\n def set_display(self):\n \"\"\"\n Set the text display of the PyDMLineEdit.\n\n The original value given by the PV is converted to a text entry based\n on the current settings for scale value, precision, a user-defined\n format, and the current units. If the user is currently entering a\n value in the PyDMLineEdit the text will not be changed.\n \"\"\"\n if self.value is None:\n return\n\n if self.hasFocus():\n return\n\n new_value = self.value\n\n if self._display_format_type in [DisplayFormat.Default,\n DisplayFormat.Decimal,\n DisplayFormat.Exponential,\n DisplayFormat.Hex,\n DisplayFormat.Binary]:\n if not isinstance(new_value, (str, np.ndarray)):\n try:\n new_value *= self.channeltype(self._scale)\n except TypeError:\n logger.error(\"Cannot convert the value '{0}', for channel '{1}', to type '{2}'. \".format(\n self._scale, self._channel, self.channeltype))\n\n new_value = parse_value_for_display(value=new_value, precision=self._prec,\n display_format_type=self._display_format_type,\n string_encoding=self._string_encoding,\n widget=self)\n\n self._display = str(new_value)\n\n if self._display_format_type == DisplayFormat.Default:\n if isinstance(new_value, (int, float)):\n self._display = str(self.format_string.format(new_value))\n self.setText(self._display)\n return\n\n if self._show_units:\n self._display += \" {}\".format(self._unit)\n\n self.setText(self._display)\n\n def focusOutEvent(self, event):\n \"\"\"\n Overwrites the function called when a user leaves a PyDMLineEdit\n without pressing return. Resets the value of the text field to the\n current channel value.\n \"\"\"\n if self._display is not None:\n self.setText(self._display)\n super(PyDMLineEdit, self).focusOutEvent(event)\n" ]
[ [ "numpy.array" ] ]
liyuanpeng01/flare
[ "0acbe6ece1afc5fa06b78cbb7d62a6210d624c95" ]
[ "flare/examples/img_ac_example.py" ]
[ "import torch.nn as nn\nimport numpy as np\nfrom flare.algorithm_zoo.simple_algorithms import SimpleAC\nfrom flare.model_zoo.simple_models import SimpleModelAC\nfrom flare.framework.manager import Manager\nfrom flare.agent_zoo.simple_rl_agents import SimpleRLAgent\nfrom flare.framework.agent import OnlineHelper\nfrom flare.env_zoo.gym_env import GymEnvImage\nfrom flare.framework.common_functions import Flatten\n\nif __name__ == '__main__':\n \"\"\"\n A demo of how to train from image inputs\n \"\"\"\n game = \"Assault-v0\"\n\n num_agents = 16\n num_games = 8000\n\n # 1. Create image environments\n im_height, im_width = 84, 84\n envs = []\n for _ in range(num_agents):\n envs.append(\n GymEnvImage(\n game, contexts=4, height=im_height, width=im_width, gray=True))\n # context screens\n d, h, w = envs[-1].observation_dims()[0]\n num_actions = envs[-1].action_dims()[0]\n\n # 2. Construct the network and specify the algorithm.\n # Here we use a small CNN as the perception net for the Actor-Critic algorithm\n cnn = nn.Sequential(\n nn.Conv2d(\n d, 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(\n 32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(\n 64, 64, kernel_size=3, stride=1),\n nn.ReLU(),\n Flatten(), # flatten the CNN cube to a vector\n nn.Linear(7 * 7 * 64, 512),\n nn.ReLU())\n\n alg = SimpleAC(\n model=SimpleModelAC(\n dims=(d, h, w), num_actions=num_actions, perception_net=cnn),\n gpu_id=1)\n\n # 3. Specify the settings for learning: data sampling strategy\n # (OnlineHelper here) and other settings used by\n # ComputationTask.\n ct_settings = {\n \"RL\": dict(\n algorithm=alg,\n hyperparas=dict(grad_clip=5.0),\n # sampling\n agent_helper=OnlineHelper,\n # each agent will call `learn()` every `sample_interval` steps\n sample_interval=5,\n num_agents=num_agents)\n }\n\n # 4. Create Manager that handles the running of the whole pipeline\n manager = Manager(ct_settings)\n\n # 5. Spawn one agent for each instance of environment.\n # Agent's behavior depends on the actual algorithm being used. Since we\n # are using SimpleAC, a proper type of Agent is SimpleRLAgent.\n for env in envs:\n agent = SimpleRLAgent(env, num_games, reward_shaping_f=np.sign)\n # An Agent has to be added into the Manager before we can use it to\n # interact with environment and collect data\n manager.add_agent(agent)\n\n manager.start()\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Linear" ] ]
FoleyLab/FoleyLab.github.io
[ "1f84e4dc2f87286dbd4e07e483ac1e48943cb493" ]
[ "assets/pib_wp.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\n\n\n# First set up the figure, the axis, and the plot element we want to animate\n#fig = plt.figure()\n#ax = plt.axes(xlim=(0, 500), ylim=(-0.03, 0.03))\n#line, = ax.plot([], [], lw=2)\n\n### Function that takes in a list of x-coordinates, the quantum number, and the length L\n### and returns PIB energy eigenfunction\ndef PIB_Func(x, n, L):\n psi_n = np.sqrt(2./L)*np.sin(n*np.pi*x/L)\n return psi_n\n\n### Function that takes in a list of x-coordinates, a central x value, and central momentum value, \n### and a standard deviation for the position and returns complex-valued Gaussian wavepacket\ndef Gauss_Packet(x, x0, sig, k0):\n ci = 0.+1j\n pre = 1./(sig*np.sqrt(2.*np.pi))\n psi_x = pre*np.exp(-0.5*( (x-x0)/sig )**2)*np.exp(ci*k0*x)\n return psi_x\n\n### Given a complex-valued wavefunction (PsiX), list of x-coordinates (x) between 0 and L, \n### and list of quantum numbers, will return a list of complex expansion coefficients\n### to expand PsiX in terms of PIB energy eigenfunctions\ndef FourierAnalysis(x, PsiX, n, L):\n cn = np.zeros(len(n),dtype=complex)\n dx = x[1]-x[0]\n for i in range (0,len(cn)):\n\n som = 0+0j\n psi_i = PIB_Func(x, n[i], L)\n\n for j in range (0, len(x)):\n som = som + psi_i[j]*PsiX[j]*dx\n\n cn[i] = som\n\n return cn\n\n### Give a quantum number n and a length L, return the energy \n### of an electron in a box of length L in state n\ndef PIB_En(n, L):\n En = (n*n * np.pi*np.pi)/(2*L*L)\n return En\n\n### Give the quantum number and the current time, evaluate the time-dependent part of the wavefunction at current time\n### and return its value\ndef PIB_Time(n, L, t):\n E = PIB_En(n, L)\n ci = 0.+1j\n phi_n_t = np.exp(-1*ci*E*t)\n ### Write code here to define phi_n_t\n return phi_n_t\n\n### Given a vector of not-necessarily-normalized complex expansion coefficients\n### return the normalized version\ndef Normalize(c):\n sm=0.\n for i in range(0, len(c)):\n sm=sm+np.conj(c[i])*c[i]\n\n cn = c/sm\n return cn\n\n\n#### Initialize some variables/arrays that will be used by the animate function\nL = 500.\nxt = np.linspace(0, L, 2000)\n\n### set style for plot\nfig = plt.figure()\nax = plt.axes(xlim=(0, L), ylim=(-0.03, 0.03))\nline, = ax.plot([], [], lw=2)\n\n\npsi_exp = np.zeros(len(xt),dtype=complex)\n### Imaginary unit i\nci = 0.+1j\nsig = 15\nk0 = 60.*np.pi/L\nx0 = 200\nPsi = Gauss_Packet(xt, x0, sig, k0)\n\nnt = np.linspace(1,200,200)\ncn = FourierAnalysis(xt, Psi, nt, L)\n\n##for i in range(0,len(cn)):\n## psi_exp = psi_exp + cn[i]*PIB_Func(x, qn[i], L)\n\n\n\n\n# initialization function: plot the background of each frame\ndef init():\n line.set_data([], [])\n return line,\n\n# animation function. 
This is called sequentially to generate the animation\ndef animate(i):\n \n ### Once PIB_Func and PIB_En are defined, the following\n ### code can be used to plot the time-evolution of an energy eigenfunction\n\n ### Define x-grid - this will be for a particle in a box of length L=30 atomic units (Bohr radii)\n ### We will represent the function with 1000 grid points (dx = 30/1000)\n L = 500.\n x = np.linspace(0, L, 2000)\n\n ### Imaginary unit i\n ci = 0.+1j\n fwhm = 7*np.pi/L\n k0 = 5*np.pi/L\n psi_t = np.zeros(len(x),dtype=complex)\n for j in range(0,len(cn)):\n psi = PIB_Func(x, nt[j], L) \n ft = PIB_Time(nt[j], L, 4*i)\n psi_t = psi_t + cn[j]*psi*ft\n \n psi_t_star = np.conj(psi_t)\n\n y = np.real(psi_t)\n z = np.imag(psi_t)\n p = np.real(psi_t_star * psi_t)\n line.set_data(x, y)\n return line,\n\n\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=10000, interval=20, blit=True)\n### uncomment to save animation as mp4 anim.save('pib_wp.mp4', fps=15, extra_args=['-vcodec', 'libx264'])\nplt.show()\n\n\n#### Static plot\n#plt.plot(x, np.real(Psi), 'b--', x, np.real(psi_exp), 'red', x, P, 'black')\n#plt.show()\n" ]
[ [ "numpy.sin", "matplotlib.animation.FuncAnimation", "numpy.exp", "numpy.real", "matplotlib.pyplot.figure", "numpy.conj", "numpy.sqrt", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.axes", "numpy.imag" ] ]
hepansls/simpletransformers
[ "abbb8d628f341ef47ee920e1d971510d607b1a90" ]
[ "simpletransformers/classification/classification_model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom __future__ import absolute_import, division, print_function\nimport collections\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom dataclasses import asdict\nfrom multiprocessing import cpu_count\nimport tempfile\nfrom pathlib import Path\n\nfrom collections import Counter\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom scipy.stats import mode, pearsonr\nfrom scipy.special import softmax\nfrom sklearn.metrics import (\n confusion_matrix,\n label_ranking_average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n roc_curve,\n auc,\n average_precision_score,\n)\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm.auto import tqdm, trange\nfrom tqdm.contrib import tenumerate\nfrom transformers.optimization import (\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\nfrom transformers.optimization import AdamW, Adafactor\nfrom transformers import (\n AlbertConfig,\n AlbertTokenizer,\n AlbertForSequenceClassification,\n AutoConfig,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n BertConfig,\n BertTokenizerFast,\n BertForSequenceClassification,\n BertweetTokenizer,\n BigBirdConfig,\n BigBirdTokenizer,\n BigBirdForSequenceClassification,\n CamembertConfig,\n CamembertTokenizerFast,\n CamembertForSequenceClassification,\n DebertaConfig,\n DebertaForSequenceClassification,\n DebertaTokenizer,\n DebertaV2Config,\n DebertaV2ForSequenceClassification,\n DebertaV2Tokenizer,\n DistilBertConfig,\n DistilBertTokenizerFast,\n DistilBertForSequenceClassification,\n ElectraConfig,\n ElectraTokenizerFast,\n ElectraForSequenceClassification,\n FlaubertConfig,\n FlaubertTokenizer,\n FlaubertForSequenceClassification,\n HerbertTokenizerFast,\n LayoutLMConfig,\n LayoutLMTokenizerFast,\n LayoutLMForSequenceClassification,\n LongformerConfig,\n LongformerTokenizerFast,\n LongformerForSequenceClassification,\n MPNetConfig,\n MPNetForSequenceClassification,\n MPNetTokenizerFast,\n MobileBertConfig,\n MobileBertTokenizerFast,\n MobileBertForSequenceClassification,\n RobertaConfig,\n RobertaTokenizerFast,\n RobertaForSequenceClassification,\n SqueezeBertConfig,\n SqueezeBertForSequenceClassification,\n SqueezeBertTokenizerFast,\n WEIGHTS_NAME,\n XLMConfig,\n XLMRobertaConfig,\n XLMRobertaTokenizerFast,\n XLMRobertaForSequenceClassification,\n XLMTokenizer,\n XLMForSequenceClassification,\n XLNetConfig,\n XLNetTokenizerFast,\n XLNetForSequenceClassification,\n)\nfrom transformers.convert_graph_to_onnx import convert, quantize\n\nfrom simpletransformers.classification.classification_utils import (\n InputExample,\n LazyClassificationDataset,\n ClassificationDataset,\n convert_examples_to_features,\n load_hf_dataset,\n flatten_results,\n)\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import ClassificationArgs\nfrom simpletransformers.config.utils import sweep_config_to_sweep_values\nfrom simpletransformers.losses.loss_utils import init_loss\n\n# from simpletransformers.custom_models.models import ElectraForSequenceClassification\n\n\ntry:\n import wandb\n\n wandb_available = 
True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\n\nMODELS_WITHOUT_CLASS_WEIGHTS_SUPPORT = [\"squeezebert\", \"deberta\", \"mpnet\"]\n\nMODELS_WITH_EXTRA_SEP_TOKEN = [\n \"roberta\",\n \"camembert\",\n \"xlmroberta\",\n \"longformer\",\n \"mpnet\",\n]\n\nMODELS_WITH_ADD_PREFIX_SPACE = [\n \"roberta\",\n \"camembert\",\n \"xlmroberta\",\n \"longformer\",\n \"mpnet\",\n]\n\nMODELS_WITHOUT_SLIDING_WINDOW_SUPPORT = [\"squeezebert\"]\n\n\nclass ClassificationModel:\n def __init__(\n self,\n model_type,\n model_name,\n tokenizer_type=None,\n tokenizer_name=None,\n num_labels=None,\n weight=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n onnx_execution_provider=None,\n **kwargs,\n ):\n\n \"\"\"\n Initializes a ClassificationModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)\n model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n tokenizer_type: The type of tokenizer (auto, bert, xlnet, xlm, roberta, distilbert, etc.) to use. If a string is passed, Simple Transformers will try to initialize a tokenizer class from the available MODEL_CLASSES.\n Alternatively, a Tokenizer class (subclassed from PreTrainedTokenizer) can be passed.\n tokenizer_name: The name/path to the tokenizer. If the tokenizer_type is not specified, the model_type will be used to determine the type of the tokenizer.\n num_labels (optional): The number of labels or classes in the dataset.\n weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n onnx_execution_provider (optional): ExecutionProvider to use with ONNX Runtime. 
Will use CUDA (if use_cuda) or CPU (if use_cuda is False) by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"albert\": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),\n \"auto\": (AutoConfig, AutoModelForSequenceClassification, AutoTokenizer),\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizerFast),\n \"bertweet\": (\n RobertaConfig,\n RobertaForSequenceClassification,\n BertweetTokenizer,\n ),\n \"bigbird\": (\n BigBirdConfig,\n BigBirdForSequenceClassification,\n BigBirdTokenizer,\n ),\n \"camembert\": (\n CamembertConfig,\n CamembertForSequenceClassification,\n CamembertTokenizerFast,\n ),\n \"deberta\": (\n DebertaConfig,\n DebertaForSequenceClassification,\n DebertaTokenizer,\n ),\n \"debertav2\": (\n DebertaV2Config,\n DebertaV2ForSequenceClassification,\n DebertaV2Tokenizer,\n ),\n \"distilbert\": (\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizerFast,\n ),\n \"electra\": (\n ElectraConfig,\n ElectraForSequenceClassification,\n ElectraTokenizerFast,\n ),\n \"flaubert\": (\n FlaubertConfig,\n FlaubertForSequenceClassification,\n FlaubertTokenizer,\n ),\n \"herbert\": (\n BertConfig,\n BertForSequenceClassification,\n HerbertTokenizerFast,\n ),\n \"layoutlm\": (\n LayoutLMConfig,\n LayoutLMForSequenceClassification,\n LayoutLMTokenizerFast,\n ),\n \"longformer\": (\n LongformerConfig,\n LongformerForSequenceClassification,\n LongformerTokenizerFast,\n ),\n \"mobilebert\": (\n MobileBertConfig,\n MobileBertForSequenceClassification,\n MobileBertTokenizerFast,\n ),\n \"mpnet\": (MPNetConfig, MPNetForSequenceClassification, MPNetTokenizerFast),\n \"roberta\": (\n RobertaConfig,\n RobertaForSequenceClassification,\n RobertaTokenizerFast,\n ),\n \"squeezebert\": (\n SqueezeBertConfig,\n SqueezeBertForSequenceClassification,\n SqueezeBertTokenizerFast,\n ),\n \"xlm\": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n \"xlmroberta\": (\n XLMRobertaConfig,\n XLMRobertaForSequenceClassification,\n XLMRobertaTokenizerFast,\n ),\n \"xlnet\": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizerFast),\n }\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, ClassificationArgs):\n self.args = args\n\n if (\n model_type in MODELS_WITHOUT_SLIDING_WINDOW_SUPPORT\n and self.args.sliding_window\n ):\n raise ValueError(\n \"{} does not currently support sliding window\".format(model_type)\n )\n\n if self.args.thread_count:\n torch.set_num_threads(self.args.thread_count)\n\n if \"sweep_config\" in kwargs:\n self.is_sweeping = True\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = sweep_config_to_sweep_values(sweep_config)\n self.args.update_from_dict(sweep_values)\n else:\n self.is_sweeping = False\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if self.args.labels_list:\n if num_labels:\n assert num_labels == len(self.args.labels_list)\n if self.args.labels_map:\n try:\n assert list(self.args.labels_map.keys()) == self.args.labels_list\n except AssertionError:\n assert [\n int(key) for key in list(self.args.labels_map.keys())\n ] == 
self.args.labels_list\n self.args.labels_map = {\n int(key): value for key, value in self.args.labels_map.items()\n }\n else:\n self.args.labels_map = {\n label: i for i, label in enumerate(self.args.labels_list)\n }\n else:\n len_labels_list = 2 if not num_labels else num_labels\n self.args.labels_list = [i for i in range(len_labels_list)]\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n\n if tokenizer_type is not None:\n if isinstance(tokenizer_type, str):\n _, _, tokenizer_class = MODEL_CLASSES[tokenizer_type]\n else:\n tokenizer_class = tokenizer_type\n\n if num_labels:\n self.config = config_class.from_pretrained(\n model_name, num_labels=num_labels, **self.args.config\n )\n self.num_labels = num_labels\n else:\n self.config = config_class.from_pretrained(model_name, **self.args.config)\n self.num_labels = self.config.num_labels\n\n if model_type in MODELS_WITHOUT_CLASS_WEIGHTS_SUPPORT and weight is not None:\n raise ValueError(\n \"{} does not currently support class weights\".format(model_type)\n )\n else:\n self.weight = weight\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n self.loss_fct = init_loss(\n weight=self.weight, device=self.device, args=self.args\n )\n\n if self.args.onnx:\n from onnxruntime import InferenceSession, SessionOptions\n\n if not onnx_execution_provider:\n onnx_execution_provider = (\n \"CUDAExecutionProvider\" if use_cuda else \"CPUExecutionProvider\"\n )\n\n options = SessionOptions()\n\n if self.args.dynamic_quantize:\n model_path = quantize(Path(os.path.join(model_name, \"onnx_model.onnx\")))\n self.model = InferenceSession(\n model_path.as_posix(), options, providers=[onnx_execution_provider]\n )\n else:\n model_path = os.path.join(model_name, \"onnx_model.onnx\")\n self.model = InferenceSession(\n model_path, options, providers=[onnx_execution_provider]\n )\n else:\n if not self.args.quantized_model:\n self.model = model_class.from_pretrained(\n model_name, config=self.config, **kwargs\n )\n else:\n quantized_weights = torch.load(\n os.path.join(model_name, \"pytorch_model.bin\")\n )\n\n self.model = model_class.from_pretrained(\n None, config=self.config, state_dict=quantized_weights\n )\n\n if self.args.dynamic_quantize:\n self.model = torch.quantization.quantize_dynamic(\n self.model, {torch.nn.Linear}, dtype=torch.qint8\n )\n if self.args.quantized_model:\n self.model.load_state_dict(quantized_weights)\n if self.args.dynamic_quantize:\n self.args.quantized_model = True\n\n self.results = {}\n\n if not use_cuda:\n self.args.fp16 = False\n\n if self.args.fp16:\n try:\n from torch.cuda import amp\n except AttributeError:\n raise AttributeError(\n \"fp16 requires Pytorch >= 1.6. 
Please update Pytorch or turn off fp16.\"\n )\n\n if tokenizer_name is None:\n tokenizer_name = model_name\n\n if tokenizer_name in [\n \"vinai/bertweet-base\",\n \"vinai/bertweet-covid19-base-cased\",\n \"vinai/bertweet-covid19-base-uncased\",\n ]:\n self.tokenizer = tokenizer_class.from_pretrained(\n tokenizer_name,\n do_lower_case=self.args.do_lower_case,\n normalization=True,\n **kwargs,\n )\n else:\n self.tokenizer = tokenizer_class.from_pretrained(\n tokenizer_name, do_lower_case=self.args.do_lower_case, **kwargs\n )\n\n if self.args.special_tokens_list:\n self.tokenizer.add_tokens(\n self.args.special_tokens_list, special_tokens=True\n )\n self.model.resize_token_embeddings(len(self.tokenizer))\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n self.args.tokenizer_name = tokenizer_name\n self.args.tokenizer_type = tokenizer_type\n\n if model_type in [\"camembert\", \"xlmroberta\"]:\n warnings.warn(\n f\"use_multiprocessing automatically disabled as {model_type}\"\n \" fails when using multiprocessing for feature conversion.\"\n )\n self.args.use_multiprocessing = False\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\n \"wandb_project specified but wandb is not available. Wandb disabled.\"\n )\n self.args.wandb_project = None\n\n def train_model(\n self,\n train_df,\n multi_label=False,\n output_dir=None,\n show_running_loss=True,\n args=None,\n eval_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_df'\n\n Args:\n train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. 
The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n global_step: Number of global steps trained\n training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n if self.args.silent:\n show_running_loss = False\n\n if self.args.evaluate_during_training and eval_df is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_df is not specified.\"\n \" Pass eval_df to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if (\n os.path.exists(output_dir)\n and os.listdir(output_dir)\n and not self.args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Set overwrite_output_dir: True to automatically overwrite.\".format(\n output_dir\n )\n )\n self._move_model_to_device()\n\n if self.args.use_hf_datasets:\n if self.args.sliding_window:\n raise ValueError(\n \"HuggingFace Datasets cannot be used with sliding window.\"\n )\n if self.args.model_type == \"layoutlm\":\n raise NotImplementedError(\n \"HuggingFace Datasets support is not implemented for LayoutLM models\"\n )\n train_dataset = load_hf_dataset(\n train_df, self.tokenizer, self.args, multi_label=multi_label\n )\n elif isinstance(train_df, str) and self.args.lazy_loading:\n if self.args.sliding_window:\n raise ValueError(\"Lazy loading cannot be used with sliding window.\")\n if self.args.model_type == \"layoutlm\":\n raise NotImplementedError(\n \"Lazy loading is not implemented for LayoutLM models\"\n )\n train_dataset = LazyClassificationDataset(\n train_df, self.tokenizer, self.args\n )\n else:\n if self.args.lazy_loading:\n raise ValueError(\n \"Input must be given as a path to a file when using lazy loading\"\n )\n if \"text\" in train_df.columns and \"labels\" in train_df.columns:\n if self.args.model_type == \"layoutlm\":\n train_examples = [\n InputExample(i, text, None, label, x0, y0, x1, y1)\n for i, (text, label, x0, y0, x1, y1) in enumerate(\n zip(\n train_df[\"text\"].astype(str),\n train_df[\"labels\"],\n train_df[\"x0\"],\n train_df[\"y0\"],\n train_df[\"x1\"],\n train_df[\"y1\"],\n )\n )\n ]\n else:\n train_examples = (\n train_df[\"text\"].astype(str).tolist(),\n train_df[\"labels\"].tolist(),\n )\n elif \"text_a\" in train_df.columns and \"text_b\" in train_df.columns:\n if self.args.model_type == \"layoutlm\":\n raise ValueError(\"LayoutLM cannot be used with sentence-pair tasks\")\n else:\n train_examples = (\n train_df[\"text_a\"].astype(str).tolist(),\n train_df[\"text_b\"].astype(str).tolist(),\n train_df[\"labels\"].tolist(),\n )\n else:\n warnings.warn(\n \"Dataframe headers not specified. 
Falling back to using column 0 as text and column 1 as labels.\"\n )\n train_examples = (\n train_df.iloc[:, 0].astype(str).tolist(),\n train_df.iloc[:, 1].tolist(),\n )\n train_dataset = self.load_and_cache_examples(\n train_examples, verbose=verbose\n )\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=self.args.train_batch_size,\n num_workers=self.args.dataloader_num_workers,\n )\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, training_details = self.train(\n train_dataloader,\n output_dir,\n multi_label=multi_label,\n show_running_loss=show_running_loss,\n eval_df=eval_df,\n verbose=verbose,\n **kwargs,\n )\n\n # model_to_save = self.model.module if hasattr(self.model, \"module\") else self.model\n # model_to_save.save_pretrained(output_dir)\n # self.tokenizer.save_pretrained(output_dir)\n # torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n self.save_model(model=self.model)\n\n if verbose:\n logger.info(\n \" Training of {} model complete. Saved to {}.\".format(\n self.args.model_type, output_dir\n )\n )\n\n return global_step, training_details\n\n def train(\n self,\n train_dataloader,\n output_dir,\n multi_label=False,\n show_running_loss=True,\n eval_df=None,\n test_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. Not intended to be used directly.\n \"\"\"\n\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(log_dir=args.tensorboard_dir)\n\n t_total = (\n len(train_dataloader)\n // args.gradient_accumulation_steps\n * args.num_train_epochs\n )\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n optimizer_grouped_parameters = []\n custom_parameter_names = set()\n for group in self.args.custom_parameter_groups:\n params = group.pop(\"params\")\n custom_parameter_names.update(params)\n param_group = {**group}\n param_group[\"params\"] = [\n p for n, p in model.named_parameters() if n in params\n ]\n optimizer_grouped_parameters.append(param_group)\n\n for group in self.args.custom_layer_parameters:\n layer_number = group.pop(\"layer\")\n layer = f\"layer.{layer_number}.\"\n group_d = {**group}\n group_nd = {**group}\n group_nd[\"weight_decay\"] = 0.0\n params_d = []\n params_nd = []\n for n, p in model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n group_d[\"params\"] = params_d\n group_nd[\"params\"] = params_nd\n\n optimizer_grouped_parameters.append(group_d)\n optimizer_grouped_parameters.append(group_nd)\n\n if not self.args.train_custom_parameters_only:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names\n and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names\n and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = (\n warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n )\n\n if args.optimizer == \"AdamW\":\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adam_epsilon,\n )\n elif args.optimizer == \"Adafactor\":\n optimizer = 
Adafactor(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adafactor_eps,\n clip_threshold=args.adafactor_clip_threshold,\n decay_rate=args.adafactor_decay_rate,\n beta1=args.adafactor_beta1,\n weight_decay=args.weight_decay,\n scale_parameter=args.adafactor_scale_parameter,\n relative_step=args.adafactor_relative_step,\n warmup_init=args.adafactor_warmup_init,\n )\n print(\"Using Adafactor for T5\")\n else:\n raise ValueError(\n \"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.\".format(\n args.optimizer\n )\n )\n\n if args.scheduler == \"constant_schedule\":\n scheduler = get_constant_schedule(optimizer)\n\n elif args.scheduler == \"constant_schedule_with_warmup\":\n scheduler = get_constant_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps\n )\n\n elif args.scheduler == \"linear_schedule_with_warmup\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n )\n\n elif args.scheduler == \"cosine_schedule_with_warmup\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"cosine_with_hard_restarts_schedule_with_warmup\":\n scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"polynomial_decay_schedule_with_warmup\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n lr_end=args.polynomial_decay_schedule_lr_end,\n power=args.polynomial_decay_schedule_power,\n )\n\n else:\n raise ValueError(\"{} is not a valid scheduler.\".format(args.scheduler))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n training_progress_scores = None\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent, mininterval=0\n )\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n current_loss = \"Initializing\"\n\n if args.model_name and os.path.exists(args.model_name):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name.split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n\n logger.info(\n \" Continuing training from checkpoint, will skip to saved global_step\"\n )\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\n \" Will skip the first %d steps in the current epoch\",\n steps_trained_in_current_epoch,\n )\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(\n multi_label, **kwargs\n )\n\n if args.wandb_project:\n if 
not wandb.setup().settings.sweep_id:\n logger.info(\" Initializing WandB run for training.\")\n wandb.init(\n project=args.wandb_project,\n config={**asdict(args)},\n **args.wandb_kwargs,\n )\n wandb.run._label(repo=\"simpletransformers\")\n self.wandb_run_id = wandb.run.id\n wandb.watch(self.model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n scaler = amp.GradScaler()\n\n for _ in train_iterator:\n model.train()\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n train_iterator.set_description(\n f\"Epoch {epoch_number + 1} of {args.num_train_epochs}\"\n )\n batch_iterator = tqdm(\n train_dataloader,\n desc=f\"Running Epoch {epoch_number} of {args.num_train_epochs}\",\n disable=args.silent,\n mininterval=0,\n )\n for step, batch in enumerate(batch_iterator):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs = self._get_inputs_dict(batch)\n if self.args.fp16:\n with amp.autocast():\n loss, *_ = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n else:\n loss, *_ = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n\n if args.n_gpu > 1:\n loss = (\n loss.mean()\n ) # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n batch_iterator.set_description(\n f\"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}\"\n )\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if self.args.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if self.args.fp16:\n scaler.unscale_(optimizer)\n if args.optimizer == \"AdamW\":\n torch.nn.utils.clip_grad_norm_(\n model.parameters(), args.max_grad_norm\n )\n\n if self.args.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\n \"lr\", scheduler.get_last_lr()[0], global_step\n )\n tb_writer.add_scalar(\n \"loss\",\n (tr_loss - logging_loss) / args.logging_steps,\n global_step,\n )\n logging_loss = tr_loss\n if args.wandb_project or self.is_sweeping:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_last_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}\".format(global_step)\n )\n\n self.save_model(\n output_dir_current, optimizer, scheduler, model=model\n )\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _, _ = self.eval_model(\n eval_df,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n wandb_log=False,\n **kwargs,\n )\n\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}\".format(global_step)\n )\n\n if args.save_eval_checkpoints:\n self.save_model(\n output_dir_current,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n\n 
training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n\n if test_df is not None:\n test_results, _, _ = self.eval_model(\n test_df,\n verbose=verbose\n and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n wandb_log=False,\n **kwargs,\n )\n for key in test_results:\n training_progress_scores[\"test_\" + key].append(\n test_results[key]\n )\n\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(\n args.output_dir, \"training_progress_scores.csv\"\n ),\n index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n for key, value in flatten_results(\n self._get_last_metrics(training_progress_scores)\n ).items():\n try:\n tb_writer.add_scalar(key, value, global_step)\n except (NotImplementedError, AssertionError):\n if verbose:\n logger.warning(\n f\"can't log value of type: {type(value)} to tensorboar\"\n )\n tb_writer.flush()\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n if best_eval_metric and args.early_stopping_metric_minimize:\n if (\n best_eval_metric - results[args.early_stopping_metric]\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if (\n early_stopping_counter\n < args.early_stopping_patience\n ):\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if (\n early_stopping_counter\n < args.early_stopping_patience\n ):\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n model.train()\n\n epoch_number += 1\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number)\n )\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n 
os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self.save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and args.evaluate_each_epoch:\n results, _, _ = self.eval_model(\n eval_df,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n wandb_log=False,\n **kwargs,\n )\n\n self.save_model(\n output_dir_current, optimizer, scheduler, results=results\n )\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n if test_df is not None:\n test_results, _, _ = self.eval_model(\n test_df,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n wandb_log=False,\n **kwargs,\n )\n for key in test_results:\n training_progress_scores[\"test_\" + key].append(\n test_results[key]\n )\n\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"),\n index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n for key, value in flatten_results(\n self._get_last_metrics(training_progress_scores)\n ).items():\n try:\n tb_writer.add_scalar(key, value, global_step)\n except (NotImplementedError, AssertionError):\n if verbose:\n logger.warning(\n f\"can't log value of type: {type(value)} to tensorboar\"\n )\n tb_writer.flush()\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n if best_eval_metric and args.early_stopping_metric_minimize:\n if (\n best_eval_metric - results[args.early_stopping_metric]\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if (\n args.use_early_stopping\n and args.early_stopping_consider_epochs\n ):\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n model=model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if (\n args.use_early_stopping\n and args.early_stopping_consider_epochs\n ):\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: 
{args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n def eval_model(\n self,\n eval_df,\n multi_label=False,\n output_dir=None,\n verbose=True,\n silent=False,\n wandb_log=True,\n **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_df. Saves results to output_dir.\n\n Args:\n eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n wandb_log: If True, evaluation results will be logged to wandb.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results.\n model_outputs: List of model outputs for each row in eval_df\n wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n result, model_outputs, wrong_preds = self.evaluate(\n eval_df,\n output_dir,\n multi_label=multi_label,\n verbose=verbose,\n silent=silent,\n wandb_log=wandb_log,\n **kwargs,\n )\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, model_outputs, wrong_preds\n\n def evaluate(\n self,\n eval_df,\n output_dir,\n multi_label=False,\n prefix=\"\",\n verbose=True,\n silent=False,\n wandb_log=True,\n **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_df.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n\n model = self.model\n args = self.args\n eval_output_dir = output_dir\n\n results = {}\n if self.args.use_hf_datasets:\n if self.args.sliding_window:\n raise ValueError(\n \"HuggingFace Datasets cannot be used with sliding window.\"\n )\n if self.args.model_type == \"layoutlm\":\n raise NotImplementedError(\n \"HuggingFace Datasets support is not implemented for LayoutLM models\"\n )\n eval_dataset = load_hf_dataset(\n eval_df, self.tokenizer, self.args, multi_label=multi_label\n )\n eval_examples = None\n elif isinstance(eval_df, str) and self.args.lazy_loading:\n if self.args.model_type == \"layoutlm\":\n raise NotImplementedError(\n \"Lazy loading is not implemented for LayoutLM models\"\n )\n eval_dataset = LazyClassificationDataset(eval_df, self.tokenizer, self.args)\n eval_examples = None\n else:\n if self.args.lazy_loading:\n raise ValueError(\n \"Input must be given as a path to a file when using lazy loading\"\n )\n\n if \"text\" in eval_df.columns and \"labels\" in eval_df.columns:\n if self.args.model_type == \"layoutlm\":\n eval_examples = [\n InputExample(i, text, None, label, x0, y0, x1, y1)\n for i, (text, label, x0, y0, x1, y1) in enumerate(\n zip(\n eval_df[\"text\"].astype(str),\n eval_df[\"labels\"],\n eval_df[\"x0\"],\n eval_df[\"y0\"],\n eval_df[\"x1\"],\n eval_df[\"y1\"],\n )\n )\n ]\n else:\n eval_examples = (\n eval_df[\"text\"].astype(str).tolist(),\n eval_df[\"labels\"].tolist(),\n )\n elif \"text_a\" in eval_df.columns and \"text_b\" in eval_df.columns:\n if self.args.model_type == \"layoutlm\":\n raise ValueError(\"LayoutLM cannot be used with sentence-pair tasks\")\n else:\n eval_examples = (\n eval_df[\"text_a\"].astype(str).tolist(),\n eval_df[\"text_b\"].astype(str).tolist(),\n eval_df[\"labels\"].tolist(),\n )\n else:\n warnings.warn(\n \"Dataframe headers not specified. 
Falling back to using column 0 as text and column 1 as labels.\"\n )\n eval_examples = (\n eval_df.iloc[:, 0].astype(str).tolist(),\n eval_df.iloc[:, 1].tolist(),\n )\n\n if args.sliding_window:\n eval_dataset, window_counts = self.load_and_cache_examples(\n eval_examples, evaluate=True, verbose=verbose, silent=silent\n )\n else:\n eval_dataset = self.load_and_cache_examples(\n eval_examples, evaluate=True, verbose=verbose, silent=silent\n )\n os.makedirs(eval_output_dir, exist_ok=True)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size\n )\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n n_batches = len(eval_dataloader)\n preds = np.empty((len(eval_dataset), self.num_labels))\n if multi_label:\n out_label_ids = np.empty((len(eval_dataset), self.num_labels))\n else:\n out_label_ids = np.empty((len(eval_dataset)))\n model.eval()\n\n if self.args.fp16:\n from torch.cuda import amp\n\n for i, batch in enumerate(\n tqdm(\n eval_dataloader,\n disable=args.silent or silent,\n desc=\"Running Evaluation\",\n )\n ):\n # batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n\n if self.args.fp16:\n with amp.autocast():\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n else:\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n if self.args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean()\n eval_loss += tmp_eval_loss.item()\n\n nb_eval_steps += 1\n\n start_index = self.args.eval_batch_size * i\n end_index = (\n start_index + self.args.eval_batch_size\n if i != (n_batches - 1)\n else len(eval_dataset)\n )\n preds[start_index:end_index] = logits.detach().cpu().numpy()\n out_label_ids[start_index:end_index] = (\n inputs[\"labels\"].detach().cpu().numpy()\n )\n\n # if preds is None:\n # preds = logits.detach().cpu().numpy()\n # out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n # else:\n # preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n # out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args.sliding_window:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [\n preds[window_range[0] : window_range[1]]\n for window_range in window_ranges\n ]\n out_label_ids = [\n out_label_ids[i]\n for i in range(len(out_label_ids))\n if i in [window[0] for window in window_ranges]\n ]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n val_freqs_desc = Counter(pred_row).most_common()\n if (\n len(val_freqs_desc) > 1\n and val_freqs_desc[0][1] == val_freqs_desc[1][1]\n ):\n final_preds.append(args.tie_value)\n else:\n final_preds.append(val_freqs_desc[0][0])\n preds = np.array(final_preds)\n elif not multi_label and args.regression is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n\n if not multi_label:\n preds = np.argmax(preds, axis=1)\n\n result, wrong = self.compute_metrics(\n preds, 
model_outputs, out_label_ids, eval_examples, **kwargs\n )\n result[\"eval_loss\"] = eval_loss\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n\n if (\n self.args.wandb_project\n and wandb_log\n and not multi_label\n and not self.args.regression\n ):\n if not wandb.setup().settings.sweep_id:\n logger.info(\" Initializing WandB run for evaluation.\")\n wandb.init(\n project=args.wandb_project,\n config={**asdict(args)},\n **args.wandb_kwargs,\n )\n wandb.run._label(repo=\"simpletransformers\")\n if not args.labels_map:\n self.args.labels_map = {i: i for i in range(self.num_labels)}\n\n labels_list = sorted(list(self.args.labels_map.keys()))\n inverse_labels_map = {\n value: key for key, value in self.args.labels_map.items()\n }\n\n truth = [inverse_labels_map[out] for out in out_label_ids]\n\n # Confusion Matrix\n wandb.sklearn.plot_confusion_matrix(\n truth,\n [inverse_labels_map[pred] for pred in preds],\n labels=labels_list,\n )\n\n if not self.args.sliding_window:\n # ROC`\n wandb.log({\"roc\": wandb.plots.ROC(truth, model_outputs, labels_list)})\n\n # Precision Recall\n wandb.log(\n {\n \"pr\": wandb.plots.precision_recall(\n truth, model_outputs, labels_list\n )\n }\n )\n\n return results, model_outputs, wrong\n\n def load_and_cache_examples(\n self,\n examples,\n evaluate=False,\n no_cache=False,\n multi_label=False,\n verbose=True,\n silent=False,\n ):\n \"\"\"\n Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n process_count = self.args.process_count\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args.no_cache\n\n if not multi_label and args.regression:\n output_mode = \"regression\"\n else:\n output_mode = \"classification\"\n\n if not no_cache:\n os.makedirs(self.args.cache_dir, exist_ok=True)\n\n mode = \"dev\" if evaluate else \"train\"\n if args.sliding_window or self.args.model_type == \"layoutlm\":\n cached_features_file = os.path.join(\n args.cache_dir,\n \"cached_{}_{}_{}_{}_{}\".format(\n mode,\n args.model_type,\n args.max_seq_length,\n self.num_labels,\n len(examples),\n ),\n )\n\n if os.path.exists(cached_features_file) and (\n (not args.reprocess_input_data and not no_cache)\n or (mode == \"dev\" and args.use_cached_eval_features and not no_cache)\n ):\n features = torch.load(cached_features_file)\n if verbose:\n logger.info(\n f\" Features loaded from cache at {cached_features_file}\"\n )\n else:\n if verbose:\n logger.info(\" Converting to features started. 
Cache is not used.\")\n if args.sliding_window:\n logger.info(\" Sliding window enabled\")\n\n if self.args.model_type != \"layoutlm\":\n if len(examples) == 3:\n examples = [\n InputExample(i, text_a, text_b, label)\n for i, (text_a, text_b, label) in enumerate(zip(*examples))\n ]\n else:\n examples = [\n InputExample(i, text_a, None, label)\n for i, (text_a, label) in enumerate(zip(*examples))\n ]\n\n # If labels_map is defined, then labels need to be replaced with ints\n if self.args.labels_map and not self.args.regression:\n for example in examples:\n if multi_label:\n example.label = [\n self.args.labels_map[label] for label in example.label\n ]\n else:\n example.label = self.args.labels_map[example.label]\n\n features = convert_examples_to_features(\n examples,\n args.max_seq_length,\n tokenizer,\n output_mode,\n # XLNet has a CLS token at the end\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n # RoBERTa uses an extra separator b/w pairs of sentences,\n # cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n sep_token_extra=args.model_type in MODELS_WITH_EXTRA_SEP_TOKEN,\n # PAD on the left for XLNet\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n process_count=process_count,\n multi_label=multi_label,\n silent=args.silent or silent,\n use_multiprocessing=args.use_multiprocessing_for_evaluation,\n sliding_window=args.sliding_window,\n flatten=not evaluate,\n stride=args.stride,\n add_prefix_space=args.model_type in MODELS_WITH_ADD_PREFIX_SPACE,\n # avoid padding in case of single example/online inferencing to decrease execution time\n pad_to_max_length=bool(len(examples) > 1),\n args=args,\n )\n if verbose and args.sliding_window:\n logger.info(\n f\" {len(features)} features created from {len(examples)} samples.\"\n )\n\n if not no_cache:\n torch.save(features, cached_features_file)\n\n if args.sliding_window and evaluate:\n features = [\n [feature_set] if not isinstance(feature_set, list) else feature_set\n for feature_set in features\n ]\n window_counts = [len(sample) for sample in features]\n features = [\n feature for feature_set in features for feature in feature_set\n ]\n\n all_input_ids = torch.tensor(\n [f.input_ids for f in features], dtype=torch.long\n )\n all_input_mask = torch.tensor(\n [f.input_mask for f in features], dtype=torch.long\n )\n all_segment_ids = torch.tensor(\n [f.segment_ids for f in features], dtype=torch.long\n )\n\n if self.args.model_type == \"layoutlm\":\n all_bboxes = torch.tensor(\n [f.bboxes for f in features], dtype=torch.long\n )\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor(\n [f.label_id for f in features], dtype=torch.long\n )\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor(\n [f.label_id for f in features], dtype=torch.float\n )\n\n if self.args.model_type == \"layoutlm\":\n dataset = TensorDataset(\n all_input_ids,\n all_input_mask,\n all_segment_ids,\n all_label_ids,\n all_bboxes,\n )\n else:\n dataset = TensorDataset(\n all_input_ids, all_input_mask, all_segment_ids, all_label_ids\n )\n\n if args.sliding_window and evaluate:\n return dataset, window_counts\n else:\n return dataset\n else:\n dataset = ClassificationDataset(\n examples,\n self.tokenizer,\n self.args,\n mode=mode,\n 
multi_label=multi_label,\n output_mode=output_mode,\n no_cache=no_cache,\n )\n return dataset\n\n def compute_metrics(\n self,\n preds,\n model_outputs,\n labels,\n eval_examples=None,\n multi_label=False,\n **kwargs,\n ):\n \"\"\"\n Computes the evaluation metrics for the model predictions.\n\n Args:\n preds: Model predictions\n model_outputs: Model outputs\n labels: Ground truth labels\n eval_examples: List of examples on which evaluation was performed\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results.\n For non-binary classification, the dictionary format is: (Matthews correlation coefficient, tp, tn, fp, fn).\n For binary classification, the dictionary format is: (Matthews correlation coefficient, tp, tn, fp, fn, AUROC, AUPRC).\n wrong: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n assert len(preds) == len(labels)\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n if metric.startswith(\"prob_\"):\n extra_metrics[metric] = func(labels, model_outputs)\n else:\n extra_metrics[metric] = func(labels, preds)\n\n if multi_label:\n threshold_values = self.args.threshold if self.args.threshold else 0.5\n if isinstance(threshold_values, list):\n mismatched = labels != [\n [\n self._threshold(pred, threshold_values[i])\n for i, pred in enumerate(example)\n ]\n for example in preds\n ]\n else:\n mismatched = labels != [\n [self._threshold(pred, threshold_values) for pred in example]\n for example in preds\n ]\n else:\n mismatched = labels != preds\n\n if eval_examples:\n wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]\n else:\n wrong = [\"NA\"]\n\n if multi_label:\n label_ranking_score = label_ranking_average_precision_score(labels, preds)\n return {**{\"LRAP\": label_ranking_score}, **extra_metrics}, wrong\n elif self.args.regression:\n return {**extra_metrics}, wrong\n\n mcc = matthews_corrcoef(labels, preds)\n if self.model.num_labels == 2:\n tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()\n if self.args.sliding_window:\n return (\n {\n **{\"mcc\": mcc, \"tp\": tp, \"tn\": tn, \"fp\": fp, \"fn\": fn},\n **extra_metrics,\n },\n wrong,\n )\n else:\n scores = np.array([softmax(element)[1] for element in model_outputs])\n fpr, tpr, thresholds = roc_curve(labels, scores)\n auroc = auc(fpr, tpr)\n auprc = average_precision_score(labels, scores)\n return (\n {\n **{\n \"mcc\": mcc,\n \"tp\": tp,\n \"tn\": tn,\n \"fp\": fp,\n \"fn\": fn,\n \"auroc\": auroc,\n \"auprc\": auprc,\n },\n **extra_metrics,\n },\n wrong,\n )\n else:\n return {**{\"mcc\": mcc}, **extra_metrics}, wrong\n\n def predict(self, to_predict, multi_label=False):\n \"\"\"\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction.\n\n Returns:\n preds: A python list of the predictions (0 or 1) for each text.\n model_outputs: A python list of the raw model outputs for each text.\n \"\"\"\n\n model = self.model\n args = self.args\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = np.empty((len(to_predict), self.num_labels))\n if multi_label:\n out_label_ids = np.empty((len(to_predict), self.num_labels))\n else:\n 
out_label_ids = np.empty((len(to_predict)))\n\n if not multi_label and self.args.onnx:\n model_inputs = self.tokenizer.batch_encode_plus(\n to_predict, return_tensors=\"pt\", padding=True, truncation=True\n )\n\n if self.args.model_type in [\"bert\", \"xlnet\", \"albert\", \"layoutlm\"]:\n for i, (input_ids, attention_mask, token_type_ids) in enumerate(\n zip(\n model_inputs[\"input_ids\"],\n model_inputs[\"attention_mask\"],\n model_inputs[\"token_type_ids\"],\n )\n ):\n input_ids = input_ids.unsqueeze(0).detach().cpu().numpy()\n attention_mask = attention_mask.unsqueeze(0).detach().cpu().numpy()\n token_type_ids = token_type_ids.unsqueeze(0).detach().cpu().numpy()\n inputs_onnx = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n # Run the model (None = get all the outputs)\n output = self.model.run(None, inputs_onnx)\n\n preds[i] = output[0]\n\n else:\n for i, (input_ids, attention_mask) in enumerate(\n zip(model_inputs[\"input_ids\"], model_inputs[\"attention_mask\"])\n ):\n input_ids = input_ids.unsqueeze(0).detach().cpu().numpy()\n attention_mask = attention_mask.unsqueeze(0).detach().cpu().numpy()\n inputs_onnx = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n }\n\n # Run the model (None = get all the outputs)\n output = self.model.run(None, inputs_onnx)\n\n preds[i] = output[0]\n\n model_outputs = preds\n preds = np.argmax(preds, axis=1)\n\n else:\n self._move_model_to_device()\n dummy_label = (\n 0\n if not self.args.labels_map\n else next(iter(self.args.labels_map.keys()))\n )\n\n if multi_label:\n dummy_label = [dummy_label for i in range(self.num_labels)]\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if isinstance(to_predict[0], list):\n eval_examples = (\n *zip(*to_predict),\n [dummy_label for i in range(len(to_predict))],\n )\n else:\n eval_examples = (\n to_predict,\n [dummy_label for i in range(len(to_predict))],\n )\n\n if args.sliding_window:\n eval_dataset, window_counts = self.load_and_cache_examples(\n eval_examples, evaluate=True, no_cache=True\n )\n preds = np.empty((len(eval_dataset), self.num_labels))\n if multi_label:\n out_label_ids = np.empty((len(eval_dataset), self.num_labels))\n else:\n out_label_ids = np.empty((len(eval_dataset)))\n else:\n eval_dataset = self.load_and_cache_examples(\n eval_examples, evaluate=True, multi_label=multi_label, no_cache=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size\n )\n\n if self.args.fp16:\n from torch.cuda import amp\n\n if self.config.output_hidden_states:\n model.eval()\n preds = None\n out_label_ids = None\n for i, batch in enumerate(\n tqdm(\n eval_dataloader, disable=args.silent, desc=\"Running Prediction\"\n )\n ):\n # batch = tuple(t.to(self.device) for t in batch)\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch, no_hf=True)\n\n if self.args.fp16:\n with amp.autocast():\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n else:\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n embedding_outputs, layer_hidden_states = (\n outputs[2][0],\n outputs[2][1:],\n )\n\n if multi_label:\n logits = logits.sigmoid()\n\n if self.args.n_gpu > 1:\n tmp_eval_loss = 
tmp_eval_loss.mean()\n eval_loss += tmp_eval_loss.item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n all_layer_hidden_states = np.array(\n [\n state.detach().cpu().numpy()\n for state in layer_hidden_states\n ]\n )\n all_embedding_outputs = embedding_outputs.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(\n out_label_ids,\n inputs[\"labels\"].detach().cpu().numpy(),\n axis=0,\n )\n all_layer_hidden_states = np.append(\n all_layer_hidden_states,\n np.array(\n [\n state.detach().cpu().numpy()\n for state in layer_hidden_states\n ]\n ),\n axis=1,\n )\n all_embedding_outputs = np.append(\n all_embedding_outputs,\n embedding_outputs.detach().cpu().numpy(),\n axis=0,\n )\n else:\n n_batches = len(eval_dataloader)\n for i, batch in enumerate(tqdm(eval_dataloader, disable=args.silent)):\n model.eval()\n # batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch, no_hf=True)\n\n if self.args.fp16:\n with amp.autocast():\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n else:\n outputs = self._calculate_loss(\n model,\n inputs,\n loss_fct=self.loss_fct,\n num_labels=self.num_labels,\n args=self.args,\n )\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n\n if self.args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean()\n eval_loss += tmp_eval_loss.item()\n\n nb_eval_steps += 1\n\n start_index = self.args.eval_batch_size * i\n end_index = (\n start_index + self.args.eval_batch_size\n if i != (n_batches - 1)\n else len(eval_dataset)\n )\n preds[start_index:end_index] = logits.detach().cpu().numpy()\n out_label_ids[start_index:end_index] = (\n inputs[\"labels\"].detach().cpu().numpy()\n )\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args.sliding_window:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [\n preds[window_range[0] : window_range[1]]\n for window_range in window_ranges\n ]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n mode_pred, counts = mode(pred_row)\n if len(counts) > 1 and counts[0] == counts[1]:\n final_preds.append(args.tie_value)\n else:\n final_preds.append(mode_pred[0])\n preds = np.array(final_preds)\n elif not multi_label and args.regression is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n if multi_label:\n if isinstance(args.threshold, list):\n threshold_values = args.threshold\n preds = [\n [\n self._threshold(pred, threshold_values[i])\n for i, pred in enumerate(example)\n ]\n for example in preds\n ]\n else:\n preds = [\n [self._threshold(pred, args.threshold) for pred in example]\n for example in preds\n ]\n else:\n preds = np.argmax(preds, axis=1)\n\n if self.args.labels_map and not self.args.regression:\n inverse_labels_map = {\n value: key for key, value in self.args.labels_map.items()\n }\n preds = [inverse_labels_map[pred] for pred in preds]\n\n if self.config.output_hidden_states:\n return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states\n else:\n return preds, model_outputs\n\n def convert_to_onnx(self, output_dir=None, 
set_onnx_arg=True):\n \"\"\"Convert the model to ONNX format and save to output_dir\n\n Args:\n output_dir (str, optional): If specified, ONNX model will be saved to output_dir (else args.output_dir will be used). Defaults to None.\n set_onnx_arg (bool, optional): Updates the model args to set onnx=True. Defaults to True.\n \"\"\" # noqa\n if not output_dir:\n output_dir = os.path.join(self.args.output_dir, \"onnx\")\n os.makedirs(output_dir, exist_ok=True)\n\n if os.listdir(output_dir):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Output directory for onnx conversion must be empty.\".format(\n output_dir\n )\n )\n\n onnx_model_name = os.path.join(output_dir, \"onnx_model.onnx\")\n\n with tempfile.TemporaryDirectory() as temp_dir:\n self.save_model(output_dir=temp_dir, model=self.model)\n\n convert(\n framework=\"pt\",\n model=temp_dir,\n tokenizer=self.tokenizer,\n output=Path(onnx_model_name),\n pipeline_name=\"sentiment-analysis\",\n opset=11,\n )\n\n self.args.onnx = True\n self.tokenizer.save_pretrained(output_dir)\n self.config.save_pretrained(output_dir)\n self.save_model_args(output_dir)\n\n def _calculate_loss(self, model, inputs, loss_fct, num_labels, args):\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n if loss_fct:\n logits = outputs[1]\n labels = inputs[\"labels\"]\n\n loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))\n return (loss, *outputs[1:])\n\n def _threshold(self, x, threshold):\n if x >= threshold:\n return 1\n return 0\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_inputs_dict(self, batch, no_hf=False):\n if self.args.use_hf_datasets and not no_hf:\n return {key: value.to(self.device) for key, value in batch.items()}\n if isinstance(batch[0], dict):\n inputs = {\n key: value.squeeze(1).to(self.device) for key, value in batch[0].items()\n }\n inputs[\"labels\"] = batch[1].to(self.device)\n else:\n batch = tuple(t.to(self.device) for t in batch)\n\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3],\n }\n\n # XLM, DistilBERT and RoBERTa don't use segment_ids\n if self.args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2]\n if self.args.model_type in [\"bert\", \"xlnet\", \"albert\", \"layoutlm\"]\n else None\n )\n\n if self.args.model_type == \"layoutlm\":\n inputs[\"bbox\"] = batch[4]\n\n return inputs\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _create_training_progress_scores(self, multi_label, **kwargs):\n return collections.defaultdict(list)\n \"\"\"extra_metrics = {key: [] for key in kwargs}\n if multi_label:\n training_progress_scores = {\n \"global_step\": [],\n \"LRAP\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n if self.model.num_labels == 2:\n if self.args.sliding_window:\n training_progress_scores = {\n \"global_step\": [],\n \"tp\": [],\n \"tn\": [],\n \"fp\": [],\n \"fn\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n training_progress_scores = {\n \"global_step\": [],\n \"tp\": [],\n \"tn\": [],\n \"fp\": [],\n \"fn\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n \"auroc\": [],\n \"auprc\": [],\n **extra_metrics,\n }\n elif self.model.num_labels == 1:\n training_progress_scores = {\n \"global_step\": [],\n \"train_loss\": [],\n \"eval_loss\": 
[],\n **extra_metrics,\n }\n else:\n training_progress_scores = {\n \"global_step\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\"\"\"\n\n def save_model(\n self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None\n ):\n if not output_dir:\n output_dir = self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n\n if model and not self.args.no_save:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler and self.args.save_optimizer_and_scheduler:\n torch.save(\n optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\")\n )\n torch.save(\n scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\")\n )\n self.save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = ClassificationArgs()\n args.load(input_dir)\n return args\n\n def get_named_parameters(self):\n return [n for n, p in self.model.named_parameters()]\n" ]
[ [ "scipy.stats.mode", "sklearn.metrics.confusion_matrix", "torch.utils.data.RandomSampler", "torch.cuda.amp.autocast", "torch.cuda.is_available", "sklearn.metrics.average_precision_score", "torch.load", "torch.nn.DataParallel", "scipy.special.softmax", "pandas.DataFrame", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "numpy.argmax", "torch.utils.tensorboard.SummaryWriter", "torch.set_num_threads", "torch.device", "numpy.array", "torch.cuda.manual_seed_all", "sklearn.metrics.matthews_corrcoef", "torch.save", "torch.utils.data.SequentialSampler", "torch.cuda.amp.GradScaler", "numpy.squeeze", "torch.utils.data.TensorDataset", "torch.quantization.quantize_dynamic", "numpy.random.seed", "torch.no_grad", "sklearn.metrics.auc", "sklearn.metrics.label_ranking_average_precision_score", "sklearn.metrics.roc_curve" ] ]
THUYimingLi/Semi-supervised_Robust_Training
[ "17a6d6fbb4ff3bc4951c1506981dbb6f87f1c26a" ]
[ "spatial_exps/mnist_eval.py" ]
[ "\"\"\"\nEvaluation of a given checkpoint in the standard and adversarial sense. Can be\ncalled as an infinite loop going through the checkpoints in the model directory\nas they appear and evaluating them. Accuracy and average loss are printed and\nadded as tensorboard summaries.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport json\nimport math\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import trange\n\nimport utils.mnist_input as mnist_input\nimport models.resnet as resnet\nfrom attacks.spatial_attack import SpatialAttack\nimport utils.utilities as utilities\nimport models.small_cnn as small_cnn\n\n# A function for evaluating a single checkpoint\ndef evaluate(model, attack, sess, config, summary_writer=None):\n num_eval_examples = config.eval.num_eval_examples\n # num_eval_examples = config.eval.batch_size\n eval_batch_size = config.eval.batch_size\n data_path = config.data.data_path\n\n model_dir = config.model.output_dir\n # Setting up the Tensorboard and checkpoint outputs\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n cifar = mnist_input.MNISTData(data_path)\n global_step = tf.train.get_or_create_global_step()\n # Iterate over the samples batch-by-batch\n num_batches = int(math.ceil(num_eval_examples / eval_batch_size))\n\n total_xent_nat = 0.\n total_xent_adv = 0.\n total_corr_nat = 0\n total_corr_adv = 0\n\n for ibatch in trange(num_batches):\n bstart = ibatch * eval_batch_size\n bend = min(bstart + eval_batch_size, num_eval_examples)\n\n x_batch = cifar.eval_data.xs[bstart:bend, :]\n y_batch = cifar.eval_data.ys[bstart:bend]\n\n noop_trans = np.zeros([len(x_batch), 3])\n if config.eval.adversarial_eval:\n x_batch_adv, adv_trans = attack.perturb(x_batch, y_batch, sess)\n else:\n x_batch_adv, adv_trans = x_batch, noop_trans\n\n if config.eval.st_adv:\n dict_nat = {model.x_input: x_batch,\n model.y_input: y_batch,\n model.transform: noop_trans,\n model.weights: [1. for i in range(len(x_batch))],\n model.is_training: False,\n model.flows: np.zeros((len(x_batch), 2, 32, 32))}\n\n dict_adv = {model.x_input: x_batch_adv,\n model.y_input: y_batch,\n model.transform: adv_trans,\n model.weights: [1. for i in range(len(x_batch_adv))],\n model.is_training: False,\n model.flows: np.zeros((len(x_batch), 2, 32, 32))}\n else:\n dict_nat = {model.x_input: x_batch,\n model.y_input: y_batch,\n model.transform: noop_trans,\n model.weights: [1. for i in range(len(x_batch))],\n model.is_training: False}\n\n dict_adv = {model.x_input: x_batch_adv,\n model.y_input: y_batch,\n model.transform: adv_trans,\n model.weights: [1. 
for i in range(len(x_batch_adv))],\n model.is_training: False}\n\n cur_corr_nat, cur_xent_nat = sess.run([model.num_correct, model.xent],\n feed_dict = dict_nat)\n cur_corr_adv, cur_xent_adv = sess.run([model.num_correct, model.xent],\n feed_dict = dict_adv)\n\n total_xent_nat += cur_xent_nat\n total_xent_adv += cur_xent_adv\n total_corr_nat += cur_corr_nat\n total_corr_adv += cur_corr_adv\n\n avg_xent_nat = total_xent_nat / num_eval_examples\n avg_xent_adv = total_xent_adv / num_eval_examples\n acc_nat = total_corr_nat / num_eval_examples\n acc_adv = total_corr_adv / num_eval_examples\n\n if summary_writer:\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='xent_adv_eval', simple_value= avg_xent_adv),\n tf.Summary.Value(tag='xent_nat_eval', simple_value= avg_xent_nat),\n tf.Summary.Value(tag='xent_adv', simple_value= avg_xent_adv),\n tf.Summary.Value(tag='xent_nat', simple_value= avg_xent_nat),\n tf.Summary.Value(tag='accuracy_adv_eval', simple_value= acc_adv),\n tf.Summary.Value(tag='accuracy_nat_eval', simple_value= acc_nat),\n tf.Summary.Value(tag='accuracy_adv', simple_value= acc_adv),\n tf.Summary.Value(tag='accuracy_nat', simple_value= acc_nat)])\n summary_writer.add_summary(summary, global_step.eval(sess))\n\n step = global_step.eval(sess)\n print('Eval at step: {}'.format(step))\n print(' natural: {:.2f}%'.format(100 * acc_nat))\n print(' adversarial: {:.2f}%'.format(100 * acc_adv))\n print(' avg nat xent: {:.4f}'.format(avg_xent_nat))\n print(' avg adv xent: {:.4f}'.format(avg_xent_adv))\n\n result = {'nat': '{:.2f}%'.format(100 * acc_nat),\n 'adv': '{:.2f}%'.format(100 * acc_adv)}\n with open('job_result.json', 'w') as result_file:\n json.dump(result, result_file, sort_keys=True, indent=4)\n\n\n\ndef loop(model, attack, config, summary_writer=None):\n\n last_checkpoint_filename = ''\n already_seen_state = False\n model_dir = config.model.output_dir\n saver = tf.train.Saver()\n\n while True:\n cur_checkpoint = tf.train.latest_checkpoint(model_dir)\n\n # Case 1: No checkpoint yet\n if cur_checkpoint is None:\n if not already_seen_state:\n print('No checkpoint yet, waiting ...', end='')\n already_seen_state = True\n else:\n print('.', end='')\n sys.stdout.flush()\n time.sleep(10)\n # Case 2: Previously unseen checkpoint\n elif cur_checkpoint != last_checkpoint_filename:\n print('\\nCheckpoint {}, evaluating ... ({})'.format(cur_checkpoint,\n datetime.now()))\n sys.stdout.flush()\n last_checkpoint_filename = cur_checkpoint\n already_seen_state = False\n with tf.Session() as sess:\n # Restore the checkpoint\n saver.restore(sess, cur_checkpoint)\n evaluate(model, attack, sess, config, summary_writer)\n # Case 3: Previously evaluated checkpoint\n else:\n if not already_seen_state:\n print('Waiting for the next checkpoint ... 
({}) '.format(\n datetime.now()),\n end='')\n already_seen_state = True\n else:\n print('.', end='')\n sys.stdout.flush()\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Eval script options',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-c', '--config', type=str,\n help='path to config file',\n default=\"config.json\", required=False)\n parser.add_argument('--loop', help='continuously monitor model_dir'\n 'evaluating new ckpt', \n action=\"store_true\")\n args = parser.parse_args()\n\n config_dict = utilities.get_config(args.config)\n config = utilities.config_to_namedtuple(config_dict)\n\n model = small_cnn.Model(config.model,config.eval.st_adv)\n model_dir = config.model.output_dir\n\n global_step = tf.train.get_or_create_global_step()\n if config.eval.st_adv:\n attack = StadvAttack(model, config)\n else:\n attack = SpatialAttack(model, config.attack)\n\n if args.loop:\n eval_dir = os.path.join(model_dir, 'eval')\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n summary_writer = tf.summary.FileWriter(eval_dir)\n\n loop(model, attack, config, summary_writer)\n else:\n saver = tf.train.Saver()\n\n cur_checkpoint = tf.train.latest_checkpoint(model_dir)\n if cur_checkpoint is None:\n print('No checkpoint found.')\n else:\n with tf.Session() as sess:\n # Restore the checkpoint\n attack.limits = config.attack.spatial_limits; t1, t2, r = attack.limits; attack.granularity = config.attack.grid_granularity; gt1, gt2, gr = attack.granularity\n print('Evaluating checkpoint {}'.format(cur_checkpoint))\n saver.restore(sess, cur_checkpoint)\n print('############## Evaluating RAND ##############')\n evaluate(model, attack, sess, config)\n print('############## Evaluating GRID ##############')\n attack.method = 'grid'\n evaluate(model, attack, sess, config)\n print('############## Evaluating RAND.T ##############')\n attack.method = 'random'; attack.limits = np.array([t1, t2, 0]); attack.granularity = np.array([gt1, gt2, 1])\n evaluate(model, attack, sess, config)\n print('############## Evaluating GRID.T ##############')\n attack.method = 'grid'\n evaluate(model, attack, sess,config)\n print('############## Evaluating RAND.R ##############')\n attack.method = 'random'; attack.limits = np.array([0, 0, r]); attack.granularity = np.array([1, 1, gr])\n evaluate(model, attack, sess,config)\n print('############## Evaluating GRID.R ##############')\n attack.method = 'grid'; \n evaluate(model, attack, sess,config)\n" ]
[ [ "numpy.array", "tensorflow.train.latest_checkpoint", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.Summary.Value", "tensorflow.train.get_or_create_global_step", "tensorflow.summary.FileWriter" ] ]
Leyan529/InstanceSegmentation-
[ "1017bf40d28598aecb69b26a72b86dacbd113f2d" ]
[ "nets/resnet.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = norm_layer(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = norm_layer(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = norm_layer(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n \n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, layers, block=Bottleneck, norm_layer=nn.BatchNorm2d):\n super().__init__()\n self.channels = []\n self.inplanes = 64\n self.norm_layer = norm_layer\n\n # 544, 544, 3 -> 272, 272, 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = norm_layer(64)\n self.relu = nn.ReLU(inplace=True)\n # 272, 272, 64 -> 136, 136, 64\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layers = nn.ModuleList()\n # 136, 136, 64 -> 136, 136, 256\n self._make_layer(block, 64, layers[0])\n # 136, 136, 256 -> 68, 68, 512\n self._make_layer(block, 128, layers[1], stride=2)\n # 68, 68, 512 -> 34, 34, 1024\n self._make_layer(block, 256, layers[2], stride=2)\n # 34, 34, 1024 -> 17, 17, 2048\n self._make_layer(block, 512, layers[3], stride=2)\n\n self.backbone_modules = [m for m in self.modules() if isinstance(m, nn.Conv2d)]\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n self.norm_layer(planes * block.expansion)\n )\n\n layers = [block(self.inplanes, planes, stride, downsample, self.norm_layer)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, norm_layer=self.norm_layer))\n layer = nn.Sequential(*layers)\n\n self.channels.append(planes * block.expansion)\n self.layers.append(layer)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n x = layer(x)\n outs.append(x)\n\n return tuple(outs)[-3:]\n\n def init_backbone(self, path):\n state_dict = torch.load(path)\n self.load_state_dict(state_dict, strict=True)\n" ]
[ [ "torch.nn.ModuleList", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.load" ] ]
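The `nets/resnet.py` record above builds a Bottleneck ResNet whose forward pass returns only the last three feature maps (512, 1024 and 2048 channels at strides 8, 16 and 32, per its inline comments). A minimal driving sketch, assuming the record's own module path is importable:

import torch
from nets.resnet import ResNet  # module path taken from the record's file_path

backbone = ResNet(layers=[3, 4, 6, 3])   # ResNet-50-style stage depths
with torch.no_grad():
    c3, c4, c5 = backbone(torch.randn(1, 3, 544, 544))
print(c3.shape, c4.shape, c5.shape)
# Per the comments in the record: (1, 512, 68, 68), (1, 1024, 34, 34), (1, 2048, 17, 17)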
TranNhiem/solo-learn
[ "7539732b68d153087d09a26a23e1edfdc49bc086" ]
[ "solo/utils/io.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport json\nimport logging\nimport os\nimport pickle\nimport re\nimport time\nfrom urllib.parse import urlparse\n\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom iopath.common.download import download\nfrom iopath.common.file_io import g_pathmgr, file_lock\nfrom solo.utils.slurm import get_slurm_dir\n\n\ndef cache_url(url: str, cache_dir: str) -> str:\n \"\"\"\n This implementation downloads the remote resource and caches it locally.\n The resource will only be downloaded if not previously requested.\n \"\"\"\n parsed_url = urlparse(url)\n dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip(\"/\")))\n makedir(dirname)\n filename = url.split(\"/\")[-1]\n cached = os.path.join(dirname, filename)\n with file_lock(cached):\n if not os.path.isfile(cached):\n logging.info(f\"Downloading {url} to {cached} ...\")\n cached = download(url, dirname, filename=filename)\n logging.info(f\"URL {url} cached in {cached}\")\n return cached\n\n\n# TODO (prigoyal): convert this into RAII-style API\ndef create_file_symlink(file1, file2):\n \"\"\"\n Simply create the symlinks for a given file1 to file2.\n Useful during model checkpointing to symlinks to the\n latest successful checkpoint.\n \"\"\"\n try:\n if g_pathmgr.exists(file2):\n g_pathmgr.rm(file2)\n g_pathmgr.symlink(file1, file2)\n except Exception as e:\n logging.info(f\"Could NOT create symlink. Error: {e}\")\n\n\ndef save_file(data, filename, append_to_json=True, verbose=True):\n \"\"\"\n Common i/o utility to handle saving data to various file formats.\n Supported:\n .pkl, .pickle, .npy, .json\n Specifically for .json, users have the option to either append (default)\n or rewrite by passing in Boolean value to append_to_json.\n \"\"\"\n if verbose:\n logging.info(f\"Saving data to file: {filename}\")\n file_ext = os.path.splitext(filename)[1]\n if file_ext in [\".pkl\", \".pickle\"]:\n with g_pathmgr.open(filename, \"wb\") as fopen:\n pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)\n elif file_ext == \".npy\":\n with g_pathmgr.open(filename, \"wb\") as fopen:\n np.save(fopen, data)\n elif file_ext == \".json\":\n if append_to_json:\n with g_pathmgr.open(filename, \"a\") as fopen:\n fopen.write(json.dumps(data, sort_keys=True) + \"\\n\")\n fopen.flush()\n else:\n with g_pathmgr.open(filename, \"w\") as fopen:\n fopen.write(json.dumps(data, sort_keys=True) + \"\\n\")\n fopen.flush()\n elif file_ext == \".yaml\":\n with g_pathmgr.open(filename, \"w\") as fopen:\n dump = yaml.dump(data)\n fopen.write(dump)\n fopen.flush()\n else:\n raise Exception(f\"Saving {file_ext} is not supported yet\")\n\n if verbose:\n logging.info(f\"Saved data to file: {filename}\")\n\n\ndef load_file(filename, mmap_mode=None):\n \"\"\"\n Common i/o utility to handle loading data from various file formats.\n Supported:\n .pkl, .pickle, .npy, .json\n For the npy files, we support reading the files in mmap_mode.\n If the mmap_mode of reading is not successful, we load data without the\n mmap_mode.\n \"\"\"\n logging.info(f\"Loading data from file: {filename}\")\n file_ext = os.path.splitext(filename)[1]\n if file_ext == \".txt\":\n with g_pathmgr.open(filename, \"r\") as fopen:\n data = fopen.readlines()\n elif file_ext in [\".pkl\", \".pickle\"]:\n with g_pathmgr.open(filename, \"rb\") as fopen:\n data = pickle.load(fopen, encoding=\"latin1\")\n elif file_ext 
== \".npy\":\n if mmap_mode:\n try:\n with g_pathmgr.open(filename, \"rb\") as fopen:\n data = np.load(fopen, encoding=\"latin1\", mmap_mode=mmap_mode)\n except ValueError as e:\n logging.info(\n f\"Could not mmap {filename}: {e}. Trying without g_pathmgr\"\n )\n data = np.load(filename, encoding=\"latin1\", mmap_mode=mmap_mode)\n logging.info(\"Successfully loaded without g_pathmgr\")\n except Exception:\n logging.info(\"Could not mmap without g_pathmgr. Trying without mmap\")\n with g_pathmgr.open(filename, \"rb\") as fopen:\n data = np.load(fopen, encoding=\"latin1\")\n else:\n with g_pathmgr.open(filename, \"rb\") as fopen:\n data = np.load(fopen, encoding=\"latin1\")\n elif file_ext == \".json\":\n with g_pathmgr.open(filename, \"r\") as fopen:\n data = json.load(fopen)\n elif file_ext == \".yaml\":\n with g_pathmgr.open(filename, \"r\") as fopen:\n data = yaml.load(fopen, Loader=yaml.FullLoader)\n elif file_ext == \".csv\":\n with g_pathmgr.open(filename, \"r\") as fopen:\n data = pd.read_csv(fopen)\n else:\n raise Exception(f\"Reading from {file_ext} is not supported yet\")\n return data\n\n\ndef abspath(resource_path: str):\n \"\"\"\n Make a path absolute, but take into account prefixes like\n \"http://\" or \"manifold://\"\n \"\"\"\n regex = re.compile(r\"^\\w+://\")\n if regex.match(resource_path) is None:\n return os.path.abspath(resource_path)\n else:\n return resource_path\n\n\ndef makedir(dir_path):\n \"\"\"\n Create the directory if it does not exist.\n \"\"\"\n is_success = False\n try:\n if not g_pathmgr.exists(dir_path):\n g_pathmgr.mkdirs(dir_path)\n is_success = True\n except BaseException:\n logging.info(f\"Error creating directory: {dir_path}\")\n return is_success\n\n\ndef is_url(input_url):\n \"\"\"\n Check if an input string is a url. look for http(s):// and ignoring the case\n \"\"\"\n is_url = re.match(r\"^(?:http)s?://\", input_url, re.IGNORECASE) is not None\n return is_url\n\n\ndef cleanup_dir(dir):\n \"\"\"\n Utility for deleting a directory. Useful for cleaning the storage space\n that contains various training artifacts like checkpoints, data etc.\n \"\"\"\n if g_pathmgr.exists(dir):\n logging.info(f\"Deleting directory: {dir}\")\n os.system(f\"rm -rf {dir}\")\n logging.info(f\"Deleted contents of directory: {dir}\")\n\n\ndef get_file_size(filename):\n \"\"\"\n Given a file, get the size of file in MB\n \"\"\"\n size_in_mb = os.path.getsize(filename) / float(1024 ** 2)\n return size_in_mb\n\n\ndef copy_file(input_file, destination_dir, tmp_destination_dir):\n \"\"\"\n Copy a given input_file from source to the destination directory.\n\n Steps:\n 1. We use g_pathmgr to extract the data to local path.\n 2. we simply move the files from the g_pathmgr cached local directory\n to the user specified destination directory. We use rsync.\n How destination dir is chosen:\n a) If user is using slurm, we set destination_dir = slurm_dir (see get_slurm_dir)\n b) If the local path used by PathManafer is same as the input_file path,\n and the destination directory is not specified, we set\n destination_dir = tmp_destination_dir\n\n Returns:\n output_file (str): the new path of the file\n destination_dir (str): the destination dir that was actually used\n \"\"\"\n # we first extract the local path for the files. 
g_pathmgr\n # determines the local path itself and copies data there.\n logging.info(f\"Copying {input_file} to local path...\")\n out = g_pathmgr.get_local_path(input_file)\n output_dir = os.path.dirname(out)\n logging.info(f\"File coped to: {out}\")\n\n if (out == input_file) and not destination_dir:\n destination_dir = tmp_destination_dir\n logging.info(\n f\"The file wasn't copied. Copying again to temp \"\n f\"destination directory: {destination_dir}\"\n )\n # if the user wants to copy the files to a specific location,\n # we simply move the files from the g_pathmgr cached directory\n # to the user specified directory.\n destination_dir = get_slurm_dir(destination_dir)\n if \"SLURM_JOBID\" in os.environ:\n destination_dir = get_slurm_dir(destination_dir)\n if destination_dir is not None:\n makedir(destination_dir)\n output_file = f\"{destination_dir}/{os.path.basename(input_file)}\"\n if g_pathmgr.exists(output_file):\n logging.info(f\"File already copied: {output_file}\")\n return output_file, destination_dir\n\n logging.info(f\"Copying file: {input_file} to destination: {destination_dir}\")\n stime = time.perf_counter()\n os.system(f\"rsync -a --progress {out} {destination_dir}\")\n etime = time.perf_counter()\n logging.info(\n f\"Copied file | time (sec): {round(etime - stime, 4)} \"\n f\"size: {get_file_size(output_file)}\"\n )\n return output_file, destination_dir\n else:\n return out, output_dir\n\n\ndef copy_dir(input_dir, destination_dir, num_threads):\n \"\"\"\n Copy contents of one directory to the specified destination directory\n using the number of threads to speed up the copy. When the data is\n copied successfully, we create a copy_complete file in the\n destination_dir folder to mark the completion. If the destination_dir\n folder already exists and has the copy_complete file, we don't\n copy the file.\n\n useful for copying datasets like ImageNet to speed up dataloader.\n Using 20 threads for imagenet takes about 20 minutes to copy.\n\n Returns:\n destination_dir (str): directory where the contents were copied\n \"\"\"\n # remove the backslash if user added it\n data_name = input_dir.strip(\"/\").split(\"/\")[-1]\n if \"SLURM_JOBID\" in os.environ:\n destination_dir = get_slurm_dir(destination_dir)\n destination_dir = f\"{destination_dir}/{data_name}\"\n makedir(destination_dir)\n complete_flag = f\"{destination_dir}/copy_complete\"\n if g_pathmgr.isfile(complete_flag):\n logging.info(f\"Found Data already copied: {destination_dir}...\")\n return destination_dir\n logging.info(\n f\"Copying {input_dir} to dir {destination_dir} using {num_threads} threads\"\n )\n # We have to do multi-threaded rsync to speed up copy.\n cmd = (\n f\"ls -d {input_dir}/* | parallel -j {num_threads} --will-cite \"\n f\"rsync -ruW --inplace {{}} {destination_dir}\"\n )\n os.system(cmd)\n g_pathmgr.open(complete_flag, \"a\").close()\n logging.info(\"Copied to local directory\")\n return destination_dir, destination_dir\n\n\ndef copy_data(input_file, destination_dir, num_threads, tmp_destination_dir):\n \"\"\"\n Copy data from one source to the other using num_threads. The data to copy\n can be a single file or a directory. 
We check what type of data and\n call the relevant functions.\n\n Returns:\n output_file (str): the new path of the data (could be file or dir)\n destination_dir (str): the destination dir that was actually used\n \"\"\"\n # return whatever the input is: whether \"\", None or anything else.\n logging.info(f\"Creating directory: {destination_dir}\")\n if not (destination_dir is None or destination_dir == \"\"):\n makedir(destination_dir)\n else:\n destination_dir = None\n if g_pathmgr.isfile(input_file):\n output_file, output_dir = copy_file(\n input_file, destination_dir, tmp_destination_dir\n )\n elif g_pathmgr.isdir(input_file):\n output_file, output_dir = copy_dir(input_file, destination_dir, num_threads)\n else:\n raise RuntimeError(\"The input_file is neither a file nor a directory\")\n return output_file, output_dir\n\n\ndef copy_data_to_local(\n input_files, destination_dir, num_threads=40, tmp_destination_dir=None\n):\n \"\"\"\n Iteratively copy the list of data to a destination directory.\n Each data to copy could be a single file or a directory.\n\n Returns:\n output_file (str): the new path of the file. If there were\n no files to copy, simply return the input_files\n destination_dir (str): the destination dir that was actually used\n \"\"\"\n # it might be possible that we don't use the labels and hence don't have\n # label files. In that case, we return the input_files itself as we have\n # nothing to copy.\n if len(input_files) > 0:\n output_files = []\n for item in input_files:\n if isinstance(item, list):\n copied_file, output_dir = copy_data_to_local(\n item, destination_dir, num_threads, tmp_destination_dir\n )\n else:\n copied_file, output_dir = copy_data(\n item, destination_dir, num_threads, tmp_destination_dir\n )\n output_files.append(copied_file)\n return output_files, output_dir\n return input_files, destination_dir\n" ]
[ [ "pandas.read_csv", "numpy.load", "numpy.save" ] ]
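The io helpers in the record above dispatch purely on file extension. A small usage sketch (the paths are arbitrary examples), assuming `solo/utils/io.py` is importable as in the record's file_path:

import numpy as np
from solo.utils.io import makedir, save_file, load_file  # helpers defined in the record

makedir("artifacts")
save_file({"epoch": 10, "acc": 0.912}, "artifacts/metrics.json", append_to_json=False)
metrics = load_file("artifacts/metrics.json")        # .json branch uses json.load
save_file(np.arange(8), "artifacts/embeddings.npy")
embeddings = load_file("artifacts/embeddings.npy")   # .npy branch uses np.load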
e-koch/pyuvdata
[ "ac36067f195c75127b28f02479eda1eb7a3400ed" ]
[ "pyuvdata/uvbeam/cst_beam.py" ]
[ "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\"\"\"Class for reading beam CST files.\"\"\"\nimport os\nimport re\nimport warnings\n\nimport numpy as np\n\nfrom .uvbeam import UVBeam\nfrom .. import utils as uvutils\n\n__all__ = [\"CSTBeam\"]\n\n\nclass CSTBeam(UVBeam):\n \"\"\"\n Defines a CST-specific subclass of UVBeam for reading CST text files.\n\n This class should not be interacted with directly, instead use the\n read_cst_beam method on the UVBeam class.\n\n \"\"\"\n\n def name2freq(self, fname):\n \"\"\"\n Extract frequency from the filename.\n\n Assumes the file name contains a substring with the frequency channel\n in MHz that the data represents.\n e.g. \"HERA_Sim_120.87MHz.txt\" should yield 120.87e6\n\n Parameters\n ----------\n fname : str\n Filename to parse.\n\n Returns\n -------\n float\n Frequency extracted from filename in Hz.\n \"\"\"\n fi = fname.rfind(\"Hz\")\n frequency = float(re.findall(r\"\\d*\\.\\d+|\\d+\", fname[:fi])[-1])\n\n si_prefix = fname[fi - 1]\n si_dict = {\"k\": 1e3, \"M\": 1e6, \"G\": 1e9}\n if si_prefix in si_dict.keys():\n frequency = frequency * si_dict[si_prefix]\n\n return frequency\n\n def read_cst_beam(\n self,\n filename,\n beam_type=\"power\",\n feed_pol=\"x\",\n rotate_pol=True,\n frequency=None,\n telescope_name=None,\n feed_name=None,\n feed_version=None,\n model_name=None,\n model_version=None,\n history=\"\",\n x_orientation=None,\n reference_impedance=None,\n extra_keywords=None,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"\n Read in data from a cst file.\n\n Parameters\n ----------\n filename : str\n The cst file to read from.\n beam_type : str\n What beam_type to read in ('power' or 'efield').\n feed_pol : str\n The feed or polarization or list of feeds or polarizations the\n files correspond to.\n Defaults to 'x' (meaning x for efield or xx for power beams).\n rotate_pol : bool\n If True, assume the structure in the simulation is symmetric under\n 90 degree rotations about the z-axis (so that the y polarization can be\n constructed by rotating the x polarization or vice versa).\n Default: True if feed_pol is a single value or a list with all\n the same values in it, False if it is a list with varying values.\n frequency : float or list of float\n The frequency or list of frequencies corresponding to the filename(s).\n This is assumed to be in the same order as the files.\n If not passed, the code attempts to parse it from the filenames.\n telescope_name : str\n The name of the telescope corresponding to the filename(s).\n feed_name : str\n The name of the feed corresponding to the filename(s).\n feed_version : str\n The version of the feed corresponding to the filename(s).\n model_name : str\n The name of the model corresponding to the filename(s).\n model_version : str\n The version of the model corresponding to the filename(s).\n history : str\n A string detailing the history of the filename(s).\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization. 
Options are \"east\" (indicating\n east/west orientation) and \"north\" (indicating north/south orientation)\n reference_impedance : float, optional\n The reference impedance of the model(s).\n extra_keywords : dict, optional\n A dictionary containing any extra_keywords.\n run_check : bool\n Option to check for the existence and proper shapes of\n required parameters after reading in the file.\n check_extra : bool\n Option to check optional parameters as well as\n required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of\n required parameters after reading in the file.\n\n \"\"\"\n # update filename attribute\n basename = os.path.basename(filename)\n self.filename = [basename]\n self._filename.form = (1,)\n\n self.telescope_name = telescope_name\n self.feed_name = feed_name\n self.feed_version = feed_version\n self.model_name = model_name\n self.model_version = model_version\n self.history = history\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n if x_orientation is not None:\n self.x_orientation = x_orientation\n if reference_impedance is not None:\n self.reference_impedance = float(reference_impedance)\n if extra_keywords is not None:\n self.extra_keywords = extra_keywords\n\n if beam_type == \"power\":\n self.Naxes_vec = 1\n\n if feed_pol == \"x\":\n feed_pol = \"xx\"\n elif feed_pol == \"y\":\n feed_pol = \"yy\"\n\n if rotate_pol:\n rot_pol_dict = {\"xx\": \"yy\", \"yy\": \"xx\", \"xy\": \"yx\", \"yx\": \"xy\"}\n pol2 = rot_pol_dict[feed_pol]\n self.polarization_array = np.array(\n [uvutils.polstr2num(feed_pol), uvutils.polstr2num(pol2)]\n )\n else:\n self.polarization_array = np.array([uvutils.polstr2num(feed_pol)])\n\n self.Npols = len(self.polarization_array)\n self._set_power()\n else:\n self.Naxes_vec = 2\n self.Ncomponents_vec = 2\n if rotate_pol:\n if feed_pol == \"x\":\n self.feed_array = np.array([\"x\", \"y\"])\n else:\n self.feed_array = np.array([\"y\", \"x\"])\n else:\n if feed_pol == \"x\":\n self.feed_array = np.array([\"x\"])\n else:\n self.feed_array = np.array([\"y\"])\n self.Nfeeds = self.feed_array.size\n self._set_efield()\n\n self.data_normalization = \"physical\"\n self.antenna_type = \"simple\"\n\n self.Nfreqs = 1\n self.Nspws = 1\n self.freq_array = np.zeros((self.Nspws, self.Nfreqs))\n self.bandpass_array = np.zeros((self.Nspws, self.Nfreqs))\n\n self.spw_array = np.array([0])\n self.pixel_coordinate_system = \"az_za\"\n self._set_cs_params()\n\n out_file = open(filename, \"r\")\n line = out_file.readline().strip() # Get the first line\n out_file.close()\n raw_names = line.split(\"]\")\n raw_names = [raw_name for raw_name in raw_names if not raw_name == \"\"]\n column_names = []\n units = []\n for raw_name in raw_names:\n column_name, unit = tuple(raw_name.split(\"[\"))\n column_names.append(\"\".join(column_name.lower().split(\" \")))\n units.append(unit.lower().strip())\n\n data = np.loadtxt(filename, skiprows=2)\n\n theta_col = np.where(np.array(column_names) == \"theta\")[0][0]\n phi_col = np.where(np.array(column_names) == \"phi\")[0][0]\n\n if \"deg\" in units[theta_col]:\n theta_data = np.radians(data[:, theta_col])\n else:\n theta_data = data[:, theta_col]\n if \"deg\" in units[phi_col]:\n phi_data = np.radians(data[:, phi_col])\n else:\n phi_data = data[:, phi_col]\n\n theta_axis = np.sort(np.unique(theta_data))\n phi_axis = np.sort(np.unique(phi_data))\n if not theta_axis.size * phi_axis.size == theta_data.size:\n raise 
ValueError(\"Data does not appear to be on a grid\")\n\n theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order=\"F\")\n phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order=\"F\")\n\n delta_theta = np.diff(theta_axis)\n if not np.isclose(np.max(delta_theta), np.min(delta_theta)):\n raise ValueError(\n \"Data does not appear to be regularly gridded in zenith angle\"\n )\n delta_theta = delta_theta[0]\n\n delta_phi = np.diff(phi_axis)\n if not np.isclose(np.max(delta_phi), np.min(delta_phi)):\n raise ValueError(\n \"Data does not appear to be regularly gridded in azimuth angle\"\n )\n delta_phi = delta_phi[0]\n\n self.axis1_array = phi_axis\n self.Naxes1 = self.axis1_array.size\n self.axis2_array = theta_axis\n self.Naxes2 = self.axis2_array.size\n\n if self.beam_type == \"power\":\n # type depends on whether cross pols are present\n # (if so, complex, else float)\n self.data_array = np.zeros(\n self._data_array.expected_shape(self),\n dtype=self._data_array.expected_type,\n )\n else:\n self.data_array = np.zeros(\n self._data_array.expected_shape(self), dtype=np.complex128\n )\n\n if frequency is not None:\n self.freq_array[0] = frequency\n else:\n self.freq_array[0] = self.name2freq(filename)\n\n if rotate_pol:\n # for second polarization, rotate by pi/2\n rot_phi = phi_data + np.pi / 2\n rot_phi[np.where(rot_phi >= 2 * np.pi)] -= 2 * np.pi\n roll_rot_phi = np.roll(rot_phi, int((np.pi / 2) / delta_phi), axis=1)\n if not np.allclose(roll_rot_phi, phi_data):\n raise ValueError(\"Rotating by pi/2 failed\")\n\n # theta is not affected by the rotation\n\n # get beam\n if self.beam_type == \"power\":\n\n data_col_enum = [\"abs(e)\", \"abs(v)\"]\n data_col = []\n for name in data_col_enum:\n this_col = np.where(np.array(column_names) == name)[0]\n if this_col.size > 0:\n data_col = data_col + this_col.tolist()\n if len(data_col) == 0:\n raise ValueError(\"No power column found in file: {}\".format(filename))\n elif len(data_col) > 1:\n raise ValueError(\n \"Multiple possible power columns found in file: {}\".format(filename)\n )\n data_col = data_col[0]\n power_beam1 = (\n data[:, data_col].reshape((theta_axis.size, phi_axis.size), order=\"F\")\n ** 2.0\n )\n\n self.data_array[0, 0, 0, 0, :, :] = power_beam1\n\n if rotate_pol:\n # rotate by pi/2 for second polarization\n power_beam2 = np.roll(power_beam1, int((np.pi / 2) / delta_phi), axis=1)\n self.data_array[0, 0, 1, 0, :, :] = power_beam2\n else:\n self.basis_vector_array = np.zeros(\n (self.Naxes_vec, self.Ncomponents_vec, self.Naxes2, self.Naxes1)\n )\n self.basis_vector_array[0, 0, :, :] = 1.0\n self.basis_vector_array[1, 1, :, :] = 1.0\n\n theta_mag_col = np.where(np.array(column_names) == \"abs(theta)\")[0][0]\n theta_phase_col = np.where(np.array(column_names) == \"phase(theta)\")[0][0]\n phi_mag_col = np.where(np.array(column_names) == \"abs(phi)\")[0][0]\n phi_phase_col = np.where(np.array(column_names) == \"phase(phi)\")[0][0]\n\n theta_mag = data[:, theta_mag_col].reshape(\n (theta_axis.size, phi_axis.size), order=\"F\"\n )\n phi_mag = data[:, phi_mag_col].reshape(\n (theta_axis.size, phi_axis.size), order=\"F\"\n )\n if \"deg\" in units[theta_phase_col]:\n theta_phase = np.radians(data[:, theta_phase_col])\n else:\n theta_phase = data[:, theta_phase_col]\n if \"deg\" in units[phi_phase_col]:\n phi_phase = np.radians(data[:, phi_phase_col])\n else:\n phi_phase = data[:, phi_phase_col]\n theta_phase = theta_phase.reshape(\n (theta_axis.size, phi_axis.size), order=\"F\"\n )\n phi_phase = 
phi_phase.reshape((theta_axis.size, phi_axis.size), order=\"F\")\n\n theta_beam = theta_mag * np.exp(1j * theta_phase)\n phi_beam = phi_mag * np.exp(1j * phi_phase)\n\n self.data_array[0, 0, 0, 0, :, :] = phi_beam\n self.data_array[1, 0, 0, 0, :, :] = theta_beam\n\n if rotate_pol:\n # rotate by pi/2 for second polarization\n theta_beam2 = np.roll(theta_beam, int((np.pi / 2) / delta_phi), axis=1)\n phi_beam2 = np.roll(phi_beam, int((np.pi / 2) / delta_phi), axis=1)\n self.data_array[0, 0, 1, 0, :, :] = phi_beam2\n self.data_array[1, 0, 1, 0, :, :] = theta_beam2\n\n self.bandpass_array[0] = 1\n\n if frequency is None:\n warnings.warn(\n \"No frequency provided. Detected frequency is: \"\n \"{freqs} Hz\".format(freqs=self.freq_array)\n )\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.min", "numpy.exp", "numpy.diff", "numpy.radians", "numpy.allclose", "numpy.loadtxt", "numpy.where", "numpy.unique" ] ]
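The CST reader above states it should be driven through `read_cst_beam` on the `UVBeam` class rather than used directly. A hedged sketch of that entry point; the filename echoes the `name2freq` docstring and every metadata string below is a placeholder, not a value from the record:

from pyuvdata import UVBeam

beam = UVBeam()
beam.read_cst_beam(
    "HERA_Sim_120.87MHz.txt",      # frequency is parsed from the name when not passed explicitly
    beam_type="efield",
    feed_pol="x",
    rotate_pol=True,               # builds the y feed by rotating the x beam by 90 degrees
    telescope_name="HERA",         # placeholder metadata from here on
    feed_name="sim feed",
    feed_version="1.0",
    model_name="sim model",
    model_version="1.0",
    history="read from a CST text export",
)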
hzy46/pytorch-cifar
[ "2fb4d53004ef19954b8cbf11697169ac556aa23b" ]
[ "DP.py" ]
[ "'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom models import *\nimport time\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--epoch', default=200, type=int, help='epoch num')\nparser.add_argument('--batch_size', default=128, type=int, help='total batch size')\nparser.add_argument('--val_batch_size', default=128, type=int, help='val batch size')\nparser.add_argument('--resume', '-r', action='store_true',\n help='resume from checkpoint')\nparser.add_argument('--mode', choices=['SG', 'MGSN_DP'], \n help='SG: Single GPU; MGSN_DP: Multiple GPUs and Single Node By DataParallel; ')\nargs = parser.parse_args()\n\nif args.mode == 'SG':\n device = torch.device('cuda:0')\nelif args.mode == 'MGSN_DP':\n device = torch.device('cuda')\n\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=args.val_batch_size, shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Model\nprint('==> Building model..')\n# net = VGG('VGG19')\n# net = ResNet18()\n# net = PreActResNet18()\n# net = GoogLeNet()\n# net = DenseNet121()\n# net = ResNeXt29_2x64d()\n# net = MobileNet()\n# net = MobileNetV2()\n# net = DPN92()\n# net = ShuffleNetG2()\n# net = SENet18()\n# net = ShuffleNetV2(1)\n# net = EfficientNetB0()\nnet = RegNetX_200MF()\nif args.mode == 'SG':\n net = net.to(device)\nelif args.mode == 'MGSN_DP':\n net = net.to(device)\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt.pth')\n net.load_state_dict(checkpoint['net'])\n start_epoch = checkpoint['epoch']\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr,\n momentum=0.9, weight_decay=5e-4)\n\n\n# Training\ndef train(epoch, device):\n print('\\nEpoch: %d' % epoch)\n start_ts = time.time()\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += 
predicted.eq(targets).sum().item()\n\n if (batch_idx + 1) % 5 == 0:\n print('[Epoch=%5d][Step=%5d/%5d] Train Loss=%.3f Train Acc=%.3f%%' % (\n epoch,\n batch_idx + 1,\n len(trainloader),\n train_loss / (batch_idx+1),\n 100. * correct / total,\n ))\n print('Epoch %d Elapsed Time: %5ds' % (epoch, int(time.time() - start_ts)))\n\n\ndef test(epoch, device):\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n print('[Epoch=%5d] Test Loss=%.3f Test Acc=%.3f%%' % (\n epoch,\n test_loss / (batch_idx + 1),\n 100. * correct / total,\n ))\n\n # Save checkpoint.\n state = {\n 'net': net.state_dict(),\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt.pth')\n\n\nif __name__ == '__main__':\n for epoch in range(start_epoch, args.epoch):\n train(epoch, device)\n test(epoch, device)\n" ]
[ [ "torch.device", "torch.save", "torch.no_grad", "torch.utils.data.DataLoader", "torch.load", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel" ] ]
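In the CIFAR-10 script above, the only difference between the SG and MGSN_DP modes is the DataParallel wrap plus cudnn benchmarking. A stripped-down sketch of that pattern, with torchvision's resnet18 standing in for the script's RegNetX_200MF:

import torch
import torch.backends.cudnn as cudnn
import torchvision

device = torch.device("cuda")
net = torchvision.models.resnet18(num_classes=10).to(device)
net = torch.nn.DataParallel(net)   # replicate the module across all visible GPUs
cudnn.benchmark = True             # autotune conv kernels for the fixed 32x32 inputs

x = torch.randn(128, 3, 32, 32, device=device)
logits = net(x)                    # the batch of 128 is split across the replicas
print(logits.shape)                # torch.Size([128, 10])

The script itself would be launched with flags from its argparse block, for example: python DP.py --mode MGSN_DP --batch_size 256 --epoch 100.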
kylelo/pytorch3d
[ "818e161ec5394e3e6edad816a18287a08393e8c8" ]
[ "pytorch3d/renderer/cameras.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nimport warnings\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom pytorch3d.common.types import Device\nfrom pytorch3d.transforms import Rotate, Transform3d, Translate\n\nfrom .utils import TensorProperties, convert_to_tensors_and_broadcast\n\n\n# Default values for rotation and translation matrices.\n_R = torch.eye(3)[None] # (1, 3, 3)\n_T = torch.zeros(1, 3) # (1, 3)\n\n\nclass CamerasBase(TensorProperties):\n \"\"\"\n `CamerasBase` implements a base class for all cameras.\n\n For cameras, there are four different coordinate systems (or spaces)\n - World coordinate system: This is the system the object lives - the world.\n - Camera view coordinate system: This is the system that has its origin on the camera\n and the and the Z-axis perpendicular to the image plane.\n In PyTorch3D, we assume that +X points left, and +Y points up and\n +Z points out from the image plane.\n The transformation from world --> view happens after applying a rotation (R)\n and translation (T)\n - NDC coordinate system: This is the normalized coordinate system that confines\n in a volume the rendered part of the object or scene. Also known as view volume.\n For square images, given the PyTorch3D convention, (+1, +1, znear)\n is the top left near corner, and (-1, -1, zfar) is the bottom right far\n corner of the volume.\n The transformation from view --> NDC happens after applying the camera\n projection matrix (P) if defined in NDC space.\n For non square images, we scale the points such that smallest side\n has range [-1, 1] and the largest side has range [-u, u], with u > 1.\n - Screen coordinate system: This is another representation of the view volume with\n the XY coordinates defined in image space instead of a normalized space.\n\n A better illustration of the coordinate systems can be found in\n pytorch3d/docs/notes/cameras.md.\n\n It defines methods that are common to all camera models:\n - `get_camera_center` that returns the optical center of the camera in\n world coordinates\n - `get_world_to_view_transform` which returns a 3D transform from\n world coordinates to the camera view coordinates (R, T)\n - `get_full_projection_transform` which composes the projection\n transform (P) with the world-to-view transform (R, T)\n - `transform_points` which takes a set of input points in world coordinates and\n projects to the space the camera is defined in (NDC or screen)\n - `get_ndc_camera_transform` which defines the transform from screen/NDC to\n PyTorch3D's NDC space\n - `transform_points_ndc` which takes a set of points in world coordinates and\n projects them to PyTorch3D's NDC space\n - `transform_points_screen` which takes a set of points in world coordinates and\n projects them to screen space\n\n For each new camera, one should implement the `get_projection_transform`\n routine that returns the mapping from camera view coordinates to camera\n coordinates (NDC or screen).\n\n Another useful function that is specific to each camera model is\n `unproject_points` which sends points from camera coordinates (NDC or screen)\n back to camera view or world coordinates depending on the `world_coordinates`\n boolean argument of the function.\n \"\"\"\n\n # Used in __getitem__ to index the relevant 
fields\n # When creating a new camera, this should be set in the __init__\n _FIELDS: Tuple[str, ...] = ()\n\n # Names of fields which are a constant property of the whole batch, rather\n # than themselves a batch of data.\n # When joining objects into a batch, they will have to agree.\n _SHARED_FIELDS: Tuple[str, ...] = ()\n\n def get_projection_transform(self):\n \"\"\"\n Calculate the projective transformation matrix.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in `__init__`.\n\n Return:\n a `Transform3d` object which represents a batch of projection\n matrices of shape (N, 3, 3)\n \"\"\"\n raise NotImplementedError()\n\n def unproject_points(self, xy_depth: torch.Tensor, **kwargs):\n \"\"\"\n Transform input points from camera coodinates (NDC or screen)\n to the world / camera coordinates.\n\n Each of the input points `xy_depth` of shape (..., 3) is\n a concatenation of the x, y location and its depth.\n\n For instance, for an input 2D tensor of shape `(num_points, 3)`\n `xy_depth` takes the following form:\n `xy_depth[i] = [x[i], y[i], depth[i]]`,\n for a each point at an index `i`.\n\n The following example demonstrates the relationship between\n `transform_points` and `unproject_points`:\n\n .. code-block:: python\n\n cameras = # camera object derived from CamerasBase\n xyz = # 3D points of shape (batch_size, num_points, 3)\n # transform xyz to the camera view coordinates\n xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)\n # extract the depth of each point as the 3rd coord of xyz_cam\n depth = xyz_cam[:, :, 2:]\n # project the points xyz to the camera\n xy = cameras.transform_points(xyz)[:, :, :2]\n # append depth to xy\n xy_depth = torch.cat((xy, depth), dim=2)\n # unproject to the world coordinates\n xyz_unproj_world = cameras.unproject_points(xy_depth, world_coordinates=True)\n print(torch.allclose(xyz, xyz_unproj_world)) # True\n # unproject to the camera coordinates\n xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=False)\n print(torch.allclose(xyz_cam, xyz_unproj)) # True\n\n Args:\n xy_depth: torch tensor of shape (..., 3).\n world_coordinates: If `True`, unprojects the points back to world\n coordinates using the camera extrinsics `R` and `T`.\n `False` ignores `R` and `T` and unprojects to\n the camera view coordinates.\n from_ndc: If `False` (default), assumes xy part of input is in\n NDC space if self.in_ndc(), otherwise in screen space. If\n `True`, assumes xy is in NDC space even if the camera\n is defined in screen space.\n\n Returns\n new_points: unprojected points with the same shape as `xy_depth`.\n \"\"\"\n raise NotImplementedError()\n\n def get_camera_center(self, **kwargs) -> torch.Tensor:\n \"\"\"\n Return the 3D location of the camera optical center\n in the world coordinates.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting T here will update the values set in init as this\n value may be needed later on in the rendering pipeline e.g. 
for\n lighting calculations.\n\n Returns:\n C: a batch of 3D locations of shape (N, 3) denoting\n the locations of the center of each camera in the batch.\n \"\"\"\n w2v_trans = self.get_world_to_view_transform(**kwargs)\n P = w2v_trans.inverse().get_matrix()\n # the camera center is the translation component (the first 3 elements\n # of the last row) of the inverted world-to-view\n # transform (4x4 RT matrix)\n C = P[:, 3, :3]\n return C\n\n def get_world_to_view_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the world-to-view transform.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n A Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n R: torch.Tensor = kwargs.get(\"R\", self.R)\n T: torch.Tensor = kwargs.get(\"T\", self.T)\n self.R = R # pyre-ignore[16]\n self.T = T # pyre-ignore[16]\n world_to_view_transform = get_world_to_view_transform(R=R, T=T)\n return world_to_view_transform\n\n def get_full_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the full world-to-camera transform composing the\n world-to-view and view-to-camera transforms.\n If camera is defined in NDC space, the projected points are in NDC space.\n If camera is defined in screen space, the projected points are in screen space.\n\n Args:\n **kwargs: parameters for the projection transforms can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R: torch.Tensor = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T: torch.Tensor = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)\n view_to_proj_transform = self.get_projection_transform(**kwargs)\n return world_to_view_transform.compose(view_to_proj_transform)\n\n def transform_points(\n self, points, eps: Optional[float] = None, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Transform input points from world to camera space with the\n projection matrix defined by the camera.\n\n For `CamerasBase.transform_points`, setting `eps > 0`\n stabilizes gradients since it leads to avoiding division\n by excessively low numbers for points close to the camera plane.\n\n Args:\n points: torch tensor of shape (..., 3).\n eps: If eps!=None, the argument is used to clamp the\n divisor in the homogeneous normalization of the points\n transformed to the ndc space. 
Please see\n `transforms.Transform3d.transform_points` for details.\n\n For `CamerasBase.transform_points`, setting `eps > 0`\n stabilizes gradients since it leads to avoiding division\n by excessively low numbers for points close to the\n camera plane.\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_proj_transform = self.get_full_projection_transform(**kwargs)\n return world_to_proj_transform.transform_points(points, eps=eps)\n\n def get_ndc_camera_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Returns the transform from camera projection space (screen or NDC) to NDC space.\n For cameras that can be specified in screen space, this transform\n allows points to be converted from screen to NDC space.\n The default transform scales the points from [0, W]x[0, H]\n to [-1, 1]x[-u, u] or [-u, u]x[-1, 1] where u > 1 is the aspect ratio of the image.\n This function should be modified per camera definitions if need be,\n e.g. for Perspective/Orthographic cameras we provide a custom implementation.\n This transform assumes PyTorch3D coordinate system conventions for\n both the NDC space and the input points.\n\n This transform interfaces with the PyTorch3D renderer which assumes\n input points to the renderer to be in NDC space.\n \"\"\"\n if self.in_ndc():\n return Transform3d(device=self.device, dtype=torch.float32)\n else:\n # For custom cameras which can be defined in screen space,\n # users might might have to implement the screen to NDC transform based\n # on the definition of the camera parameters.\n # See PerspectiveCameras/OrthographicCameras for an example.\n # We don't flip xy because we assume that world points are in\n # PyTorch3D coordinates, and thus conversion from screen to ndc\n # is a mere scaling from image to [-1, 1] scale.\n image_size = kwargs.get(\"image_size\", self.get_image_size())\n return get_screen_to_ndc_transform(\n self, with_xyflip=False, image_size=image_size\n )\n\n def transform_points_ndc(\n self, points, eps: Optional[float] = None, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Transforms points from PyTorch3D world/camera space to NDC space.\n Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up.\n Output points are in NDC space: +X left, +Y up, origin at image center.\n\n Args:\n points: torch tensor of shape (..., 3).\n eps: If eps!=None, the argument is used to clamp the\n divisor in the homogeneous normalization of the points\n transformed to the ndc space. 
Please see\n `transforms.Transform3d.transform_points` for details.\n\n For `CamerasBase.transform_points`, setting `eps > 0`\n stabilizes gradients since it leads to avoiding division\n by excessively low numbers for points close to the\n camera plane.\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_ndc_transform = self.get_full_projection_transform(**kwargs)\n if not self.in_ndc():\n to_ndc_transform = self.get_ndc_camera_transform(**kwargs)\n world_to_ndc_transform = world_to_ndc_transform.compose(to_ndc_transform)\n\n return world_to_ndc_transform.transform_points(points, eps=eps)\n\n def transform_points_screen(\n self, points, eps: Optional[float] = None, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Transforms points from PyTorch3D world/camera space to screen space.\n Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up.\n Output points are in screen space: +X right, +Y down, origin at top left corner.\n\n Args:\n points: torch tensor of shape (..., 3).\n eps: If eps!=None, the argument is used to clamp the\n divisor in the homogeneous normalization of the points\n transformed to the ndc space. Please see\n `transforms.Transform3d.transform_points` for details.\n\n For `CamerasBase.transform_points`, setting `eps > 0`\n stabilizes gradients since it leads to avoiding division\n by excessively low numbers for points close to the\n camera plane.\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n points_ndc = self.transform_points_ndc(points, eps=eps, **kwargs)\n image_size = kwargs.get(\"image_size\", self.get_image_size())\n return get_ndc_to_screen_transform(\n self, with_xyflip=True, image_size=image_size\n ).transform_points(points_ndc, eps=eps)\n\n def clone(self):\n \"\"\"\n Returns a copy of `self`.\n \"\"\"\n cam_type = type(self)\n other = cam_type(device=self.device)\n return super().clone(other)\n\n def is_perspective(self):\n raise NotImplementedError()\n\n def in_ndc(self):\n \"\"\"\n Specifies whether the camera is defined in NDC space\n or in screen (image) space\n \"\"\"\n raise NotImplementedError()\n\n def get_znear(self):\n return self.znear if hasattr(self, \"znear\") else None\n\n def get_image_size(self):\n \"\"\"\n Returns the image size, if provided, expected in the form of (height, width)\n The image size is used for conversion of projected points to screen coordinates.\n \"\"\"\n return self.image_size if hasattr(self, \"image_size\") else None\n\n def __getitem__(\n self, index: Union[int, List[int], torch.LongTensor]\n ) -> \"CamerasBase\":\n \"\"\"\n Override for the __getitem__ method in TensorProperties which needs to be\n refactored.\n\n Args:\n index: an int/list/long tensor used to index all the fields in the cameras given by\n self._FIELDS.\n Returns:\n if `index` is an index int/list/long tensor return an instance of the current\n cameras class with only the values at the selected index.\n \"\"\"\n\n kwargs = {}\n\n if not isinstance(index, (int, list, torch.LongTensor, torch.cuda.LongTensor)):\n msg = \"Invalid index type, expected int, List[int] or torch.LongTensor; got %r\"\n raise ValueError(msg % type(index))\n\n if isinstance(index, int):\n index = [index]\n\n if max(index) >= len(self):\n raise ValueError(f\"Index {max(index)} is out of bounds for select cameras\")\n\n for field in self._FIELDS:\n val = getattr(self, field, None)\n if val is None:\n continue\n\n # e.g. 
\"in_ndc\" is set as attribute \"_in_ndc\" on the class\n # but provided as \"in_ndc\" on initialization\n if field.startswith(\"_\"):\n field = field[1:]\n\n if isinstance(val, (str, bool)):\n kwargs[field] = val\n elif isinstance(val, torch.Tensor):\n # In the init, all inputs will be converted to\n # tensors before setting as attributes\n kwargs[field] = val[index]\n else:\n raise ValueError(f\"Field {field} type is not supported for indexing\")\n\n kwargs[\"device\"] = self.device\n return self.__class__(**kwargs)\n\n\n############################################################\n# Field of View Camera Classes #\n############################################################\n\n\ndef OpenGLPerspectiveCameras(\n znear: float = 1.0,\n zfar: float = 100.0,\n aspect_ratio: float = 1.0,\n fov: float = 60.0,\n degrees: bool = True,\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n device: Device = \"cpu\",\n) -> \"FoVPerspectiveCameras\":\n \"\"\"\n OpenGLPerspectiveCameras has been DEPRECATED. Use FoVPerspectiveCameras instead.\n Preserving OpenGLPerspectiveCameras for backward compatibility.\n \"\"\"\n\n warnings.warn(\n \"\"\"OpenGLPerspectiveCameras is deprecated,\n Use FoVPerspectiveCameras instead.\n OpenGLPerspectiveCameras will be removed in future releases.\"\"\",\n PendingDeprecationWarning,\n )\n\n return FoVPerspectiveCameras(\n znear=znear,\n zfar=zfar,\n aspect_ratio=aspect_ratio,\n fov=fov,\n degrees=degrees,\n R=R,\n T=T,\n device=device,\n )\n\n\nclass FoVPerspectiveCameras(CamerasBase):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n projection matrices by specifying the field of view.\n The definition of the parameters follow the OpenGL perspective camera.\n\n The extrinsics of the camera (R and T matrices) can also be set in the\n initializer or passed in to `get_full_projection_transform` to get\n the full transformation from world -> ndc.\n\n The `transform_points` method calculates the full world -> ndc transform\n and then applies it to the input points.\n\n The transforms can also be returned separately as Transform3d objects.\n\n * Setting the Aspect Ratio for Non Square Images *\n\n If the desired output image size is non square (i.e. a tuple of (H, W) where H != W)\n the aspect ratio needs special consideration: There are two aspect ratios\n to be aware of:\n - the aspect ratio of each pixel\n - the aspect ratio of the output image\n The `aspect_ratio` setting in the FoVPerspectiveCameras sets the\n pixel aspect ratio. When using this camera with the differentiable rasterizer\n be aware that in the rasterizer we assume square pixels, but allow\n variable image aspect ratio (i.e rectangle images).\n\n In most cases you will want to set the camera `aspect_ratio=1.0`\n (i.e. 
square pixels) and only vary the output image dimensions in pixels\n for rasterization.\n \"\"\"\n\n # For __getitem__\n _FIELDS = (\n \"K\",\n \"znear\",\n \"zfar\",\n \"aspect_ratio\",\n \"fov\",\n \"R\",\n \"T\",\n \"degrees\",\n )\n\n _SHARED_FIELDS = (\"degrees\",)\n\n def __init__(\n self,\n znear=1.0,\n zfar=100.0,\n aspect_ratio=1.0,\n fov=60.0,\n degrees: bool = True,\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n K: Optional[torch.Tensor] = None,\n device: Device = \"cpu\",\n ) -> None:\n \"\"\"\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n aspect_ratio: aspect ratio of the image pixels.\n 1.0 indicates square pixels.\n fov: field of view angle of the camera.\n degrees: bool, set to True if fov is specified in degrees.\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n K: (optional) A calibration matrix of shape (N, 4, 4)\n If provided, don't need znear, zfar, fov, aspect_ratio, degrees\n device: Device (as str or torch.device)\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n device=device,\n znear=znear,\n zfar=zfar,\n aspect_ratio=aspect_ratio,\n fov=fov,\n R=R,\n T=T,\n K=K,\n )\n\n # No need to convert to tensor or broadcast.\n self.degrees = degrees\n\n def compute_projection_matrix(\n self, znear, zfar, fov, aspect_ratio, degrees: bool\n ) -> torch.Tensor:\n \"\"\"\n Compute the calibration matrix K of shape (N, 4, 4)\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n fov: field of view angle of the camera.\n aspect_ratio: aspect ratio of the image pixels.\n 1.0 indicates square pixels.\n degrees: bool, set to True if fov is specified in degrees.\n\n Returns:\n torch.FloatTensor of the calibration matrix with shape (N, 4, 4)\n \"\"\"\n K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32)\n ones = torch.ones((self._N), dtype=torch.float32, device=self.device)\n if degrees:\n fov = (np.pi / 180) * fov\n\n if not torch.is_tensor(fov):\n fov = torch.tensor(fov, device=self.device)\n tanHalfFov = torch.tan((fov / 2))\n max_y = tanHalfFov * znear\n min_y = -max_y\n max_x = max_y * aspect_ratio\n min_x = -max_x\n\n # NOTE: In OpenGL the projection matrix changes the handedness of the\n # coordinate frame. i.e. the NDC space positive z direction is the\n # camera space negative z direction. This is because the sign of the z\n # in the projection matrix is set to -1.0.\n # In pytorch3d we maintain a right handed coordinate system throughout\n # so the z sign is 1.0.\n z_sign = 1.0\n\n K[:, 0, 0] = 2.0 * znear / (max_x - min_x)\n K[:, 1, 1] = 2.0 * znear / (max_y - min_y)\n K[:, 0, 2] = (max_x + min_x) / (max_x - min_x)\n K[:, 1, 2] = (max_y + min_y) / (max_y - min_y)\n K[:, 3, 2] = z_sign * ones\n\n # NOTE: This maps the z coordinate to [0, 1] where z = 0 if the point\n # is at the near clipping plane and z = 1 when the point is at the far\n # clipping plane.\n K[:, 2, 2] = z_sign * zfar / (zfar - znear)\n K[:, 2, 3] = -(zfar * znear) / (zfar - znear)\n\n return K\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the perspective projection matrix with a symmetric\n viewing frustrum.
Use column major order.\n The viewing frustrum will be projected into ndc, s.t.\n (max_x, max_y) -> (+1, +1)\n (min_x, min_y) -> (-1, -1)\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in `__init__`.\n\n Return:\n a Transform3d object which represents a batch of projection\n matrices of shape (N, 4, 4)\n\n .. code-block:: python\n\n h1 = (max_y + min_y)/(max_y - min_y)\n w1 = (max_x + min_x)/(max_x - min_x)\n tanhalffov = tan((fov/2))\n s1 = 1/tanhalffov\n s2 = 1/(tanhalffov * (aspect_ratio))\n\n # To map z to the range [0, 1] use:\n f1 = far / (far - near)\n f2 = -(far * near) / (far - near)\n\n # Projection matrix\n K = [\n [s1, 0, w1, 0],\n [0, s2, h1, 0],\n [0, 0, f1, f2],\n [0, 0, 1, 0],\n ]\n \"\"\"\n K = kwargs.get(\"K\", self.K)\n if K is not None:\n if K.shape != (self._N, 4, 4):\n msg = \"Expected K to have shape of (%r, 4, 4)\"\n raise ValueError(msg % (self._N))\n else:\n K = self.compute_projection_matrix(\n kwargs.get(\"znear\", self.znear),\n kwargs.get(\"zfar\", self.zfar),\n kwargs.get(\"fov\", self.fov),\n kwargs.get(\"aspect_ratio\", self.aspect_ratio),\n kwargs.get(\"degrees\", self.degrees),\n )\n\n # Transpose the projection matrix as PyTorch3D transforms use row vectors.\n transform = Transform3d(\n matrix=K.transpose(1, 2).contiguous(), device=self.device\n )\n return transform\n\n def unproject_points(\n self,\n xy_depth: torch.Tensor,\n world_coordinates: bool = True,\n scaled_depth_input: bool = False,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\">!\n FoV cameras further allow for passing depth in world units\n (`scaled_depth_input=False`) or in the [0, 1]-normalized units\n (`scaled_depth_input=True`)\n\n Args:\n scaled_depth_input: If `True`, assumes the input depth is in\n the [0, 1]-normalized units. If `False` the input depth is in\n the world units.\n \"\"\"\n\n # obtain the relevant transformation to ndc\n if world_coordinates:\n to_ndc_transform = self.get_full_projection_transform()\n else:\n to_ndc_transform = self.get_projection_transform()\n\n if scaled_depth_input:\n # the input is scaled depth, so we don't have to do anything\n xy_sdepth = xy_depth\n else:\n # parse out important values from the projection matrix\n K_matrix = self.get_projection_transform(**kwargs.copy()).get_matrix()\n # parse out f1, f2 from K_matrix\n unsqueeze_shape = [1] * xy_depth.dim()\n unsqueeze_shape[0] = K_matrix.shape[0]\n f1 = K_matrix[:, 2, 2].reshape(unsqueeze_shape)\n f2 = K_matrix[:, 3, 2].reshape(unsqueeze_shape)\n # get the scaled depth\n sdepth = (f1 * xy_depth[..., 2:3] + f2) / xy_depth[..., 2:3]\n # concatenate xy + scaled depth\n xy_sdepth = torch.cat((xy_depth[..., 0:2], sdepth), dim=-1)\n\n # unproject with inverse of the projection\n unprojection_transform = to_ndc_transform.inverse()\n return unprojection_transform.transform_points(xy_sdepth)\n\n def is_perspective(self):\n return True\n\n def in_ndc(self):\n return True\n\n\ndef OpenGLOrthographicCameras(\n znear: float = 1.0,\n zfar: float = 100.0,\n top: float = 1.0,\n bottom: float = -1.0,\n left: float = -1.0,\n right: float = 1.0,\n scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n device: Device = \"cpu\",\n) -> \"FoVOrthographicCameras\":\n \"\"\"\n OpenGLOrthographicCameras has been DEPRECATED. 
Use FoVOrthographicCameras instead.\n Preserving OpenGLOrthographicCameras for backward compatibility.\n \"\"\"\n\n warnings.warn(\n \"\"\"OpenGLOrthographicCameras is deprecated,\n Use FoVOrthographicCameras instead.\n OpenGLOrthographicCameras will be removed in future releases.\"\"\",\n PendingDeprecationWarning,\n )\n\n return FoVOrthographicCameras(\n znear=znear,\n zfar=zfar,\n max_y=top,\n min_y=bottom,\n max_x=right,\n min_x=left,\n scale_xyz=scale_xyz,\n R=R,\n T=T,\n device=device,\n )\n\n\nclass FoVOrthographicCameras(CamerasBase):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n projection matrices by specifying the field of view.\n The definition of the parameters follow the OpenGL orthographic camera.\n \"\"\"\n\n # For __getitem__\n _FIELDS = (\n \"K\",\n \"znear\",\n \"zfar\",\n \"R\",\n \"T\",\n \"max_y\",\n \"min_y\",\n \"max_x\",\n \"min_x\",\n \"scale_xyz\",\n )\n\n def __init__(\n self,\n znear=1.0,\n zfar=100.0,\n max_y=1.0,\n min_y=-1.0,\n max_x=1.0,\n min_x=-1.0,\n scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n K: Optional[torch.Tensor] = None,\n device: Device = \"cpu\",\n ):\n \"\"\"\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n max_y: maximum y coordinate of the frustrum.\n min_y: minimum y coordinate of the frustrum.\n max_x: maximum x coordinate of the frustrum.\n min_x: minimum x coordinate of the frustrum\n scale_xyz: scale factors for each axis of shape (N, 3).\n R: Rotation matrix of shape (N, 3, 3).\n T: Translation of shape (N, 3).\n K: (optional) A calibration matrix of shape (N, 4, 4)\n If provided, don't need znear, zfar, max_y, min_y, max_x, min_x, scale_xyz\n device: torch.device or string.\n\n Only need to set min_x, max_x, min_y, max_y for viewing frustrums\n which are non symmetric about the origin.\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n device=device,\n znear=znear,\n zfar=zfar,\n max_y=max_y,\n min_y=min_y,\n max_x=max_x,\n min_x=min_x,\n scale_xyz=scale_xyz,\n R=R,\n T=T,\n K=K,\n )\n\n def compute_projection_matrix(\n self, znear, zfar, max_x, min_x, max_y, min_y, scale_xyz\n ) -> torch.Tensor:\n \"\"\"\n Compute the calibration matrix K of shape (N, 4, 4)\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n max_x: maximum x coordinate of the frustrum.\n min_x: minimum x coordinate of the frustrum\n max_y: maximum y coordinate of the frustrum.\n min_y: minimum y coordinate of the frustrum.\n scale_xyz: scale factors for each axis of shape (N, 3).\n \"\"\"\n K = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device)\n ones = torch.ones((self._N), dtype=torch.float32, device=self.device)\n # NOTE: OpenGL flips handedness of coordinate system between camera\n # space and NDC space so z sign is -ve. 
In PyTorch3D we maintain a\n # right handed coordinate system throughout.\n z_sign = +1.0\n\n K[:, 0, 0] = (2.0 / (max_x - min_x)) * scale_xyz[:, 0]\n K[:, 1, 1] = (2.0 / (max_y - min_y)) * scale_xyz[:, 1]\n K[:, 0, 3] = -(max_x + min_x) / (max_x - min_x)\n K[:, 1, 3] = -(max_y + min_y) / (max_y - min_y)\n K[:, 3, 3] = ones\n\n # NOTE: This maps the z coordinate to the range [0, 1] and replaces the\n # OpenGL z normalization to [-1, 1]\n K[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2]\n K[:, 2, 3] = -znear / (zfar - znear)\n\n return K\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the orthographic projection matrix.\n Use column major order.\n\n Args:\n **kwargs: parameters for the projection can be passed in to\n override the default values set in __init__.\n Return:\n a Transform3d object which represents a batch of projection\n matrices of shape (N, 4, 4)\n\n .. code-block:: python\n\n scale_x = 2 / (max_x - min_x)\n scale_y = 2 / (max_y - min_y)\n scale_z = 2 / (far-near)\n mid_x = (max_x + min_x) / (max_x - min_x)\n mid_y = (max_y + min_y) / (max_y - min_y)\n mid_z = (far + near) / (far - near)\n\n K = [\n [scale_x, 0, 0, -mid_x],\n [0, scale_y, 0, -mid_y],\n [0, 0, -scale_z, -mid_z],\n [0, 0, 0, 1],\n ]\n \"\"\"\n K = kwargs.get(\"K\", self.K)\n if K is not None:\n if K.shape != (self._N, 4, 4):\n msg = \"Expected K to have shape of (%r, 4, 4)\"\n raise ValueError(msg % (self._N))\n else:\n K = self.compute_projection_matrix(\n kwargs.get(\"znear\", self.znear),\n kwargs.get(\"zfar\", self.zfar),\n kwargs.get(\"max_x\", self.max_x),\n kwargs.get(\"min_x\", self.min_x),\n kwargs.get(\"max_y\", self.max_y),\n kwargs.get(\"min_y\", self.min_y),\n kwargs.get(\"scale_xyz\", self.scale_xyz),\n )\n\n transform = Transform3d(\n matrix=K.transpose(1, 2).contiguous(), device=self.device\n )\n return transform\n\n def unproject_points(\n self,\n xy_depth: torch.Tensor,\n world_coordinates: bool = True,\n scaled_depth_input: bool = False,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\">!\n FoV cameras further allow for passing depth in world units\n (`scaled_depth_input=False`) or in the [0, 1]-normalized units\n (`scaled_depth_input=True`)\n\n Args:\n scaled_depth_input: If `True`, assumes the input depth is in\n the [0, 1]-normalized units.
If `False` the input depth is in\n the world units.\n \"\"\"\n\n if world_coordinates:\n to_ndc_transform = self.get_full_projection_transform(**kwargs.copy())\n else:\n to_ndc_transform = self.get_projection_transform(**kwargs.copy())\n\n if scaled_depth_input:\n # the input depth is already scaled\n xy_sdepth = xy_depth\n else:\n # we have to obtain the scaled depth first\n K = self.get_projection_transform(**kwargs).get_matrix()\n unsqueeze_shape = [1] * K.dim()\n unsqueeze_shape[0] = K.shape[0]\n mid_z = K[:, 3, 2].reshape(unsqueeze_shape)\n scale_z = K[:, 2, 2].reshape(unsqueeze_shape)\n scaled_depth = scale_z * xy_depth[..., 2:3] + mid_z\n # cat xy and scaled depth\n xy_sdepth = torch.cat((xy_depth[..., :2], scaled_depth), dim=-1)\n # finally invert the transform\n unprojection_transform = to_ndc_transform.inverse()\n return unprojection_transform.transform_points(xy_sdepth)\n\n def is_perspective(self):\n return False\n\n def in_ndc(self):\n return True\n\n\n############################################################\n# MultiView Camera Classes #\n############################################################\n\"\"\"\nNote that the MultiView Cameras accept parameters in NDC space.\n\"\"\"\n\n\ndef SfMPerspectiveCameras(\n focal_length: float = 1.0,\n principal_point=((0.0, 0.0),),\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n device: Device = \"cpu\",\n) -> \"PerspectiveCameras\":\n \"\"\"\n SfMPerspectiveCameras has been DEPRECATED. Use PerspectiveCameras instead.\n Preserving SfMPerspectiveCameras for backward compatibility.\n \"\"\"\n\n warnings.warn(\n \"\"\"SfMPerspectiveCameras is deprecated,\n Use PerspectiveCameras instead.\n SfMPerspectiveCameras will be removed in future releases.\"\"\",\n PendingDeprecationWarning,\n )\n\n return PerspectiveCameras(\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n device=device,\n )\n\n\nclass PerspectiveCameras(CamerasBase):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n transformation matrices using the multi-view geometry convention for\n perspective camera.\n\n Parameters for this camera are specified in NDC if `in_ndc` is set to True.\n If parameters are specified in screen space, `in_ndc` must be set to False.\n \"\"\"\n\n # For __getitem__\n _FIELDS = (\n \"K\",\n \"R\",\n \"T\",\n \"focal_length\",\n \"principal_point\",\n \"_in_ndc\", # arg is in_ndc but attribute set as _in_ndc\n \"image_size\",\n )\n\n _SHARED_FIELDS = (\"_in_ndc\",)\n\n def __init__(\n self,\n focal_length=1.0,\n principal_point=((0.0, 0.0),),\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n K: Optional[torch.Tensor] = None,\n device: Device = \"cpu\",\n in_ndc: bool = True,\n image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,\n ) -> None:\n \"\"\"\n\n Args:\n focal_length: Focal length of the camera in world units.\n A tensor of shape (N, 1) or (N, 2) for\n square and non-square pixels respectively.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n A tensor of shape (N, 2).\n in_ndc: True if camera parameters are specified in NDC.\n If camera parameters are in screen space, it must\n be set to False.\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n K: (optional) A calibration matrix of shape (N, 4, 4)\n If provided, don't need focal_length, principal_point\n image_size: (height, width) of image size.\n A tensor of shape (N, 2) or a list/tuple. 
Required for screen cameras.\n device: torch.device or string\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n kwargs = {\"image_size\": image_size} if image_size is not None else {}\n super().__init__(\n device=device,\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n K=K,\n _in_ndc=in_ndc,\n **kwargs, # pyre-ignore\n )\n if image_size is not None:\n if (self.image_size < 1).any(): # pyre-ignore\n raise ValueError(\"Image_size provided has invalid values\")\n else:\n self.image_size = None\n\n # When focal length is provided as one value, expand to\n # create (N, 2) shape tensor\n if self.focal_length.ndim == 1: # (N,)\n self.focal_length = self.focal_length[:, None] # (N, 1)\n self.focal_length = self.focal_length.expand(-1, 2) # (N, 2)\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the projection matrix using the\n multi-view geometry convention.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in __init__.\n\n Returns:\n A `Transform3d` object with a batch of `N` projection transforms.\n\n .. code-block:: python\n\n fx = focal_length[:, 0]\n fy = focal_length[:, 1]\n px = principal_point[:, 0]\n py = principal_point[:, 1]\n\n K = [\n [fx, 0, px, 0],\n [0, fy, py, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n ]\n \"\"\"\n K = kwargs.get(\"K\", self.K)\n if K is not None:\n if K.shape != (self._N, 4, 4):\n msg = \"Expected K to have shape of (%r, 4, 4)\"\n raise ValueError(msg % (self._N))\n else:\n K = _get_sfm_calibration_matrix(\n self._N,\n self.device,\n kwargs.get(\"focal_length\", self.focal_length),\n kwargs.get(\"principal_point\", self.principal_point),\n orthographic=False,\n )\n\n transform = Transform3d(\n matrix=K.transpose(1, 2).contiguous(), device=self.device\n )\n return transform\n\n def unproject_points(\n self,\n xy_depth: torch.Tensor,\n world_coordinates: bool = True,\n from_ndc: bool = False,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n from_ndc: If `False` (default), assumes xy part of input is in\n NDC space if self.in_ndc(), otherwise in screen space. 
If\n `True`, assumes xy is in NDC space even if the camera\n is defined in screen space.\n \"\"\"\n if world_coordinates:\n to_camera_transform = self.get_full_projection_transform(**kwargs)\n else:\n to_camera_transform = self.get_projection_transform(**kwargs)\n if from_ndc:\n to_camera_transform = to_camera_transform.compose(\n self.get_ndc_camera_transform()\n )\n\n unprojection_transform = to_camera_transform.inverse()\n xy_inv_depth = torch.cat(\n (xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]), dim=-1 # type: ignore\n )\n return unprojection_transform.transform_points(xy_inv_depth)\n\n def get_principal_point(self, **kwargs) -> torch.Tensor:\n \"\"\"\n Return the camera's principal point\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n \"\"\"\n proj_mat = self.get_projection_transform(**kwargs).get_matrix()\n return proj_mat[:, 2, :2]\n\n def get_ndc_camera_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Returns the transform from camera projection space (screen or NDC) to NDC space.\n If the camera is defined already in NDC space, the transform is identity.\n For cameras defined in screen space, we adjust the principal point computation\n which is defined in the image space (commonly) and scale the points to NDC space.\n\n This transform leaves the depth unchanged.\n\n Important: This transforms assumes PyTorch3D conventions for the input points,\n i.e. +X left, +Y up.\n \"\"\"\n if self.in_ndc():\n ndc_transform = Transform3d(device=self.device, dtype=torch.float32)\n else:\n # when cameras are defined in screen/image space, the principal point is\n # provided in the (+X right, +Y down), aka image, coordinate system.\n # Since input points are defined in the PyTorch3D system (+X left, +Y up),\n # we need to adjust for the principal point transform.\n pr_point_fix = torch.zeros(\n (self._N, 4, 4), device=self.device, dtype=torch.float32\n )\n pr_point_fix[:, 0, 0] = 1.0\n pr_point_fix[:, 1, 1] = 1.0\n pr_point_fix[:, 2, 2] = 1.0\n pr_point_fix[:, 3, 3] = 1.0\n pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs)\n pr_point_fix_transform = Transform3d(\n matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device\n )\n image_size = kwargs.get(\"image_size\", self.get_image_size())\n screen_to_ndc_transform = get_screen_to_ndc_transform(\n self, with_xyflip=False, image_size=image_size\n )\n ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform)\n\n return ndc_transform\n\n def is_perspective(self):\n return True\n\n def in_ndc(self):\n return self._in_ndc\n\n\ndef SfMOrthographicCameras(\n focal_length: float = 1.0,\n principal_point=((0.0, 0.0),),\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n device: Device = \"cpu\",\n) -> \"OrthographicCameras\":\n \"\"\"\n SfMOrthographicCameras has been DEPRECATED. 
Use OrthographicCameras instead.\n Preserving SfMOrthographicCameras for backward compatibility.\n \"\"\"\n\n warnings.warn(\n \"\"\"SfMOrthographicCameras is deprecated,\n Use OrthographicCameras instead.\n SfMOrthographicCameras will be removed in future releases.\"\"\",\n PendingDeprecationWarning,\n )\n\n return OrthographicCameras(\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n device=device,\n )\n\n\nclass OrthographicCameras(CamerasBase):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n transformation matrices using the multi-view geometry convention for\n orthographic camera.\n\n Parameters for this camera are specified in NDC if `in_ndc` is set to True.\n If parameters are specified in screen space, `in_ndc` must be set to False.\n \"\"\"\n\n # For __getitem__\n _FIELDS = (\n \"K\",\n \"R\",\n \"T\",\n \"focal_length\",\n \"principal_point\",\n \"_in_ndc\",\n \"image_size\",\n )\n\n _SHARED_FIELDS = (\"_in_ndc\",)\n\n def __init__(\n self,\n focal_length=1.0,\n principal_point=((0.0, 0.0),),\n R: torch.Tensor = _R,\n T: torch.Tensor = _T,\n K: Optional[torch.Tensor] = None,\n device: Device = \"cpu\",\n in_ndc: bool = True,\n image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,\n ) -> None:\n \"\"\"\n\n Args:\n focal_length: Focal length of the camera in world units.\n A tensor of shape (N, 1) or (N, 2) for\n square and non-square pixels respectively.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n A tensor of shape (N, 2).\n in_ndc: True if camera parameters are specified in NDC.\n If False, then camera parameters are in screen space.\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n K: (optional) A calibration matrix of shape (N, 4, 4)\n If provided, don't need focal_length, principal_point, image_size\n image_size: (height, width) of image size.\n A tensor of shape (N, 2) or list/tuple. Required for screen cameras.\n device: torch.device or string\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n kwargs = {\"image_size\": image_size} if image_size is not None else {}\n super().__init__(\n device=device,\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n K=K,\n _in_ndc=in_ndc,\n **kwargs, # pyre-ignore\n )\n if image_size is not None:\n if (self.image_size < 1).any(): # pyre-ignore\n raise ValueError(\"Image_size provided has invalid values\")\n else:\n self.image_size = None\n\n # When focal length is provided as one value, expand to\n # create (N, 2) shape tensor\n if self.focal_length.ndim == 1: # (N,)\n self.focal_length = self.focal_length[:, None] # (N, 1)\n self.focal_length = self.focal_length.expand(-1, 2) # (N, 2)\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the projection matrix using\n the multi-view geometry convention.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in __init__.\n\n Returns:\n A `Transform3d` object with a batch of `N` projection transforms.\n\n .. 
code-block:: python\n\n fx = focal_length[:,0]\n fy = focal_length[:,1]\n px = principal_point[:,0]\n py = principal_point[:,1]\n\n K = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n \"\"\"\n K = kwargs.get(\"K\", self.K)\n if K is not None:\n if K.shape != (self._N, 4, 4):\n msg = \"Expected K to have shape of (%r, 4, 4)\"\n raise ValueError(msg % (self._N))\n else:\n K = _get_sfm_calibration_matrix(\n self._N,\n self.device,\n kwargs.get(\"focal_length\", self.focal_length),\n kwargs.get(\"principal_point\", self.principal_point),\n orthographic=True,\n )\n\n transform = Transform3d(\n matrix=K.transpose(1, 2).contiguous(), device=self.device\n )\n return transform\n\n def unproject_points(\n self,\n xy_depth: torch.Tensor,\n world_coordinates: bool = True,\n from_ndc: bool = False,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n from_ndc: If `False` (default), assumes xy part of input is in\n NDC space if self.in_ndc(), otherwise in screen space. If\n `True`, assumes xy is in NDC space even if the camera\n is defined in screen space.\n \"\"\"\n if world_coordinates:\n to_camera_transform = self.get_full_projection_transform(**kwargs)\n else:\n to_camera_transform = self.get_projection_transform(**kwargs)\n if from_ndc:\n to_camera_transform = to_camera_transform.compose(\n self.get_ndc_camera_transform()\n )\n\n unprojection_transform = to_camera_transform.inverse()\n return unprojection_transform.transform_points(xy_depth)\n\n def get_principal_point(self, **kwargs) -> torch.Tensor:\n \"\"\"\n Return the camera's principal point\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n \"\"\"\n proj_mat = self.get_projection_transform(**kwargs).get_matrix()\n return proj_mat[:, 3, :2]\n\n def get_ndc_camera_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Returns the transform from camera projection space (screen or NDC) to NDC space.\n If the camera is defined already in NDC space, the transform is identity.\n For cameras defined in screen space, we adjust the principal point computation\n which is defined in the image space (commonly) and scale the points to NDC space.\n\n Important: This transforms assumes PyTorch3D conventions for the input points,\n i.e. 
+X left, +Y up.\n \"\"\"\n if self.in_ndc():\n ndc_transform = Transform3d(device=self.device, dtype=torch.float32)\n else:\n # when cameras are defined in screen/image space, the principal point is\n # provided in the (+X right, +Y down), aka image, coordinate system.\n # Since input points are defined in the PyTorch3D system (+X left, +Y up),\n # we need to adjust for the principal point transform.\n pr_point_fix = torch.zeros(\n (self._N, 4, 4), device=self.device, dtype=torch.float32\n )\n pr_point_fix[:, 0, 0] = 1.0\n pr_point_fix[:, 1, 1] = 1.0\n pr_point_fix[:, 2, 2] = 1.0\n pr_point_fix[:, 3, 3] = 1.0\n pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs)\n pr_point_fix_transform = Transform3d(\n matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device\n )\n image_size = kwargs.get(\"image_size\", self.get_image_size())\n screen_to_ndc_transform = get_screen_to_ndc_transform(\n self, with_xyflip=False, image_size=image_size\n )\n ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform)\n\n return ndc_transform\n\n def is_perspective(self):\n return False\n\n def in_ndc(self):\n return self._in_ndc\n\n\n################################################\n# Helper functions for cameras #\n################################################\n\n\ndef _get_sfm_calibration_matrix(\n N: int,\n device: Device,\n focal_length,\n principal_point,\n orthographic: bool = False,\n) -> torch.Tensor:\n \"\"\"\n Returns a calibration matrix of a perspective/orthographic camera.\n\n Args:\n N: Number of cameras.\n focal_length: Focal length of the camera.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n orthographic: Boolean specifying if the camera is orthographic or not\n\n The calibration matrix `K` is set up as follows:\n\n .. 
code-block:: python\n\n fx = focal_length[:,0]\n fy = focal_length[:,1]\n px = principal_point[:,0]\n py = principal_point[:,1]\n\n for orthographic==True:\n K = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n else:\n K = [\n [fx, 0, px, 0],\n [0, fy, py, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n ]\n\n Returns:\n A calibration matrix `K` of the SfM-conventioned camera\n of shape (N, 4, 4).\n \"\"\"\n\n if not torch.is_tensor(focal_length):\n focal_length = torch.tensor(focal_length, device=device)\n\n if focal_length.ndim in (0, 1) or focal_length.shape[1] == 1:\n fx = fy = focal_length\n else:\n fx, fy = focal_length.unbind(1)\n\n if not torch.is_tensor(principal_point):\n principal_point = torch.tensor(principal_point, device=device)\n\n px, py = principal_point.unbind(1)\n\n K = fx.new_zeros(N, 4, 4)\n K[:, 0, 0] = fx\n K[:, 1, 1] = fy\n if orthographic:\n K[:, 0, 3] = px\n K[:, 1, 3] = py\n K[:, 2, 2] = 1.0\n K[:, 3, 3] = 1.0\n else:\n K[:, 0, 2] = px\n K[:, 1, 2] = py\n K[:, 3, 2] = 1.0\n K[:, 2, 3] = 1.0\n\n return K\n\n\n################################################\n# Helper functions for world to view transforms\n################################################\n\n\ndef get_world_to_view_transform(\n R: torch.Tensor = _R, T: torch.Tensor = _T\n) -> Transform3d:\n \"\"\"\n This function returns a Transform3d representing the transformation\n matrix to go from world space to view space by applying a rotation and\n a translation.\n\n PyTorch3D uses the same convention as Hartley & Zisserman.\n I.e., for camera extrinsic parameters R (rotation) and T (translation),\n we map a 3D point `X_world` in world coordinates to\n a point `X_cam` in camera coordinates with:\n `X_cam = X_world R + T`\n\n Args:\n R: (N, 3, 3) matrix representing the rotation.\n T: (N, 3) matrix representing the translation.\n\n Returns:\n a Transform3d object which represents the composed RT transformation.\n\n \"\"\"\n # TODO: also support the case where RT is specified as one matrix\n # of shape (N, 4, 4).\n\n if T.shape[0] != R.shape[0]:\n msg = \"Expected R, T to have the same batch dimension; got %r, %r\"\n raise ValueError(msg % (R.shape[0], T.shape[0]))\n if T.dim() != 2 or T.shape[1:] != (3,):\n msg = \"Expected T to have shape (N, 3); got %r\"\n raise ValueError(msg % repr(T.shape))\n if R.dim() != 3 or R.shape[1:] != (3, 3):\n msg = \"Expected R to have shape (N, 3, 3); got %r\"\n raise ValueError(msg % repr(R.shape))\n\n # Create a Transform3d object\n T_ = Translate(T, device=T.device)\n R_ = Rotate(R, device=R.device)\n return R_.compose(T_)\n\n\ndef camera_position_from_spherical_angles(\n distance: float,\n elevation: float,\n azimuth: float,\n degrees: bool = True,\n device: Device = \"cpu\",\n) -> torch.Tensor:\n \"\"\"\n Calculate the location of the camera based on the distance away from\n the target point, the elevation and azimuth angles.\n\n Args:\n distance: distance of the camera from the object.\n elevation, azimuth: angles.\n The inputs distance, elevation and azimuth can be one of the following\n - Python scalar\n - Torch scalar\n - Torch tensor of shape (N) or (1)\n degrees: bool, whether the angles are specified in degrees or radians.\n device: str or torch.device, device for new tensors to be placed on.\n\n The vectors are broadcast against each other so they all have shape (N, 1).\n\n Returns:\n camera_position: (N, 3) xyz location of the camera.\n \"\"\"\n broadcasted_args = convert_to_tensors_and_broadcast(\n distance, elevation, azimuth, device=device\n 
)\n dist, elev, azim = broadcasted_args\n if degrees:\n elev = math.pi / 180.0 * elev\n azim = math.pi / 180.0 * azim\n x = dist * torch.cos(elev) * torch.sin(azim)\n y = dist * torch.sin(elev)\n z = dist * torch.cos(elev) * torch.cos(azim)\n camera_position = torch.stack([x, y, z], dim=1)\n if camera_position.dim() == 0:\n camera_position = camera_position.view(1, -1) # add batch dim.\n return camera_position.view(-1, 3)\n\n\ndef look_at_rotation(\n camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: Device = \"cpu\"\n) -> torch.Tensor:\n \"\"\"\n This function takes a vector 'camera_position' which specifies the location\n of the camera in world coordinates and two vectors `at` and `up` which\n indicate the position of the object and the up directions of the world\n coordinate system respectively. The object is assumed to be centered at\n the origin.\n\n The output is a rotation matrix representing the transformation\n from world coordinates -> view coordinates.\n\n Args:\n camera_position: position of the camera in world coordinates\n at: position of the object in world coordinates\n up: vector specifying the up direction in the world coordinate frame.\n\n The inputs camera_position, at and up can each be a\n - 3 element tuple/list\n - torch tensor of shape (1, 3)\n - torch tensor of shape (N, 3)\n\n The vectors are broadcast against each other so they all have shape (N, 3).\n\n Returns:\n R: (N, 3, 3) batched rotation matrices\n \"\"\"\n # Format input and broadcast\n broadcasted_args = convert_to_tensors_and_broadcast(\n camera_position, at, up, device=device\n )\n camera_position, at, up = broadcasted_args\n for t, n in zip([camera_position, at, up], [\"camera_position\", \"at\", \"up\"]):\n if t.shape[-1] != 3:\n msg = \"Expected arg %s to have shape (N, 3); got %r\"\n raise ValueError(msg % (n, t.shape))\n z_axis = F.normalize(at - camera_position, eps=1e-5)\n x_axis = F.normalize(torch.cross(up, z_axis, dim=1), eps=1e-5)\n y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5)\n is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(\n dim=1, keepdim=True\n )\n if is_close.any():\n replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5)\n x_axis = torch.where(is_close, replacement, x_axis)\n R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)\n return R.transpose(1, 2)\n\n\ndef look_at_view_transform(\n dist: float = 1.0,\n elev: float = 0.0,\n azim: float = 0.0,\n degrees: bool = True,\n eye: Optional[Sequence] = None,\n at=((0, 0, 0),), # (1, 3)\n up=((0, 1, 0),), # (1, 3)\n device: Device = \"cpu\",\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n This function returns a rotation and translation matrix\n to apply the 'Look At' transformation from world -> view coordinates [0].\n\n Args:\n dist: distance of the camera from the object\n elev: angle in degrees or radians. This is the angle between the\n vector from the object to the camera, and the horizontal plane y = 0 (xz-plane).\n azim: angle in degrees or radians. The vector from the object to\n the camera is projected onto a horizontal plane y = 0.\n azim is the angle between the projected vector and a\n reference vector at (0, 0, 1) on the reference plane (the horizontal plane).\n dist, elev and azim can be of shape (1), (N).\n degrees: boolean flag to indicate if the elevation and azimuth\n angles are specified in degrees or radians.\n eye: the position of the camera(s) in world coordinates. 
If eye is not\n None, it will override the camera position derived from dist, elev, azim.\n up: the direction of the x axis in the world coordinate system.\n at: the position of the object(s) in world coordinates.\n eye, up and at can be of shape (1, 3) or (N, 3).\n\n Returns:\n 2-element tuple containing\n\n - **R**: the rotation to apply to the points to align with the camera.\n - **T**: the translation to apply to the points to align with the camera.\n\n References:\n [0] https://www.scratchapixel.com\n \"\"\"\n\n if eye is not None:\n broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device)\n eye, at, up = broadcasted_args\n C = eye\n else:\n broadcasted_args = convert_to_tensors_and_broadcast(\n dist, elev, azim, at, up, device=device\n )\n dist, elev, azim, at, up = broadcasted_args\n C = (\n camera_position_from_spherical_angles(\n dist, elev, azim, degrees=degrees, device=device\n )\n + at\n )\n\n R = look_at_rotation(C, at, up, device=device)\n T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0]\n return R, T\n\n\ndef get_ndc_to_screen_transform(\n cameras,\n with_xyflip: bool = False,\n image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,\n) -> Transform3d:\n \"\"\"\n PyTorch3D NDC to screen conversion.\n Conversion from PyTorch3D's NDC space (+X left, +Y up) to screen/image space\n (+X right, +Y down, origin top left).\n\n Args:\n cameras\n with_xyflip: flips x- and y-axis if set to True.\n Optional kwargs:\n image_size: ((height, width),) specifying the height, width\n of the image. If not provided, it reads it from cameras.\n\n We represent the NDC to screen conversion as a Transform3d\n with projection matrix\n\n K = [\n [s, 0, 0, cx],\n [0, s, 0, cy],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n\n \"\"\"\n # We require the image size, which is necessary for the transform\n if image_size is None:\n msg = \"For NDC to screen conversion, image_size=(height, width) needs to be specified.\"\n raise ValueError(msg)\n\n K = torch.zeros((cameras._N, 4, 4), device=cameras.device, dtype=torch.float32)\n if not torch.is_tensor(image_size):\n image_size = torch.tensor(image_size, device=cameras.device)\n image_size = image_size.view(-1, 2) # of shape (1 or B)x2\n height, width = image_size.unbind(1)\n\n # For non square images, we scale the points such that smallest side\n # has range [-1, 1] and the largest side has range [-u, u], with u > 1.\n # This convention is consistent with the PyTorch3D renderer\n scale = (image_size.min(dim=1).values - 0.0) / 2.0\n\n K[:, 0, 0] = scale\n K[:, 1, 1] = scale\n K[:, 0, 3] = -1.0 * (width - 0.0) / 2.0\n K[:, 1, 3] = -1.0 * (height - 0.0) / 2.0\n K[:, 2, 2] = 1.0\n K[:, 3, 3] = 1.0\n\n # Transpose the projection matrix as PyTorch3D transforms use row vectors.\n transform = Transform3d(\n matrix=K.transpose(1, 2).contiguous(), device=cameras.device\n )\n\n if with_xyflip:\n # flip x, y axis\n xyflip = torch.eye(4, device=cameras.device, dtype=torch.float32)\n xyflip[0, 0] = -1.0\n xyflip[1, 1] = -1.0\n xyflip = xyflip.view(1, 4, 4).expand(cameras._N, -1, -1)\n xyflip_transform = Transform3d(\n matrix=xyflip.transpose(1, 2).contiguous(), device=cameras.device\n )\n transform = transform.compose(xyflip_transform)\n return transform\n\n\ndef get_screen_to_ndc_transform(\n cameras,\n with_xyflip: bool = False,\n image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,\n) -> Transform3d:\n \"\"\"\n Screen to PyTorch3D NDC conversion.\n Conversion from screen/image space (+X right, +Y down, origin top left)\n to 
PyTorch3D's NDC space (+X left, +Y up).\n\n Args:\n cameras\n with_xyflip: flips x- and y-axis if set to True.\n Optional kwargs:\n image_size: ((height, width),) specifying the height, width\n of the image. If not provided, it reads it from cameras.\n\n We represent the screen to NDC conversion as a Transform3d\n with projection matrix\n\n K = [\n [1/s, 0, 0, cx/s],\n [ 0, 1/s, 0, cy/s],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1],\n ]\n\n \"\"\"\n transform = get_ndc_to_screen_transform(\n cameras,\n with_xyflip=with_xyflip,\n image_size=image_size,\n ).inverse()\n return transform\n" ]
[ [ "torch.zeros", "torch.nn.functional.normalize", "torch.cat", "torch.cos", "torch.stack", "torch.tan", "torch.sin", "torch.is_tensor", "torch.ones", "torch.eye", "torch.tensor", "torch.cross", "torch.where" ] ]
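Usage sketch for the FoV cameras and look-at helpers defined in the cameras module above. This is a hedged example, not part of the indexed file: it assumes the module is importable as pytorch3d.renderer (the usual upstream re-export), and the distances, angles, point coordinates and image size are made up for illustration.

# Sketch: batched FoV perspective cameras built from spherical viewpoints,
# projecting world-space points to NDC and to screen space.
import torch
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

# Two viewpoints looking at the origin; R: (2, 3, 3), T: (2, 3).
R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=torch.tensor([0.0, 45.0]))

cameras = FoVPerspectiveCameras(znear=0.1, zfar=100.0, fov=60.0, R=R, T=T)

# World-space points of shape (P, 3); they broadcast against the N=2 cameras.
points_world = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.2, 0.5]])

# NDC coordinates: x, y in [-1, 1] (for square images) plus projected depth.
points_ndc = cameras.transform_points(points_world)                 # (2, P, 3)

# Screen coordinates: +X right, +Y down, origin at the top-left corner.
points_screen = cameras.transform_points_screen(
    points_world, image_size=(128, 256)                             # (height, width)
)                                                                    # (2, P, 3)

print(points_ndc.shape, points_screen.shape)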
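The PerspectiveCameras / OrthographicCameras classes also accept intrinsics directly in screen (image) space via in_ndc=False, in which case image_size is required and get_ndc_camera_transform handles the screen-to-NDC adjustment internally. Another hedged sketch; the pixel focal length, principal point and image size below are hypothetical.

# Sketch: a perspective camera with pixel-unit intrinsics (screen space).
import torch
from pytorch3d.renderer import PerspectiveCameras, look_at_view_transform

R, T = look_at_view_transform(dist=3.0, elev=0.0, azim=0.0)

cameras = PerspectiveCameras(
    focal_length=torch.tensor([[500.0, 500.0]]),     # (N, 2): fx, fy in pixels
    principal_point=torch.tensor([[160.0, 120.0]]),  # (N, 2): px, py in pixels
    in_ndc=False,                                    # intrinsics are in screen space
    image_size=((240, 320),),                        # (height, width), required here
    R=R,
    T=T,
)

points_world = torch.tensor([[0.0, 0.0, 0.0], [0.2, -0.1, 0.3]])
points_screen = cameras.transform_points_screen(points_world)  # pixel coordinates

# __getitem__ returns a new camera object holding only the selected batch element.
cam0 = cameras[0]
print(points_screen.shape, cam0.get_image_size())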
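For the FoV cameras, unproject_points inverts the full projection. Because transform_points already returns the [0, 1]-normalized depth for FoVPerspectiveCameras, the round trip below uses scaled_depth_input=True; values and tolerance are illustrative.

# Sketch: world -> NDC -> world round trip through a FoV perspective camera.
import torch
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

R, T = look_at_view_transform(dist=2.0, elev=20.0, azim=30.0)
cameras = FoVPerspectiveCameras(znear=0.5, zfar=10.0, R=R, T=T)

points_world = torch.tensor([[[0.0, 0.0, 0.0], [0.3, -0.2, 0.1]]])  # (1, P, 3)

# (x_ndc, y_ndc, scaled depth in [0, 1]) for each point.
xy_depth = cameras.transform_points(points_world)

points_back = cameras.unproject_points(
    xy_depth, world_coordinates=True, scaled_depth_input=True
)

print(torch.allclose(points_back, points_world, atol=1e-4))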
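Finally, the world-to-view helpers at the end of the file follow the row-vector convention X_cam = X_world R + T. The sketch below builds extrinsics by hand from an arbitrary camera position with look_at_rotation and checks them against get_world_to_view_transform; the import path assumes the file lives at pytorch3d/renderer/cameras.py as in upstream PyTorch3D.

# Sketch: hand-built extrinsics and the X_cam = X_world @ R + T convention.
import torch
from pytorch3d.renderer.cameras import get_world_to_view_transform, look_at_rotation

camera_position = torch.tensor([[2.0, 1.0, 2.0]])  # (N, 3), arbitrary viewpoint
R = look_at_rotation(camera_position)              # (N, 3, 3), at=(0, 0, 0), up=(0, 1, 0)
T = -torch.bmm(R.transpose(1, 2), camera_position[:, :, None])[:, :, 0]  # (N, 3)

world_to_view = get_world_to_view_transform(R=R, T=T)

X_world = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])  # (N, P, 3)
X_cam = world_to_view.transform_points(X_world)
X_cam_manual = X_world @ R + T[:, None, :]

print(torch.allclose(X_cam, X_cam_manual, atol=1e-5))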